Diffstat (limited to 'main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch')
-rw-r--r-- | main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch | 122878
1 file changed, 122878 insertions, 0 deletions
diff --git a/main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch b/main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch
new file mode 100644
index 000000000..2a009861c
--- /dev/null
+++ b/main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch
@@ -0,0 +1,122878 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index b89a739..e289b9b 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -2,9 +2,11 @@
+ *.aux
+ *.bin
+ *.bz2
++*.c.[012]*.*
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -14,6 +16,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -48,14 +51,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -69,9 +75,11 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -80,6 +88,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -92,32 +101,40 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+ config
+ config-*
+ config_data.h*
++config.c
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+ crc32table.h*
+ cscope.*
+ defkeymap.c
++devicetable-offsets.h
+ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -125,12 +142,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -145,14 +165,14 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+ kxgettext
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -162,14 +182,15 @@ mach-types.h
+ machtypes.h
+ map
+ map_hugetlb
+-media
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -185,6 +206,8 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
++pasyms.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -194,6 +217,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -203,7 +227,12 @@ r200_reg_safe.h
+ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
++randomize_layout_hash.h
++randomize_layout_seed.h
++realmode.lds
++realmode.relocs
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -213,8 +242,12 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++size_overflow_hash.h
+ sImage
++slabinfo
+ sm_tbl*
++sortextable
+ split-include
+ syscalltab.h
+ tables.c
+@@ -224,6 +257,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -235,13 +269,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -249,9 +287,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 7116fda..d8ed6e8 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1084,6 +1084,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
+ Default: 1024
+
++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++ ignore grsecurity's /proc restrictions
++
++
+ hashdist= [KNL,NUMA] Large hashes allocated during boot
+ are distributed across NUMA nodes. Defaults on
+ for 64-bit NUMA, off otherwise.
+@@ -2080,6 +2084,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ noexec=on: enable non-executable mappings (default)
+ noexec=off: disable non-executable mappings
+
++ nopcid [X86-64]
++ Disable PCID (Process-Context IDentifier) even if it
++ is supported by the processor.
++
+ nosmap [X86]
+ Disable SMAP (Supervisor Mode Access Prevention)
+ even if it is supported by processor.
+@@ -2347,6 +2355,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_sanitize_slab=
++ 0/1 to disable/enable slab object sanitization (enabled by
++ default).
++
++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
++
++ pax_extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
++ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
++ when the processor supports PCID.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
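For context on how boot flags such as the pax_softmode= parameter documented above are typically consumed, here is a minimal sketch using the kernel's standard early_param hook. The handler and flag variable names are illustrative only, not the patch's actual code:

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int pax_softmode_flag __read_mostly;	/* hypothetical flag variable */

static int __init setup_pax_softmode(char *str)
{
	/* "pax_softmode=1" enables softmode, "pax_softmode=0" disables it */
	if (kstrtoint(str, 10, &pax_softmode_flag))
		pax_softmode_flag = 0;
	return 0;
}
early_param("pax_softmode", setup_pax_softmode);

early_param() runs before the console is fully up, which is why such handlers only stash a value for later use.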
+diff --git a/Makefile b/Makefile
+index 05279d4..c24e149 100644
+--- a/Makefile
++++ b/Makefile
+@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -423,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+ $(Q)$(MAKE) $(build)=scripts/basic
+ $(Q)rm -f .tmp_quiet_recordmcount
+
+@@ -585,6 +586,75 @@ else
+ KBUILD_CFLAGS += -O2
+ endif
+
++# Tell gcc to never replace conditional load with a non-conditional one
++KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
++
++ifndef DISABLE_PAX_PLUGINS
++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
++else
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
++endif
++ifneq ($(PLUGINCC),)
++ifdef CONFIG_PAX_CONSTIFY_PLUGIN
++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
++endif
++ifdef CONFIG_GRKERNSEC_RANDSTRUCT
++RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
++ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
++RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
++endif
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++ifdef CONFIG_PAX_SIZE_OVERFLOW
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
++STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
++endif
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
++export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
++else
++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
++endif
++ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
++endif
++endif
++
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+
+ ifdef CONFIG_READABLE_ASM
+@@ -781,7 +851,7 @@ export mod_sign_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -830,6 +900,8 @@ endif
+
+ # The actual objects are generated when descending,
+ # make sure no implicit rule kicks in
++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -839,7 +911,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+ # Error messages still appears in the original language
+
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+ define filechk_kernel.release
+@@ -882,10 +954,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+
+ archprepare: archheaders archscripts prepare1 scripts_basic
+
++prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+
+ # Generate some files
+@@ -993,6 +1068,8 @@ all: modules
+ # using awk while concatenating to the final file.
+
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ @$(kecho) ' Building modules, stage 2.';
+@@ -1008,7 +1085,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+
+ # Target to install modules
+ PHONY += modules_install
+@@ -1074,7 +1151,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
+ signing_key.priv signing_key.x509 x509.genkey \
+ extra_certificates signing_key.x509.keyid \
+- signing_key.x509.signer
++ signing_key.x509.signer \
++ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
++ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
++ tools/gcc/randomize_layout_seed.h
+
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1113,7 +1193,7 @@ distclean: mrproper
+ @find $(srctree) $(RCS_FIND_IGNORE) \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+- -o -name '.*.rej' \
++ -o -name '.*.rej' -o -name '*.so' \
+ -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+@@ -1275,6 +1355,8 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(module-dirs)
+ @$(kecho) ' Building modules, stage 2.';
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1414,17 +1496,21 @@ else
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1434,11 +1520,15 @@ endif
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%/: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.ko: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
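The Makefile machinery above only locates a plugin-capable compiler and injects -fplugin=... flags; the plugins themselves live under tools/gcc and are not part of this excerpt. For orientation, the minimal shape of any GCC plugin, sketched against GCC's public plugin API, is shown below; this is not one of the patch's plugins:

#include "gcc-plugin.h"
#include "plugin-version.h"

/* GCC refuses to load plugins that do not declare GPL compatibility. */
int plugin_is_GPL_compatible;

int plugin_init(struct plugin_name_args *plugin_info,
                struct plugin_gcc_version *version)
{
	/* Reject loading into a GCC other than the one we were built for. */
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;

	/* A real plugin (constify, stackleak, size_overflow, ...) would
	 * register its passes or callbacks here via register_callback(). */
	return 0;
}

This mismatch check is also why the Makefile probes the compiler version with cc-ifversion before deciding how to build the plugins.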
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 78b03ef..da28a51 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #endif
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 968d999..d36b2df 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index aab14a0..b4fa3e7 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index d8f9b7e..f6222fa 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -102,6 +102,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 2fd00b7..cfd5069 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index 1402fcc..0b1abd2 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+- unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long limit, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ info.flags = 0;
+ info.length = len;
+@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ info.high_limit = limit;
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+
+ return addr;
+ }
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 98838a0..b304fb4 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -133,8 +251,29 @@ retry:
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
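The ARM portion that follows begins with two Kconfig tweaks and then implements PAX_REFCOUNT in atomic.h: the flag-setting adds/adcs instruction variants plus a bvc (branch-if-no-overflow) let a signed overflow fall through into a bkpt trap instead of silently wrapping a reference count. A userspace C analogue of the same idea, for illustration only (this is not kernel code):

#include <limits.h>
#include <stdio.h>

static int checked_inc(int *counter)
{
	int result;

	/* Detect signed wraparound, analogous to the bvc/bkpt pair below. */
	if (__builtin_add_overflow(*counter, 1, &result))
		return -1;	/* would wrap: refuse instead of corrupting */
	*counter = result;
	return 0;
}

int main(void)
{
	int refs = INT_MAX;

	if (checked_inc(&refs))
		puts("overflow caught instead of wrapping to INT_MIN");
	return 0;
}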
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 4733d32..b142a40 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1863,7 +1863,7 @@ config ALIGNMENT_TRAP
+
+ config UACCESS_WITH_MEMCPY
+ bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+- depends on MMU
++ depends on MMU && !PAX_MEMORY_UDEREF
+ default y if CPU_FEROCEON
+ help
+ Implement faster copy_to_user and clear_user methods for CPU
+@@ -2126,6 +2126,7 @@ config XIP_PHYS_ADDR
+ config KEXEC
+ bool "Kexec system call (EXPERIMENTAL)"
+ depends on (!SMP || PM_SLEEP_SMP)
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 62d2cb5..09d45e3 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -18,17 +18,35 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
+ /*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic_set(v,i) (((v)->counter) = (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_add\n"
++"1: ldrex %1, [%3]\n"
++" adds %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic_add_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return\n"
++"1: ldrex %1, [%3]\n"
++" adds %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_sub\n"
++"1: ldrex %1, [%3]\n"
++" subs %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic_sub_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " sub %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_sub_return\n"
+-"1: ldrex %0, [%3]\n"
+-" sub %0, %0, %4\n"
++"1: ldrex %1, [%3]\n"
++" subs %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strex %1, %0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+
+ #ifdef CONFIG_SMP
+@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
+
+ return val;
+ }
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_return(i, v);
++}
++
+ #define atomic_add(i, v) (void) atomic_add_return(i, v)
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ (void) atomic_add_return(i, v);
++}
+
+ static inline int atomic_sub_return(int i, atomic_t *v)
+ {
+@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ return val;
+ }
+ #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ (void) atomic_sub_return(i, v);
++}
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return ret;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return atomic_cmpxchg(v, old, new);
++}
++
+ #endif /* __LINUX_ARM_ARCH__ */
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -221,6 +410,14 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ __asm__ __volatile__("@ atomic64_set\n"
+@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ );
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++" strd %2, %H2, [%1]"
++ : "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ );
++}
+ #else
+ static inline long long atomic64_read(const atomic64_t *v)
+ {
+@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ long long tmp;
+@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ long long tmp;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
+ #endif
+
+ static inline void atomic64_add(long long i, atomic64_t *v)
+@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
+ __asm__ __volatile__("@ atomic64_add\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %Q0, %Q0, %Q4\n"
++" adcs %R0, %R0, %R4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ long long result;
++ unsigned long tmp;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic64_add_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" adds %Q0, %Q0, %Q4\n"
+ " adc %R0, %R0, %R4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ __asm__ __volatile__("@ atomic64_add_return\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %Q0, %Q0, %Q4\n"
++" adcs %R0, %R0, %R4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ long long result;
++ unsigned long tmp;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" adds %Q0, %Q0, %Q4\n"
+ " adc %R0, %R0, %R4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
+ __asm__ __volatile__("@ atomic64_sub\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " subs %Q0, %Q0, %Q4\n"
++" sbcs %R0, %R0, %R4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ long long result;
++ unsigned long tmp;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" subs %Q0, %Q0, %Q4\n"
+ " sbc %R0, %R0, %R4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+@@ -344,16 +691,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+ __asm__ __volatile__("@ atomic64_sub_return\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " subs %Q0, %Q0, %Q4\n"
+-" sbc %R0, %R0, %R4\n"
++" sbcs %R0, %R0, %R4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+- smp_mb();
+-
+ return result;
+ }
+
+@@ -382,6 +742,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+ return oldval;
+ }
+
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
++ long long new)
++{
++ long long oldval;
++ unsigned long res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
++ "ldrexd %1, %H1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "teqeq %H1, %H4\n"
++ "strexdeq %0, %5, %H5, [%3]"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ {
+ long long result;
+@@ -406,20 +791,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+ long long result;
+- unsigned long tmp;
++ u64 tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+-"1: ldrexd %0, %H0, [%3]\n"
+-" subs %Q0, %Q0, #1\n"
+-" sbc %R0, %R0, #0\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" subs %Q0, %Q1, #1\n"
++" sbcs %R0, %R1, #0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %Q0, %Q1\n"
++" mov %R0, %R1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " teq %R0, #0\n"
+-" bmi 2f\n"
++" bmi 4f\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+@@ -442,13 +841,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ " teq %0, %5\n"
+ " teqeq %H0, %H5\n"
+ " moveq %1, #0\n"
+-" beq 2f\n"
++" beq 4f\n"
+ " adds %Q0, %Q0, %Q6\n"
+-" adc %R0, %R0, %R6\n"
++" adcs %R0, %R0, %R6\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strexd %2, %0, %H0, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -461,10 +872,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
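The point of the parallel *_unchecked API added above: overflow trapping is right for lifetime reference counts but wrong for counters that may legitimately wrap, such as statistics. A hypothetical user of the two types for illustration (the struct is invented; the type and function names come from the patch):

#include <linux/atomic.h>

struct conn_stats {
	atomic_t refcnt;			/* lifetime count: overflow is a bug */
	atomic_unchecked_t rx_packets;		/* statistic: wraparound is harmless */
};

static void on_packet(struct conn_stats *s)
{
	atomic_inc(&s->refcnt);			/* trapped by bkpt on overflow */
	atomic_inc_unchecked(&s->rx_packets);	/* never trapped */
	atomic_dec(&s->refcnt);
}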
+diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
+index 2f59f74..1594659 100644
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -63,7 +63,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..ba3dee4 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+@@ -24,5 +26,6 @@
+ #endif
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__ ((__section__(".data..read_only")))
+
+ #endif
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 8b8b616..d973d24 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -116,7 +116,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+
+ /*
+ * Select the calling method
+diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
+index 5233151..87a71fa 100644
+--- a/arch/arm/include/asm/checksum.h
++++ b/arch/arm/include/asm/checksum.h
+@@ -37,7 +37,19 @@ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+
+ __wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++
++static inline __wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
++{
++ __wsum ret;
++ pax_open_userland();
++ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
++ pax_close_userland();
++ return ret;
++}
++
++
+
+ /*
+ * Fold a partial checksum without adding pseudo headers
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index df2fbba..63fe3e1 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+
+ #define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++#define xchg_unchecked(ptr,x) \
++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+ #include <asm-generic/cmpxchg-local.h>
+
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index 6ddbe44..b5e38b1 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -48,18 +48,37 @@
+ * Domain types
+ */
+ #define DOMAIN_NOACCESS 0
+-#define DOMAIN_CLIENT 1
+ #ifdef CONFIG_CPU_USE_DOMAINS
++#define DOMAIN_USERCLIENT 1
++#define DOMAIN_KERNELCLIENT 1
+ #define DOMAIN_MANAGER 3
++#define DOMAIN_VECTORS DOMAIN_USER
+ #else
++
++#ifdef CONFIG_PAX_KERNEXEC
+ #define DOMAIN_MANAGER 1
++#define DOMAIN_KERNEXEC 3
++#else
++#define DOMAIN_MANAGER 1
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define DOMAIN_USERCLIENT 0
++#define DOMAIN_UDEREF 1
++#define DOMAIN_VECTORS DOMAIN_KERNEL
++#else
++#define DOMAIN_USERCLIENT 1
++#define DOMAIN_VECTORS DOMAIN_USER
++#endif
++#define DOMAIN_KERNELCLIENT 1
++
+ #endif
+
+ #define domain_val(dom,type) ((type) << (2*(dom)))
+
+ #ifndef __ASSEMBLY__
+
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ static inline void set_domain(unsigned val)
+ {
+ asm volatile(
+@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
+ isb();
+ }
+
+-#define modify_domain(dom,type) \
+- do { \
+- struct thread_info *thread = current_thread_info(); \
+- unsigned int domain = thread->cpu_domain; \
+- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
+- thread->cpu_domain = domain | domain_val(dom, type); \
+- set_domain(thread->cpu_domain); \
+- } while (0)
+-
++extern void modify_domain(unsigned int dom, unsigned int type);
+ #else
+ static inline void set_domain(unsigned val) { }
+ static inline void modify_domain(unsigned dom, unsigned type) { }
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index f4b46d3..abc9b2b 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #ifdef CONFIG_MMU
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
+index de53547..52b9a28 100644
+--- a/arch/arm/include/asm/fncpy.h
++++ b/arch/arm/include/asm/fncpy.h
+@@ -81,7 +81,9 @@
+ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
+ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+ \
++ pax_open_kernel(); \
+ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
++ pax_close_kernel(); \
+ flush_icache_range((unsigned long)(dest_buf), \
+ (unsigned long)(dest_buf) + (size)); \
+ \
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 2aff798..099eb15 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -45,6 +45,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ pax_open_userland();
++
+ smp_mb();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrex %1, [%4]\n"
+@@ -60,6 +62,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ : "cc", "memory");
+ smp_mb();
+
++ pax_close_userland();
++
+ *uval = val;
+ return ret;
+ }
+@@ -90,6 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ pax_open_userland();
++
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+@@ -100,6 +106,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
++ pax_close_userland();
++
+ *uval = val;
+ return ret;
+ }
+@@ -122,6 +130,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
++ pax_open_userland();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+@@ -143,6 +152,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ ret = -ENOSYS;
+ }
+
++ pax_close_userland();
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
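The checksum.h and futex.h changes above show the recurring UDEREF pattern: with PAX_MEMORY_UDEREF, userland is not freely dereferenceable from kernel context, so every legitimate user access is bracketed by pax_open_userland()/pax_close_userland() (on ARM these toggle the DACR domain bits, see domain.h earlier). A condensed sketch of the same pattern around a hypothetical helper, using names taken from the patch:

static inline long read_user_word(const unsigned long __user *uaddr,
                                  unsigned long *val)
{
	long err;

	pax_open_userland();	/* temporarily allow userland dereference */
	err = __get_user(*val, uaddr);
	pax_close_userland();	/* re-seal userland */
	return err;
}

Keeping the open/close window as narrow as possible is the whole point: anything a stray kernel pointer does outside the bracket faults instead of silently reading or writing user memory.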
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index 83eb2f7..ed77159 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -4,6 +4,6 @@
+ /*
+ * This is the "bare minimum". AIO seems to require this.
+ */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif
+diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
+index 9e614a1..3302cca 100644
+--- a/arch/arm/include/asm/mach/dma.h
++++ b/arch/arm/include/asm/mach/dma.h
+@@ -22,7 +22,7 @@ struct dma_ops {
+ int (*residue)(unsigned int, dma_t *); /* optional */
+ int (*setspeed)(unsigned int, dma_t *, int); /* optional */
+ const char *type;
+-};
++} __do_const;
+
+ struct dma_struct {
+ void *addr; /* single DMA address */
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index f98c7f3..e5c626d 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -23,17 +23,19 @@ struct map_desc {
+
+ /* types 0-3 are defined in asm/io.h */
+ enum {
+- MT_UNCACHED = 4,
+- MT_CACHECLEAN,
+- MT_MINICLEAN,
++ MT_UNCACHED_RW = 4,
++ MT_CACHECLEAN_RO,
++ MT_MINICLEAN_RO,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+- MT_MEMORY_RWX,
++ __MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+- MT_ROM,
+- MT_MEMORY_RWX_NONCACHED,
++ MT_MEMORY_RX,
++ MT_ROM_RX,
++ MT_MEMORY_RW_NONCACHED,
++ MT_MEMORY_RX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+- MT_MEMORY_RWX_ITCM,
++ MT_MEMORY_RX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+ };
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index f94784f..9a09a4a 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -35,7 +35,7 @@ struct outer_cache_fns {
+ #endif
+ void (*set_debug)(unsigned long);
+ void (*resume)(void);
+-};
++} __no_const;
+
+ extern struct outer_cache_fns outer_cache;
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index 4355f0e..cd9168e 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -23,6 +23,7 @@
+
+ #else
+
++#include <linux/compiler.h>
+ #include <asm/glue.h>
+
+ /*
+@@ -114,7 +115,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index 78a7793..e3dc06c 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -17,6 +17,7 @@
+ #include <asm/processor.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/system_info.h>
+
+ #define check_pgt_cache() do { } while (0)
+
+@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+
+ /*
+@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd) do { } while (0)
+ #define pud_populate(mm,pmd,pte) BUG()
++#define pud_populate_kernel(mm,pmd,pte) BUG()
+
+ #endif /* CONFIG_ARM_LPAE */
+
+@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ __free_page(pte);
+ }
+
++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
++{
++#ifdef CONFIG_ARM_LPAE
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#else
++ if (addr & SECTION_SIZE)
++ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
++ else
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#endif
++ flush_pmd_entry(pmdp);
++}
++
+ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+ pmdval_t prot)
+ {
+@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+ static inline void
+ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+ {
+- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
++ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
+ }
+ #define pmd_pgtable(pmd) pmd_page(pmd)
+
+diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
+index 5cfba15..f415e1a 100644
+--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
+@@ -20,12 +20,15 @@
+ #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
+ #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
+ #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
++#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
+ #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
+ #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+ #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
++
+ /*
+ * - section
+ */
++#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
+ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+@@ -37,6 +40,7 @@
+ #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+ #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+ #define PMD_SECT_AF (_AT(pmdval_t, 0))
++#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
+
+ #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+ #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+@@ -66,6 +70,7 @@
+ * - extended small page/tiny page
+ */
+ #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
+ #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+ #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+ #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 219ac88..73ec32a 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -126,6 +126,9 @@
+ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+ #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
++/* Two-level page tables only have PXN in the PGD, not in the PTE. */
++#define L_PTE_PXN (_AT(pteval_t, 0))
++
+ /*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
+index 626989f..9d67a33 100644
+--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
+@@ -75,6 +75,7 @@
+ #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+ #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
+ #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
+ #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
+
+ /*
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 85c60ad..b0bbd7e 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -82,6 +82,7 @@
+ #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
+ #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
++#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
+ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
+ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
+@@ -95,6 +96,7 @@
+ /*
+ * To be used in assembly code with the upper page attributes.
+ */
++#define L_PTE_PXN_HIGH (1 << (53 - 32))
+ #define L_PTE_XN_HIGH (1 << (54 - 32))
+ #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
+
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 7d59b52..27a12f8 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -33,6 +33,9 @@
+ #include <asm/pgtable-2level.h>
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+@@ -48,6 +51,9 @@
+ #define LIBRARY_TEXT_START 0x0c000000
+
+ #ifndef __ASSEMBLY__
++extern pteval_t __supported_pte_mask;
++extern pmdval_t __supported_pmd_mask;
++
+ extern void __pte_error(const char *file, int line, pte_t);
+ extern void __pmd_error(const char *file, int line, pmd_t);
+ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
+ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/domain.h>
++#include <linux/thread_info.h>
++#include <linux/preempt.h>
++
++static inline int test_domain(int domain, int domaintype)
++{
++ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
++}
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ preempt_disable();
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
++ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
++#endif
++ return 0;
++}
++
++static inline unsigned long pax_close_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
++ /* DOMAIN_MANAGER = "client" under KERNEXEC */
++ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
++ preempt_enable_no_resched();
++#endif
++ return 0;
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ /*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cachable and bufferable bits based on memory policy,
+- * as well as any architecture dependent bits like global/ASID and SMP
+- * shared mapping bits.
++ * as well as any architecture dependent bits like global/ASID, PXN,
++ * and SMP shared mapping bits.
+ */
+ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
+
+@@ -262,7 +310,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+- L_PTE_NONE | L_PTE_VALID;
++ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index c4ae171..ea0c0c2 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -29,7 +29,7 @@ struct psci_operations {
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+-};
++} __no_const;
+
+ extern struct psci_operations psci_ops;
+ extern struct smp_operations psci_smp_ops;
+diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
+index 22a3b9b..7f214ee 100644
+--- a/arch/arm/include/asm/smp.h
++++ b/arch/arm/include/asm/smp.h
+@@ -112,7 +112,7 @@ struct smp_operations {
+ int (*cpu_disable)(unsigned int cpu);
+ #endif
+ #endif
+-};
++} __no_const;
+
+ /*
+ * set platform specific SMP operations
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 71a06b2..8bb9ae1 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -88,9 +88,9 @@ struct thread_info {
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
++ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT 9
+ #define TIF_SYSCALL_TRACEPOINT 10
+ #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
+-#define TIF_NOHZ 12 /* in adaptive nohz mode */
++/* within 8 bits of TIF_SYSCALL_TRACE
++ * to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID 12
++#define TIF_NOHZ 13 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_RESTORE_SIGMASK 20
+@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+ #define
_TIF_SECCOMP (1 << TIF_SECCOMP) + #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) + + /* Checks for any syscall work in entry-common.S */ + #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ +- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) + + /* + * Change these and you break ASM code in entry-common.S +diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h +index 7f3f3cc..bdf0665 100644 +--- a/arch/arm/include/asm/uaccess.h ++++ b/arch/arm/include/asm/uaccess.h +@@ -18,6 +18,7 @@ + #include <asm/domain.h> + #include <asm/unified.h> + #include <asm/compiler.h> ++#include <asm/pgtable.h> + + #if __LINUX_ARM_ARCH__ < 6 + #include <asm-generic/uaccess-unaligned.h> +@@ -70,11 +71,38 @@ extern int __put_user_bad(void); + static inline void set_fs(mm_segment_t fs) + { + current_thread_info()->addr_limit = fs; +- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); ++ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); + } + + #define segment_eq(a,b) ((a) == (b)) + ++#define __HAVE_ARCH_PAX_OPEN_USERLAND ++#define __HAVE_ARCH_PAX_CLOSE_USERLAND ++ ++static inline void pax_open_userland(void) ++{ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (segment_eq(get_fs(), USER_DS)) { ++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF)); ++ modify_domain(DOMAIN_USER, DOMAIN_UDEREF); ++ } ++#endif ++ ++} ++ ++static inline void pax_close_userland(void) ++{ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (segment_eq(get_fs(), USER_DS)) { ++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS)); ++ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS); ++ } ++#endif ++ ++} ++ + #define __addr_ok(addr) ({ \ + unsigned long flag; \ + __asm__("cmp %2, %0; movlo %0, #0" \ +@@ -150,8 +178,12 @@ extern int __get_user_4(void *); + + #define get_user(x,p) \ + ({ \ ++ int __e; \ + might_fault(); \ +- __get_user_check(x,p); \ ++ pax_open_userland(); \ ++ __e = __get_user_check(x,p); \ ++ pax_close_userland(); \ ++ __e; \ + }) + + extern int __put_user_1(void *, unsigned int); +@@ -196,8 +228,12 @@ extern int __put_user_8(void *, unsigned long long); + + #define put_user(x,p) \ + ({ \ ++ int __e; \ + might_fault(); \ +- __put_user_check(x,p); \ ++ pax_open_userland(); \ ++ __e = __put_user_check(x,p); \ ++ pax_close_userland(); \ ++ __e; \ + }) + + #else /* CONFIG_MMU */ +@@ -221,6 +257,7 @@ static inline void set_fs(mm_segment_t fs) + + #endif /* CONFIG_MMU */ + ++#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size)) + #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) + + #define user_addr_max() \ +@@ -238,13 +275,17 @@ static inline void set_fs(mm_segment_t fs) + #define __get_user(x,ptr) \ + ({ \ + long __gu_err = 0; \ ++ pax_open_userland(); \ + __get_user_err((x),(ptr),__gu_err); \ ++ pax_close_userland(); \ + __gu_err; \ + }) + + #define __get_user_error(x,ptr,err) \ + ({ \ ++ pax_open_userland(); \ + __get_user_err((x),(ptr),err); \ ++ pax_close_userland(); \ + (void) 0; \ + }) + +@@ -320,13 +361,17 @@ do { \ + #define __put_user(x,ptr) \ + ({ \ + long __pu_err = 0; \ ++ pax_open_userland(); \ + __put_user_err((x),(ptr),__pu_err); \ ++ pax_close_userland(); \ + __pu_err; \ + }) + + #define __put_user_error(x,ptr,err) \ + ({ \ ++ pax_open_userland(); \ + __put_user_err((x),(ptr),err); \ ++ pax_close_userland(); \ + (void) 0; \ + }) + +@@ -426,11 +471,44 @@ do { \ + + + #ifdef CONFIG_MMU +-extern unsigned long 
__must_check __copy_from_user(void *to, const void __user *from, unsigned long n); +-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); ++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n); ++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n); ++ ++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ unsigned long ret; ++ ++ check_object_size(to, n, false); ++ pax_open_userland(); ++ ret = ___copy_from_user(to, from, n); ++ pax_close_userland(); ++ return ret; ++} ++ ++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ unsigned long ret; ++ ++ check_object_size(from, n, true); ++ pax_open_userland(); ++ ret = ___copy_to_user(to, from, n); ++ pax_close_userland(); ++ return ret; ++} ++ + extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); +-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); ++extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n); + extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); ++ ++static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n) ++{ ++ unsigned long ret; ++ pax_open_userland(); ++ ret = ___clear_user(addr, n); ++ pax_close_userland(); ++ return ret; ++} ++ + #else + #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) + #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) +@@ -439,6 +517,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l + + static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else /* security hole - plug it */ +@@ -448,6 +529,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u + + static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h +index 5af0ed1..cea83883 100644 +--- a/arch/arm/include/uapi/asm/ptrace.h ++++ b/arch/arm/include/uapi/asm/ptrace.h +@@ -92,7 +92,7 @@ + * ARMv7 groups of PSR bits + */ + #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ +-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ ++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */ + #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ + #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ + +diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c +index 85e664b..419a1cd 100644 +--- a/arch/arm/kernel/armksyms.c ++++ b/arch/arm/kernel/armksyms.c +@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops); + + /* networking */ + EXPORT_SYMBOL(csum_partial); +-EXPORT_SYMBOL(csum_partial_copy_from_user); ++EXPORT_SYMBOL(__csum_partial_copy_from_user); + EXPORT_SYMBOL(csum_partial_copy_nocheck); + EXPORT_SYMBOL(__csum_ipv6_magic); + +@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero); + #ifdef CONFIG_MMU + 
EXPORT_SYMBOL(copy_page); + +-EXPORT_SYMBOL(__copy_from_user); +-EXPORT_SYMBOL(__copy_to_user); +-EXPORT_SYMBOL(__clear_user); ++EXPORT_SYMBOL(___copy_from_user); ++EXPORT_SYMBOL(___copy_to_user); ++EXPORT_SYMBOL(___clear_user); + + EXPORT_SYMBOL(__get_user_1); + EXPORT_SYMBOL(__get_user_2); +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S +index 1879e8d..b2207fc 100644 +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -47,6 +47,87 @@ + 9997: + .endm + ++ .macro pax_enter_kernel ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ make aligned space for saved DACR ++ sub sp, sp, #8 ++ @ save regs ++ stmdb sp!, {r1, r2} ++ @ read DACR from cpu_domain into r1 ++ mov r2, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r2, r2, #(0x1fc0) ++ bic r2, r2, #(0x3f) ++ ldr r1, [r2, #TI_CPU_DOMAIN] ++ @ store old DACR on stack ++ str r1, [sp, #8] ++#ifdef CONFIG_PAX_KERNEXEC ++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) ++#endif ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++#endif ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r2, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r1, r2} ++#endif ++ .endm ++ ++ .macro pax_open_userland ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read DACR from cpu_domain into r1 ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ ldr r1, [r0, #TI_CPU_DOMAIN] ++ @ set current DOMAIN_USER to DOMAIN_CLIENT ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ ++ .macro pax_close_userland ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read DACR from cpu_domain into r1 ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ ldr r1, [r0, #TI_CPU_DOMAIN] ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ + .macro pabt_helper + @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 + #ifdef MULTI_PABORT +@@ -89,11 +170,15 @@ + * Invalid mode handlers + */ + .macro inv_entry, reason ++ ++ pax_enter_kernel ++ + sub sp, sp, #S_FRAME_SIZE + ARM( stmib sp, {r1 - lr} ) + THUMB( stmia sp, {r0 - r12} ) + THUMB( str sp, [sp, #S_SP] ) + THUMB( str lr, [sp, #S_LR] ) ++ + mov r1, #\reason + .endm + +@@ -149,7 +234,11 @@ ENDPROC(__und_invalid) + .macro svc_entry, stack_hole=0 + UNWIND(.fnstart ) + UNWIND(.save {r0 - pc} ) ++ ++ pax_enter_kernel ++ + sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) ++ + #ifdef CONFIG_THUMB2_KERNEL + SPFIX( str r0, [sp] ) @ temporarily saved + SPFIX( mov r0, sp ) +@@ -164,7 +253,12 @@ 
ENDPROC(__und_invalid) + ldmia r0, {r3 - r5} + add r7, sp, #S_SP - 4 @ here for interlock avoidance + mov r6, #-1 @ "" "" "" "" ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ offset sp by 8 as done in pax_enter_kernel ++ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4) ++#else + add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) ++#endif + SPFIX( addeq r2, r2, #4 ) + str r3, [sp, #-4]! @ save the "real" r0 copied + @ from the exception stack +@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc) + .macro usr_entry + UNWIND(.fnstart ) + UNWIND(.cantunwind ) @ don't unwind the user space ++ ++ pax_enter_kernel_user ++ + sub sp, sp, #S_FRAME_SIZE + ARM( stmib sp, {r1 - r12} ) + THUMB( stmia sp, {r0 - r12} ) +@@ -416,7 +513,9 @@ __und_usr: + tst r3, #PSR_T_BIT @ Thumb mode? + bne __und_usr_thumb + sub r4, r2, #4 @ ARM instr at LR - 4 ++ pax_open_userland + 1: ldrt r0, [r4] ++ pax_close_userland + ARM_BE8(rev r0, r0) @ little endian instruction + + @ r0 = 32-bit ARM instruction which caused the exception +@@ -450,11 +549,15 @@ __und_usr_thumb: + */ + .arch armv6t2 + #endif ++ pax_open_userland + 2: ldrht r5, [r4] ++ pax_close_userland + ARM_BE8(rev16 r5, r5) @ little endian instruction + cmp r5, #0xe800 @ 32bit instruction if xx != 0 + blo __und_usr_fault_16 @ 16bit undefined instruction ++ pax_open_userland + 3: ldrht r0, [r2] ++ pax_close_userland + ARM_BE8(rev16 r0, r0) @ little endian instruction + add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update +@@ -484,7 +587,8 @@ ENDPROC(__und_usr) + */ + .pushsection .fixup, "ax" + .align 2 +-4: mov pc, r9 ++4: pax_close_userland ++ mov pc, r9 + .popsection + .pushsection __ex_table,"a" + .long 1b, 4b +@@ -694,7 +798,7 @@ ENTRY(__switch_to) + THUMB( str lr, [ip], #4 ) + ldr r4, [r2, #TI_TP_VALUE] + ldr r5, [r2, #TI_TP_VALUE + 4] +-#ifdef CONFIG_CPU_USE_DOMAINS ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + ldr r6, [r2, #TI_CPU_DOMAIN] + #endif + switch_tls r1, r4, r5, r3, r7 +@@ -703,7 +807,7 @@ ENTRY(__switch_to) + ldr r8, =__stack_chk_guard + ldr r7, [r7, #TSK_STACK_CANARY] + #endif +-#ifdef CONFIG_CPU_USE_DOMAINS ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + mcr p15, 0, r6, c3, c0, 0 @ Set domain register + #endif + mov r5, r0 +diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S +index a2dcafd..1048b5a 100644 +--- a/arch/arm/kernel/entry-common.S ++++ b/arch/arm/kernel/entry-common.S +@@ -10,18 +10,46 @@ + + #include <asm/unistd.h> + #include <asm/ftrace.h> ++#include <asm/domain.h> + #include <asm/unwind.h> + ++#include "entry-header.S" ++ + #ifdef CONFIG_NEED_RET_TO_USER + #include <mach/entry-macro.S> + #else + .macro arch_ret_to_user, tmp1, tmp2 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ save regs ++ stmdb sp!, {r1, r2} ++ @ read DACR from cpu_domain into r1 ++ mov r2, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r2, r2, #(0x1fc0) ++ bic r2, r2, #(0x3f) ++ ldr r1, [r2, #TI_CPU_DOMAIN] ++#ifdef CONFIG_PAX_KERNEXEC ++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) ++#endif ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ set current DOMAIN_USER to DOMAIN_UDEREF ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) ++#endif ++ @ 
write r1 to current_thread_info()->cpu_domain ++ str r1, [r2, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r1, r2} ++#endif + .endm + #endif + +-#include "entry-header.S" +- +- + .align 5 + /* + * This is the fast syscall return path. We do as little as +@@ -411,6 +439,12 @@ ENTRY(vector_swi) + USER( ldr scno, [lr, #-4] ) @ get SWI instruction + #endif + ++ /* ++ * do this here to avoid a performance hit of wrapping the code above ++ * that directly dereferences userland to parse the SWI instruction ++ */ ++ pax_enter_kernel_user ++ + adr tbl, sys_call_table @ load syscall table pointer + + #if defined(CONFIG_OABI_COMPAT) +diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S +index 88c6bab..652981b 100644 +--- a/arch/arm/kernel/entry-header.S ++++ b/arch/arm/kernel/entry-header.S +@@ -188,6 +188,60 @@ + msr cpsr_c, \rtemp @ switch back to the SVC mode + .endm + ++ .macro pax_enter_kernel_user ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read DACR from cpu_domain into r1 ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ ldr r1, [r0, #TI_CPU_DOMAIN] ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) ++#endif ++#ifdef CONFIG_PAX_KERNEXEC ++ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) ++#endif ++ @ write r1 to current_thread_info()->cpu_domain ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ ++ .macro pax_exit_kernel ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ @ save regs ++ stmdb sp!, {r0, r1} ++ @ read old DACR from stack into r1 ++ ldr r1, [sp, #(8 + S_SP)] ++ sub r1, r1, #8 ++ ldr r1, [r1] ++ ++ @ write r1 to current_thread_info()->cpu_domain ++ mov r0, sp ++ @ assume 8K pages, since we have to split the immediate in two ++ bic r0, r0, #(0x1fc0) ++ bic r0, r0, #(0x3f) ++ str r1, [r0, #TI_CPU_DOMAIN] ++ @ write r1 to DACR ++ mcr p15, 0, r1, c3, c0, 0 ++ @ instruction sync ++ instr_sync ++ @ restore regs ++ ldmia sp!, {r0, r1} ++#endif ++ .endm ++ + #ifndef CONFIG_THUMB2_KERNEL + .macro svc_exit, rpsr, irq = 0 + .if \irq != 0 +@@ -207,6 +261,9 @@ + blne trace_hardirqs_off + #endif + .endif ++ ++ pax_exit_kernel ++ + msr spsr_cxsf, \rpsr + #if defined(CONFIG_CPU_V6) + ldr r0, [sp] +@@ -270,6 +327,9 @@ + blne trace_hardirqs_off + #endif + .endif ++ ++ pax_exit_kernel ++ + ldr lr, [sp, #S_SP] @ top of the stack + ldrd r0, r1, [sp, #S_LR] @ calling lr and pc + clrex @ clear the exclusive monitor +diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c +index 918875d..cd5fa27 100644 +--- a/arch/arm/kernel/fiq.c ++++ b/arch/arm/kernel/fiq.c +@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length) + void *base = vectors_page; + unsigned offset = FIQ_OFFSET; + ++ pax_open_kernel(); + memcpy(base + offset, start, length); ++ pax_close_kernel(); ++ + if (!cache_is_vipt_nonaliasing()) + flush_icache_range((unsigned long)base + offset, offset + + length); +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S +index f5f381d..a6f36a1 100644 +--- 
a/arch/arm/kernel/head.S ++++ b/arch/arm/kernel/head.S +@@ -437,7 +437,7 @@ __enable_mmu: + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ +- domain_val(DOMAIN_IO, DOMAIN_CLIENT)) ++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT)) + mcr p15, 0, r5, c3, c0, 0 @ load domain access register + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer + #endif +diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c +index 45e4781..8eac93d 100644 +--- a/arch/arm/kernel/module.c ++++ b/arch/arm/kernel/module.c +@@ -38,12 +38,39 @@ + #endif + + #ifdef CONFIG_MMU +-void *module_alloc(unsigned long size) ++static inline void *__module_alloc(unsigned long size, pgprot_t prot) + { ++ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR) ++ return NULL; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, +- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, ++ GFP_KERNEL, prot, NUMA_NO_NODE, + __builtin_return_address(0)); + } ++ ++void *module_alloc(unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return __module_alloc(size, PAGE_KERNEL); ++#else ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++#endif ++ ++} ++ ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ module_free(mod, module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++ ++void *module_alloc_exec(unsigned long size) ++{ ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++} ++EXPORT_SYMBOL(module_alloc_exec); ++#endif + #endif + + int +diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c +index 07314af..c46655c 100644 +--- a/arch/arm/kernel/patch.c ++++ b/arch/arm/kernel/patch.c +@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) + bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); + int size; + ++ pax_open_kernel(); + if (thumb2 && __opcode_is_thumb16(insn)) { + *(u16 *)addr = __opcode_to_mem_thumb16(insn); + size = sizeof(u16); +@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) + *(u32 *)addr = insn; + size = sizeof(u32); + } ++ pax_close_kernel(); + + flush_icache_range((uintptr_t)(addr), + (uintptr_t)(addr) + size); +diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c +index 92f7b15..7048500 100644 +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -217,6 +217,7 @@ void machine_power_off(void) + + if (pm_power_off) + pm_power_off(); ++ BUG(); + } + + /* +@@ -230,7 +231,7 @@ void machine_power_off(void) + * executing pre-reset code, and using RAM that the primary CPU's code wishes + * to use. Implementing such co-ordination would be essentially impossible. 
+ */ +-void machine_restart(char *cmd) ++__noreturn void machine_restart(char *cmd) + { + local_irq_disable(); + smp_send_stop(); +@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs) + + show_regs_print_info(KERN_DEFAULT); + +- print_symbol("PC is at %s\n", instruction_pointer(regs)); +- print_symbol("LR is at %s\n", regs->ARM_lr); ++ printk("PC is at %pA\n", (void *)instruction_pointer(regs)); ++ printk("LR is at %pA\n", (void *)regs->ARM_lr); + printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" + "sp : %08lx ip : %08lx fp : %08lx\n", + regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, +@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p) + return 0; + } + +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long range_end = mm->brk + 0x02000000; +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +-} +- + #ifdef CONFIG_MMU + #ifdef CONFIG_KUSER_HELPERS + /* +@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = { + + static int __init gate_vma_init(void) + { +- gate_vma.vm_page_prot = PAGE_READONLY_EXEC; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + return 0; + } + arch_initcall(gate_vma_init); +@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr) + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- return is_gate_vma(vma) ? "[vectors]" : +- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? +- "[sigpage]" : NULL; ++ return is_gate_vma(vma) ? "[vectors]" : NULL; + } + +-static struct page *signal_page; +-extern struct page *get_signal_page(void); +- + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + { + struct mm_struct *mm = current->mm; +- unsigned long addr; +- int ret; +- +- if (!signal_page) +- signal_page = get_signal_page(); +- if (!signal_page) +- return -ENOMEM; + + down_write(&mm->mmap_sem); +- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); +- if (IS_ERR_VALUE(addr)) { +- ret = addr; +- goto up_fail; +- } +- +- ret = install_special_mapping(mm, addr, PAGE_SIZE, +- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, +- &signal_page); +- +- if (ret == 0) +- mm->context.sigpage = addr; +- +- up_fail: ++ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC; + up_write(&mm->mmap_sem); +- return ret; ++ return 0; + } + #endif +diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c +index 4693188..4596c5e 100644 +--- a/arch/arm/kernel/psci.c ++++ b/arch/arm/kernel/psci.c +@@ -24,7 +24,7 @@ + #include <asm/opcodes-virt.h> + #include <asm/psci.h> + +-struct psci_operations psci_ops; ++struct psci_operations psci_ops __read_only; + + static int (*invoke_psci_fn)(u32, u32, u32, u32); + +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index 0dd3b79..b67388e 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -908,7 +908,7 @@ enum ptrace_syscall_dir { + PTRACE_SYSCALL_EXIT, + }; + +-static int tracehook_report_syscall(struct pt_regs *regs, ++static void tracehook_report_syscall(struct pt_regs *regs, + enum ptrace_syscall_dir dir) + { + unsigned long ip; +@@ -926,19 +926,29 @@ static int tracehook_report_syscall(struct pt_regs *regs, + current_thread_info()->syscall = -1; + + regs->ARM_ip = ip; +- return current_thread_info()->syscall; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) + { + current_thread_info()->syscall = scno; + ++#ifdef 
CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + /* Do the secure computing check first; failures should be fast. */ + if (secure_computing(scno) == -1) + return -1; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) +- scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); ++ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); ++ ++ scno = current_thread_info()->syscall; + + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + trace_sys_enter(regs, scno); +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c +index 1e8b030..37c3022 100644 +--- a/arch/arm/kernel/setup.c ++++ b/arch/arm/kernel/setup.c +@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high); + unsigned int elf_hwcap __read_mostly; + EXPORT_SYMBOL(elf_hwcap); + ++pteval_t __supported_pte_mask __read_only; ++pmdval_t __supported_pmd_mask __read_only; + + #ifdef MULTI_CPU +-struct processor processor __read_mostly; ++struct processor processor __read_only; + #endif + #ifdef MULTI_TLB +-struct cpu_tlb_fns cpu_tlb __read_mostly; ++struct cpu_tlb_fns cpu_tlb __read_only; + #endif + #ifdef MULTI_USER +-struct cpu_user_fns cpu_user __read_mostly; ++struct cpu_user_fns cpu_user __read_only; + #endif + #ifdef MULTI_CACHE +-struct cpu_cache_fns cpu_cache __read_mostly; ++struct cpu_cache_fns cpu_cache __read_only; + #endif + #ifdef CONFIG_OUTER_CACHE +-struct outer_cache_fns outer_cache __read_mostly; ++struct outer_cache_fns outer_cache __read_only; + EXPORT_SYMBOL(outer_cache); + #endif + +@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void) + asm("mrc p15, 0, %0, c0, c1, 4" + : "=r" (mmfr0)); + if ((mmfr0 & 0x0000000f) >= 0x00000003 || +- (mmfr0 & 0x000000f0) >= 0x00000030) ++ (mmfr0 & 0x000000f0) >= 0x00000030) { + cpu_arch = CPU_ARCH_ARMv7; +- else if ((mmfr0 & 0x0000000f) == 0x00000002 || ++ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) { ++ __supported_pte_mask |= L_PTE_PXN; ++ __supported_pmd_mask |= PMD_PXNTABLE; ++ } ++ } else if ((mmfr0 & 0x0000000f) == 0x00000002 || + (mmfr0 & 0x000000f0) == 0x00000020) + cpu_arch = CPU_ARCH_ARMv6; + else +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c +index 04d6388..5115238 100644 +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -23,8 +23,6 @@ + + extern const unsigned long sigreturn_codes[7]; + +-static unsigned long signal_return_offset; +- + #ifdef CONFIG_CRUNCH + static int preserve_crunch_context(struct crunch_sigframe __user *frame) + { +@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, + * except when the MPU has protected the vectors + * page from PL0 + */ +- retcode = mm->context.sigpage + signal_return_offset + +- (idx << 2) + thumb; ++ retcode = mm->context.sigpage + (idx << 2) + thumb; + } else + #endif + { +@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) + } while (thread_flags & _TIF_WORK_MASK); + return 0; + } +- +-struct page *get_signal_page(void) +-{ +- unsigned long ptr; +- unsigned offset; +- struct page *page; +- void *addr; +- +- page = alloc_pages(GFP_KERNEL, 0); +- +- if (!page) +- return NULL; +- +- addr = page_address(page); +- +- /* Give the signal return code some randomness */ +- offset = 0x200 + (get_random_int() & 0x7fc); +- signal_return_offset = offset; +- +- /* +- * Copy signal return handlers into the vector page, and +- * set sigreturn to be a pointer to these. 
+- */ +- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); +- +- ptr = (unsigned long)addr + offset; +- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); +- +- return page; +-} +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c +index b7b4c86..47c4f77 100644 +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -73,7 +73,7 @@ enum ipi_msg_type { + + static DECLARE_COMPLETION(cpu_running); + +-static struct smp_operations smp_ops; ++static struct smp_operations smp_ops __read_only; + + void __init smp_set_ops(struct smp_operations *ops) + { +diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c +index 7a3be1d..b00c7de 100644 +--- a/arch/arm/kernel/tcm.c ++++ b/arch/arm/kernel/tcm.c +@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = { + .virtual = ITCM_OFFSET, + .pfn = __phys_to_pfn(ITCM_OFFSET), + .length = 0, +- .type = MT_MEMORY_RWX_ITCM, ++ .type = MT_MEMORY_RX_ITCM, + } + }; + +@@ -267,7 +267,9 @@ no_dtcm: + start = &__sitcm_text; + end = &__eitcm_text; + ram = &__itcm_start; ++ pax_open_kernel(); + memcpy(start, ram, itcm_code_sz); ++ pax_close_kernel(); + pr_debug("CPU ITCM: copied code from %p - %p\n", + start, end); + itcm_present = true; +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c +index 172ee18..ce4ec3d 100644 +--- a/arch/arm/kernel/traps.c ++++ b/arch/arm/kernel/traps.c +@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); + void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) + { + #ifdef CONFIG_KALLSYMS +- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); ++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from); + #else + printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); + #endif +@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; + static int die_owner = -1; + static unsigned int die_nest_count; + ++extern void gr_handle_kernel_exploit(void); ++ + static unsigned long oops_begin(void) + { + int cpu; +@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); ++ ++ gr_handle_kernel_exploit(); ++ + if (signr) + do_exit(signr); + } +@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) + * The user helper at 0xffff0fe0 must be used instead. + * (see entry-armv.S for details) + */ ++ pax_open_kernel(); + *((unsigned int *)0xffff0ff0) = regs->ARM_r0; ++ pax_close_kernel(); + } + return 0; + +@@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base) + kuser_init(vectors_base); + + flush_icache_range(vectors, vectors + PAGE_SIZE * 2); +- modify_domain(DOMAIN_USER, DOMAIN_CLIENT); ++ ++#ifndef CONFIG_PAX_MEMORY_UDEREF ++ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT); ++#endif ++ + #else /* ifndef CONFIG_CPU_V7M */ + /* + * on V7-M there is no need to copy the vector table to a dedicated +diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S +index 7bcee5c..e2f3249 100644 +--- a/arch/arm/kernel/vmlinux.lds.S ++++ b/arch/arm/kernel/vmlinux.lds.S +@@ -8,7 +8,11 @@ + #include <asm/thread_info.h> + #include <asm/memory.h> + #include <asm/page.h> +- ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#include <asm/pgtable.h> ++#endif ++ + #define PROC_INFO \ + . 
= ALIGN(4); \ + VMLINUX_SYMBOL(__proc_info_begin) = .; \ +@@ -34,7 +38,7 @@ + #endif + + #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \ +- defined(CONFIG_GENERIC_BUG) ++ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT) + #define ARM_EXIT_KEEP(x) x + #define ARM_EXIT_DISCARD(x) + #else +@@ -90,6 +94,11 @@ SECTIONS + _text = .; + HEAD_TEXT + } ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(1<<SECTION_SHIFT); ++#endif ++ + .text : { /* Real text segment */ + _stext = .; /* Text and read-only data */ + __exception_text_start = .; +@@ -112,6 +121,8 @@ SECTIONS + ARM_CPU_KEEP(PROC_INFO) + } + ++ _etext = .; /* End of text section */ ++ + RO_DATA(PAGE_SIZE) + + . = ALIGN(4); +@@ -142,7 +153,9 @@ SECTIONS + + NOTES + +- _etext = .; /* End of text and rodata section */ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(1<<SECTION_SHIFT); ++#endif + + #ifndef CONFIG_XIP_KERNEL + . = ALIGN(PAGE_SIZE); +@@ -220,6 +233,11 @@ SECTIONS + . = PAGE_OFFSET + TEXT_OFFSET; + #else + __init_end = .; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(1<<SECTION_SHIFT); ++#endif ++ + . = ALIGN(THREAD_SIZE); + __data_loc = .; + #endif +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c +index bd18bb8..87ede26 100644 +--- a/arch/arm/kvm/arm.c ++++ b/arch/arm/kvm/arm.c +@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors; + static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); + + /* The VMID used in the VTTBR */ +-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); ++static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1); + static u8 kvm_next_vmid; + static DEFINE_SPINLOCK(kvm_vmid_lock); + +@@ -408,7 +408,7 @@ void force_vm_exit(const cpumask_t *mask) + */ + static bool need_new_vmid_gen(struct kvm *kvm) + { +- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); ++ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen)); + } + + /** +@@ -441,7 +441,7 @@ static void update_vttbr(struct kvm *kvm) + + /* First user of a new VMID generation? 
*/ + if (unlikely(kvm_next_vmid == 0)) { +- atomic64_inc(&kvm_vmid_gen); ++ atomic64_inc_unchecked(&kvm_vmid_gen); + kvm_next_vmid = 1; + + /* +@@ -458,7 +458,7 @@ static void update_vttbr(struct kvm *kvm) + kvm_call_hyp(__kvm_flush_vm_context); + } + +- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); ++ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen); + kvm->arch.vmid = kvm_next_vmid; + kvm_next_vmid++; + +diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S +index 14a0d98..7771a7d 100644 +--- a/arch/arm/lib/clear_user.S ++++ b/arch/arm/lib/clear_user.S +@@ -12,14 +12,14 @@ + + .text + +-/* Prototype: int __clear_user(void *addr, size_t sz) ++/* Prototype: int ___clear_user(void *addr, size_t sz) + * Purpose : clear some user memory + * Params : addr - user memory address to clear + * : sz - number of bytes to clear + * Returns : number of bytes NOT cleared + */ + ENTRY(__clear_user_std) +-WEAK(__clear_user) ++WEAK(___clear_user) + stmfd sp!, {r1, lr} + mov r2, #0 + cmp r1, #4 +@@ -44,7 +44,7 @@ WEAK(__clear_user) + USER( strnebt r2, [r0]) + mov r0, #0 + ldmfd sp!, {r1, pc} +-ENDPROC(__clear_user) ++ENDPROC(___clear_user) + ENDPROC(__clear_user_std) + + .pushsection .fixup,"ax" +diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S +index 66a477a..bee61d3 100644 +--- a/arch/arm/lib/copy_from_user.S ++++ b/arch/arm/lib/copy_from_user.S +@@ -16,7 +16,7 @@ + /* + * Prototype: + * +- * size_t __copy_from_user(void *to, const void *from, size_t n) ++ * size_t ___copy_from_user(void *to, const void *from, size_t n) + * + * Purpose: + * +@@ -84,11 +84,11 @@ + + .text + +-ENTRY(__copy_from_user) ++ENTRY(___copy_from_user) + + #include "copy_template.S" + +-ENDPROC(__copy_from_user) ++ENDPROC(___copy_from_user) + + .pushsection .fixup,"ax" + .align 0 +diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S +index 6ee2f67..d1cce76 100644 +--- a/arch/arm/lib/copy_page.S ++++ b/arch/arm/lib/copy_page.S +@@ -10,6 +10,7 @@ + * ASM optimised string functions + */ + #include <linux/linkage.h> ++#include <linux/const.h> + #include <asm/assembler.h> + #include <asm/asm-offsets.h> + #include <asm/cache.h> +diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S +index d066df6..df28194 100644 +--- a/arch/arm/lib/copy_to_user.S ++++ b/arch/arm/lib/copy_to_user.S +@@ -16,7 +16,7 @@ + /* + * Prototype: + * +- * size_t __copy_to_user(void *to, const void *from, size_t n) ++ * size_t ___copy_to_user(void *to, const void *from, size_t n) + * + * Purpose: + * +@@ -88,11 +88,11 @@ + .text + + ENTRY(__copy_to_user_std) +-WEAK(__copy_to_user) ++WEAK(___copy_to_user) + + #include "copy_template.S" + +-ENDPROC(__copy_to_user) ++ENDPROC(___copy_to_user) + ENDPROC(__copy_to_user_std) + + .pushsection .fixup,"ax" +diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S +index 7d08b43..f7ca7ea 100644 +--- a/arch/arm/lib/csumpartialcopyuser.S ++++ b/arch/arm/lib/csumpartialcopyuser.S +@@ -57,8 +57,8 @@ + * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT + */ + +-#define FN_ENTRY ENTRY(csum_partial_copy_from_user) +-#define FN_EXIT ENDPROC(csum_partial_copy_from_user) ++#define FN_ENTRY ENTRY(__csum_partial_copy_from_user) ++#define FN_EXIT ENDPROC(__csum_partial_copy_from_user) + + #include "csumpartialcopygeneric.S" + +diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c +index 5306de3..aed6d03 100644 +--- a/arch/arm/lib/delay.c ++++ b/arch/arm/lib/delay.c +@@ -28,7 +28,7 @@ + /* + * Default to the loop-based 
delay implementation. + */ +-struct arm_delay_ops arm_delay_ops = { ++struct arm_delay_ops arm_delay_ops __read_only = { + .delay = __loop_delay, + .const_udelay = __loop_const_udelay, + .udelay = __loop_udelay, +diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c +index 3e58d71..029817c 100644 +--- a/arch/arm/lib/uaccess_with_memcpy.c ++++ b/arch/arm/lib/uaccess_with_memcpy.c +@@ -136,7 +136,7 @@ out: + } + + unsigned long +-__copy_to_user(void __user *to, const void *from, unsigned long n) ++___copy_to_user(void __user *to, const void *from, unsigned long n) + { + /* + * This test is stubbed out of the main function above to keep +@@ -190,7 +190,7 @@ out: + return n; + } + +-unsigned long __clear_user(void __user *addr, unsigned long n) ++unsigned long ___clear_user(void __user *addr, unsigned long n) + { + /* See rational for this in __copy_to_user() above. */ + if (n < 64) +diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c +index f7ca97b..3d7e719 100644 +--- a/arch/arm/mach-at91/setup.c ++++ b/arch/arm/mach-at91/setup.c +@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length) + + desc->pfn = __phys_to_pfn(base); + desc->length = length; +- desc->type = MT_MEMORY_RWX_NONCACHED; ++ desc->type = MT_MEMORY_RW_NONCACHED; + + pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n", + base, length, desc->virtual); +diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c +index f3407a5..bd4256f 100644 +--- a/arch/arm/mach-kirkwood/common.c ++++ b/arch/arm/mach-kirkwood/common.c +@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw) + clk_gate_ops.disable(hw); + } + +-static struct clk_ops clk_gate_fn_ops; ++static int clk_gate_fn_is_enabled(struct clk_hw *hw) ++{ ++ return clk_gate_ops.is_enabled(hw); ++} ++ ++static struct clk_ops clk_gate_fn_ops = { ++ .enable = clk_gate_fn_enable, ++ .disable = clk_gate_fn_disable, ++ .is_enabled = clk_gate_fn_is_enabled, ++}; + + static struct clk __init *clk_register_gate_fn(struct device *dev, + const char *name, +@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev, + gate_fn->fn_en = fn_en; + gate_fn->fn_dis = fn_dis; + +- /* ops is the gate ops, but with our enable/disable functions */ +- if (clk_gate_fn_ops.enable != clk_gate_fn_enable || +- clk_gate_fn_ops.disable != clk_gate_fn_disable) { +- clk_gate_fn_ops = clk_gate_ops; +- clk_gate_fn_ops.enable = clk_gate_fn_enable; +- clk_gate_fn_ops.disable = clk_gate_fn_disable; +- } +- + clk = clk_register(dev, &gate_fn->gate.hw); + + if (IS_ERR(clk)) +diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c +index aead77a..a2253fa 100644 +--- a/arch/arm/mach-omap2/board-n8x0.c ++++ b/arch/arm/mach-omap2/board-n8x0.c +@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev) + } + #endif + +-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = { ++static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = { + .late_init = n8x0_menelaus_late_init, + }; + +diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c +index ab43755..ccfa231 100644 +--- a/arch/arm/mach-omap2/gpmc.c ++++ b/arch/arm/mach-omap2/gpmc.c +@@ -148,7 +148,6 @@ struct omap3_gpmc_regs { + }; + + static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; +-static struct irq_chip gpmc_irq_chip; + static int gpmc_irq_start; + + static struct resource gpmc_mem_root; +@@ -716,6 +715,18 
@@ static void gpmc_irq_noop(struct irq_data *data) { } + + static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } + ++static struct irq_chip gpmc_irq_chip = { ++ .name = "gpmc", ++ .irq_startup = gpmc_irq_noop_ret, ++ .irq_enable = gpmc_irq_enable, ++ .irq_disable = gpmc_irq_disable, ++ .irq_shutdown = gpmc_irq_noop, ++ .irq_ack = gpmc_irq_noop, ++ .irq_mask = gpmc_irq_noop, ++ .irq_unmask = gpmc_irq_noop, ++ ++}; ++ + static int gpmc_setup_irq(void) + { + int i; +@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void) + return gpmc_irq_start; + } + +- gpmc_irq_chip.name = "gpmc"; +- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; +- gpmc_irq_chip.irq_enable = gpmc_irq_enable; +- gpmc_irq_chip.irq_disable = gpmc_irq_disable; +- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; +- gpmc_irq_chip.irq_ack = gpmc_irq_noop; +- gpmc_irq_chip.irq_mask = gpmc_irq_noop; +- gpmc_irq_chip.irq_unmask = gpmc_irq_noop; +- + gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; + gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; + +diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c +index 667915d..2ee1219 100644 +--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c ++++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c +@@ -84,7 +84,7 @@ struct cpu_pm_ops { + int (*finish_suspend)(unsigned long cpu_state); + void (*resume)(void); + void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state); +-}; ++} __no_const; + + static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); + static struct powerdomain *mpuss_pd; +@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void) + static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) + {} + +-struct cpu_pm_ops omap_pm_ops = { ++static struct cpu_pm_ops omap_pm_ops __read_only = { + .finish_suspend = default_finish_suspend, + .resume = dummy_cpu_resume, + .scu_prepare = dummy_scu_prepare, +diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c +index 3664562..72f85c6 100644 +--- a/arch/arm/mach-omap2/omap-wakeupgen.c ++++ b/arch/arm/mach-omap2/omap-wakeupgen.c +@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata irq_hotplug_notifier = { ++static struct notifier_block irq_hotplug_notifier = { + .notifier_call = irq_cpu_hotplug_notify, + }; + +diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c +index 01ef59d..32ae28a8 100644 +--- a/arch/arm/mach-omap2/omap_device.c ++++ b/arch/arm/mach-omap2/omap_device.c +@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od) + struct platform_device __init *omap_device_build(const char *pdev_name, + int pdev_id, + struct omap_hwmod *oh, +- void *pdata, int pdata_len) ++ const void *pdata, int pdata_len) + { + struct omap_hwmod *ohs[] = { oh }; + +@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, + struct platform_device __init *omap_device_build_ss(const char *pdev_name, + int pdev_id, + struct omap_hwmod **ohs, +- int oh_cnt, void *pdata, ++ int oh_cnt, const void *pdata, + int pdata_len) + { + int ret = -ENOMEM; +diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h +index 78c02b3..c94109a 100644 +--- a/arch/arm/mach-omap2/omap_device.h ++++ b/arch/arm/mach-omap2/omap_device.h +@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev); + /* Core code interface */ + + struct platform_device 
*omap_device_build(const char *pdev_name, int pdev_id, +- struct omap_hwmod *oh, void *pdata, ++ struct omap_hwmod *oh, const void *pdata, + int pdata_len); + + struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, + struct omap_hwmod **oh, int oh_cnt, +- void *pdata, int pdata_len); ++ const void *pdata, int pdata_len); + + struct omap_device *omap_device_alloc(struct platform_device *pdev, + struct omap_hwmod **ohs, int oh_cnt); +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c +index c914b00..8a653a7 100644 +--- a/arch/arm/mach-omap2/omap_hwmod.c ++++ b/arch/arm/mach-omap2/omap_hwmod.c +@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops { + int (*init_clkdm)(struct omap_hwmod *oh); + void (*update_context_lost)(struct omap_hwmod *oh); + int (*get_context_lost)(struct omap_hwmod *oh); +-}; ++} __no_const; + + /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */ +-static struct omap_hwmod_soc_ops soc_ops; ++static struct omap_hwmod_soc_ops soc_ops __read_only; + + /* omap_hwmod_list contains all registered struct omap_hwmods */ + static LIST_HEAD(omap_hwmod_list); +diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c +index 95fee54..cfa9cf1 100644 +--- a/arch/arm/mach-omap2/powerdomains43xx_data.c ++++ b/arch/arm/mach-omap2/powerdomains43xx_data.c +@@ -10,6 +10,7 @@ + + #include <linux/kernel.h> + #include <linux/init.h> ++#include <asm/pgtable.h> + + #include "powerdomain.h" + +@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void) + + void __init am43xx_powerdomains_init(void) + { +- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; ++ pax_open_kernel(); ++ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; ++ pax_close_kernel(); + pwrdm_register_platform_funcs(&omap4_pwrdm_operations); + pwrdm_register_pwrdms(powerdomains_am43xx); + pwrdm_complete_init(); +diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c +index d15c7bb..b2d1f0c 100644 +--- a/arch/arm/mach-omap2/wd_timer.c ++++ b/arch/arm/mach-omap2/wd_timer.c +@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void) + struct omap_hwmod *oh; + char *oh_name = "wd_timer2"; + char *dev_name = "omap_wdt"; +- struct omap_wd_timer_platform_data pdata; ++ static struct omap_wd_timer_platform_data pdata = { ++ .read_reset_sources = prm_read_reset_sources ++ }; + + if (!cpu_class_is_omap2() || of_have_populated_dt()) + return 0; +@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void) + return -EINVAL; + } + +- pdata.read_reset_sources = prm_read_reset_sources; +- + pdev = omap_device_build(dev_name, id, oh, &pdata, + sizeof(struct omap_wd_timer_platform_data)); + WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n", +diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c +index b82dcae..44ee5b6 100644 +--- a/arch/arm/mach-tegra/cpuidle-tegra20.c ++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c +@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, + bool entered_lp2 = false; + + if (tegra_pending_sgi()) +- ACCESS_ONCE(abort_flag) = true; ++ ACCESS_ONCE_RW(abort_flag) = true; + + cpuidle_coupled_parallel_barrier(dev, &abort_barrier); + +diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h +index 2dea8b5..6499da2 100644 +--- a/arch/arm/mach-ux500/setup.h ++++ b/arch/arm/mach-ux500/setup.h +@@ -33,13 +33,6 @@ extern void ux500_timer_init(void); + .type = MT_DEVICE, \ + } + 
+-#define __MEM_DEV_DESC(x, sz) { \ +- .virtual = IO_ADDRESS(x), \ +- .pfn = __phys_to_pfn(x), \ +- .length = sz, \ +- .type = MT_MEMORY_RWX, \ +-} +- + extern struct smp_operations ux500_smp_ops; + extern void ux500_cpu_die(unsigned int cpu); + +diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig +index ca8ecde..58ba893 100644 +--- a/arch/arm/mm/Kconfig ++++ b/arch/arm/mm/Kconfig +@@ -446,6 +446,7 @@ config CPU_32v5 + + config CPU_32v6 + bool ++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF + select TLS_REG_EMUL if !CPU_32v6K && !MMU + + config CPU_32v6K +@@ -600,6 +601,7 @@ config CPU_CP15_MPU + + config CPU_USE_DOMAINS + bool ++ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF + help + This option enables or disables the use of domain switching + via the set_fs() function. +@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS + config KUSER_HELPERS + bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS + default y ++ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND + help + Warning: disabling this option may break user programs. + +@@ -811,7 +814,7 @@ config KUSER_HELPERS + See Documentation/arm/kernel_user_helpers.txt for details. + + However, the fixed address nature of these helpers can be used +- by ROP (return orientated programming) authors when creating ++ by ROP (Return Oriented Programming) authors when creating + exploits. + + If all of the binaries and libraries which run on your platform +diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c +index 9240364..a2b8cf3 100644 +--- a/arch/arm/mm/alignment.c ++++ b/arch/arm/mm/alignment.c +@@ -212,10 +212,12 @@ union offset_union { + #define __get16_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v, a = addr; \ ++ pax_open_userland(); \ + __get8_unaligned_check(ins,v,a,err); \ + val = v << ((BE) ? 8 : 0); \ + __get8_unaligned_check(ins,v,a,err); \ + val |= v << ((BE) ? 0 : 8); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +@@ -229,6 +231,7 @@ union offset_union { + #define __get32_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v, a = addr; \ ++ pax_open_userland(); \ + __get8_unaligned_check(ins,v,a,err); \ + val = v << ((BE) ? 24 : 0); \ + __get8_unaligned_check(ins,v,a,err); \ +@@ -237,6 +240,7 @@ union offset_union { + val |= v << ((BE) ? 8 : 16); \ + __get8_unaligned_check(ins,v,a,err); \ + val |= v << ((BE) ? 
0 : 24); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +@@ -250,6 +254,7 @@ union offset_union { + #define __put16_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v = val, a = addr; \ ++ pax_open_userland(); \ + __asm__( FIRST_BYTE_16 \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ +@@ -269,6 +274,7 @@ union offset_union { + " .popsection\n" \ + : "=r" (err), "=&r" (v), "=&r" (a) \ + : "0" (err), "1" (v), "2" (a)); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +@@ -282,6 +288,7 @@ union offset_union { + #define __put32_unaligned_check(ins,val,addr) \ + do { \ + unsigned int err = 0, v = val, a = addr; \ ++ pax_open_userland(); \ + __asm__( FIRST_BYTE_32 \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ +@@ -311,6 +318,7 @@ union offset_union { + " .popsection\n" \ + : "=r" (err), "=&r" (v), "=&r" (a) \ + : "0" (err), "1" (v), "2" (a)); \ ++ pax_close_userland(); \ + if (err) \ + goto fault; \ + } while (0) +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c +index 7abde2c..9df495f 100644 +--- a/arch/arm/mm/cache-l2x0.c ++++ b/arch/arm/mm/cache-l2x0.c +@@ -46,7 +46,7 @@ struct l2x0_of_data { + void (*setup)(const struct device_node *, u32 *, u32 *); + void (*save)(void); + struct outer_cache_fns outer_cache; +-}; ++} __do_const; + + static bool of_init = false; + +diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c +index 6eb97b3..ac509f6 100644 +--- a/arch/arm/mm/context.c ++++ b/arch/arm/mm/context.c +@@ -43,7 +43,7 @@ + #define NUM_USER_ASIDS ASID_FIRST_VERSION + + static DEFINE_RAW_SPINLOCK(cpu_asid_lock); +-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); ++static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); + static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); + + static DEFINE_PER_CPU(atomic64_t, active_asids); +@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) + { + static u32 cur_idx = 1; + u64 asid = atomic64_read(&mm->context.id); +- u64 generation = atomic64_read(&asid_generation); ++ u64 generation = atomic64_read_unchecked(&asid_generation); + + if (asid != 0 && is_reserved_asid(asid)) { + /* +@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) + */ + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); + if (asid == NUM_USER_ASIDS) { +- generation = atomic64_add_return(ASID_FIRST_VERSION, ++ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION, + &asid_generation); + flush_context(cpu); + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); +@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) + cpu_set_reserved_ttbr0(); + + asid = atomic64_read(&mm->context.id); +- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) ++ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) + && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) + goto switch_mm_fastpath; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + /* Check that our ASID belongs to the current generation. 
*/ + asid = atomic64_read(&mm->context.id); +- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { ++ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) { + asid = new_context(mm, cpu); + atomic64_set(&mm->context.id, asid); + } +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c +index eb8830a..5360ce7 100644 +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -25,6 +25,7 @@ + #include <asm/system_misc.h> + #include <asm/system_info.h> + #include <asm/tlbflush.h> ++#include <asm/sections.h> + + #include "fault.h" + +@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, + if (fixup_exception(regs)) + return; + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (addr < TASK_SIZE) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); ++ } ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if ((fsr & FSR_WRITE) && ++ (((unsigned long)_stext <= addr && addr < init_mm.end_code) || ++ (MODULES_VADDR <= addr && addr < MODULES_END))) ++ { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); ++ } ++#endif ++ + /* + * No handler, we'll have to terminate things with extreme prejudice. + */ +@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (fsr & FSR_LNX_PF) { ++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + tsk->thread.address = addr; + tsk->thread.error_code = fsr; + tsk->thread.trap_no = 14; +@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + } + #endif /* CONFIG_MMU */ + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (__force unsigned char __user *)pc+i)) ++ printk(KERN_CONT "?? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-4: "); ++ for (i = -1; i < 20; i++) { ++ unsigned long c; ++ if (get_user(c, (__force unsigned long __user *)sp+i)) ++ printk(KERN_CONT "????????
"); ++ else ++ printk(KERN_CONT "%08lx ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * First Level Translation Fault Handler + * +@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + const struct fsr_info *inf = fsr_info + fsr_fs(fsr); + struct siginfo info; + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (addr < TASK_SIZE && is_domain_fault(fsr)) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); ++ goto die; ++ } ++#endif ++ + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) + return; + ++die: + printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", + inf->name, fsr, addr); + +@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs * + ifsr_info[nr].name = name; + } + ++asmlinkage int sys_sigreturn(struct pt_regs *regs); ++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs); ++ + asmlinkage void __exception + do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) + { + const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); + struct siginfo info; ++ unsigned long pc = instruction_pointer(regs); ++ ++ if (user_mode(regs)) { ++ unsigned long sigpage = current->mm->context.sigpage; ++ ++ if (sigpage <= pc && pc < sigpage + 7*4) { ++ if (pc < sigpage + 3*4) ++ sys_sigreturn(regs); ++ else ++ sys_rt_sigreturn(regs); ++ return; ++ } ++ if (pc == 0xffff0f60UL) { ++ /* ++ * PaX: __kuser_cmpxchg64 emulation ++ */ ++ // TODO ++ //regs->ARM_pc = regs->ARM_lr; ++ //return; ++ } ++ if (pc == 0xffff0fa0UL) { ++ /* ++ * PaX: __kuser_memory_barrier emulation ++ */ ++ // dmb(); implied by the exception ++ regs->ARM_pc = regs->ARM_lr; ++ return; ++ } ++ if (pc == 0xffff0fc0UL) { ++ /* ++ * PaX: __kuser_cmpxchg emulation ++ */ ++ // TODO ++ //long new; ++ //int op; ++ ++ //op = FUTEX_OP_SET << 28; ++ //new = futex_atomic_op_inuser(op, regs->ARM_r2); ++ //regs->ARM_r0 = old != new; ++ //regs->ARM_pc = regs->ARM_lr; ++ //return; ++ } ++ if (pc == 0xffff0fe0UL) { ++ /* ++ * PaX: __kuser_get_tls emulation ++ */ ++ regs->ARM_r0 = current_thread_info()->tp_value[0]; ++ regs->ARM_pc = regs->ARM_lr; ++ return; ++ } ++ } ++ ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), ++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), ++ pc >= TASK_SIZE ? 
"non-executable kernel" : "userland", pc); ++ goto die; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) { ++ unsigned int bkpt; ++ ++ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) { ++ current->thread.error_code = ifsr; ++ current->thread.trap_no = 0; ++ pax_report_refcount_overflow(regs); ++ fixup_exception(regs); ++ return; ++ } ++ } ++#endif + + if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) + return; + ++die: + printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", + inf->name, ifsr, addr); + +diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h +index cf08bdf..772656c 100644 +--- a/arch/arm/mm/fault.h ++++ b/arch/arm/mm/fault.h +@@ -3,6 +3,7 @@ + + /* + * Fault status register encodings. We steal bit 31 for our own purposes. ++ * Set when the FSR value is from an instruction fault. + */ + #define FSR_LNX_PF (1 << 31) + #define FSR_WRITE (1 << 11) +@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr) + } + #endif + ++/* valid for LPAE and !LPAE */ ++static inline int is_xn_fault(unsigned int fsr) ++{ ++ return ((fsr_fs(fsr) & 0x3c) == 0xc); ++} ++ ++static inline int is_domain_fault(unsigned int fsr) ++{ ++ return ((fsr_fs(fsr) & 0xD) == 0x9); ++} ++ + void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); + unsigned long search_exception_table(unsigned long addr); + +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c +index 804d615..fcec50a 100644 +--- a/arch/arm/mm/init.c ++++ b/arch/arm/mm/init.c +@@ -30,6 +30,8 @@ + #include <asm/setup.h> + #include <asm/tlb.h> + #include <asm/fixmap.h> ++#include <asm/system_info.h> ++#include <asm/cp15.h> + + #include <asm/mach/arch.h> + #include <asm/mach/map.h> +@@ -625,7 +627,46 @@ void free_initmem(void) + { + #ifdef CONFIG_HAVE_TCM + extern char __tcm_start, __tcm_end; ++#endif + ++#ifdef CONFIG_PAX_KERNEXEC ++ unsigned long addr; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ int cpu_arch = cpu_architecture(); ++ unsigned int cr = get_cr(); ++ ++ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { ++ /* make pages tables, etc before .text NX */ ++ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ __section_update(pmd, addr, PMD_SECT_XN); ++ } ++ /* make init NX */ ++ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ __section_update(pmd, addr, PMD_SECT_XN); ++ } ++ /* make kernel code/rodata RX */ ++ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++#ifdef CONFIG_ARM_LPAE ++ __section_update(pmd, addr, PMD_SECT_RDONLY); ++#else ++ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE); ++#endif ++ } ++ } ++#endif ++ ++#ifdef CONFIG_HAVE_TCM + poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); + free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); + #endif +diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c +index f9c32ba..8540068 100644 +--- a/arch/arm/mm/ioremap.c ++++ b/arch/arm/mm/ioremap.c +@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) + unsigned int mtype; + + if (cached) +- mtype = MT_MEMORY_RWX; ++ mtype = MT_MEMORY_RX; + else +- mtype = 
MT_MEMORY_RWX_NONCACHED; ++ mtype = MT_MEMORY_RX_NONCACHED; + + return __arm_ioremap_caller(phys_addr, size, mtype, + __builtin_return_address(0)); +diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c +index 5e85ed3..b10a7ed 100644 +--- a/arch/arm/mm/mmap.c ++++ b/arch/arm/mm/mmap.c +@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct vm_area_struct *vma; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + /* +@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (len > TASK_SIZE) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.high_limit = TASK_SIZE; + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + unsigned long addr = addr0; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + /* +@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + return addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + if (do_align) +@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + else + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = do_align ? 
(PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + { + unsigned long random_factor = 0UL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* 8 bits of randomness in 20 address space bits */ + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) +@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + mm->mmap_base = mmap_base(random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } + } +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c +index f15c22e..d830561 100644 +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -39,6 +39,22 @@ + #include "mm.h" + #include "tcm.h" + ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++void modify_domain(unsigned int dom, unsigned int type) ++{ ++ struct thread_info *thread = current_thread_info(); ++ unsigned int domain = thread->cpu_domain; ++ /* ++ * DOMAIN_MANAGER might be defined to some other value, ++ * use the arch-defined constant ++ */ ++ domain &= ~domain_val(dom, 3); ++ thread->cpu_domain = domain | domain_val(dom, type); ++ set_domain(thread->cpu_domain); ++} ++EXPORT_SYMBOL(modify_domain); ++#endif ++ + /* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
+@@ -235,7 +251,15 @@ __setup("noalign", noalign_setup); + #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE + #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE + +-static struct mem_type mem_types[] = { ++#ifdef CONFIG_PAX_KERNEXEC ++#define L_PTE_KERNEXEC L_PTE_RDONLY ++#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY ++#else ++#define L_PTE_KERNEXEC L_PTE_DIRTY ++#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE ++#endif ++ ++static struct mem_type mem_types[] __read_only = { + [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ + .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | + L_PTE_SHARED, +@@ -264,19 +288,19 @@ static struct mem_type mem_types[] = { + .prot_sect = PROT_SECT_DEVICE, + .domain = DOMAIN_IO, + }, +- [MT_UNCACHED] = { ++ [MT_UNCACHED_RW] = { + .prot_pte = PROT_PTE_DEVICE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + .domain = DOMAIN_IO, + }, +- [MT_CACHECLEAN] = { +- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, ++ [MT_CACHECLEAN_RO] = { ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, + #ifndef CONFIG_ARM_LPAE +- [MT_MINICLEAN] = { +- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, ++ [MT_MINICLEAN_RO] = { ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, + #endif +@@ -284,15 +308,15 @@ static struct mem_type mem_types[] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_RDONLY, + .prot_l1 = PMD_TYPE_TABLE, +- .domain = DOMAIN_USER, ++ .domain = DOMAIN_VECTORS, + }, + [MT_HIGH_VECTORS] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_USER | L_PTE_RDONLY, + .prot_l1 = PMD_TYPE_TABLE, +- .domain = DOMAIN_USER, ++ .domain = DOMAIN_VECTORS, + }, +- [MT_MEMORY_RWX] = { ++ [__MT_MEMORY_RWX] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, +@@ -305,17 +329,30 @@ static struct mem_type mem_types[] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, +- [MT_ROM] = { +- .prot_sect = PMD_TYPE_SECT, ++ [MT_MEMORY_RX] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, ++ .domain = DOMAIN_KERNEL, ++ }, ++ [MT_ROM_RX] = { ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, + .domain = DOMAIN_KERNEL, + }, +- [MT_MEMORY_RWX_NONCACHED] = { ++ [MT_MEMORY_RW_NONCACHED] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_MT_BUFFERABLE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, ++ [MT_MEMORY_RX_NONCACHED] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC | ++ L_PTE_MT_BUFFERABLE, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, ++ .domain = DOMAIN_KERNEL, ++ }, + [MT_MEMORY_RW_DTCM] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_XN, +@@ -323,9 +360,10 @@ static struct mem_type mem_types[] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + .domain = DOMAIN_KERNEL, + }, +- [MT_MEMORY_RWX_ITCM] = { +- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, ++ [MT_MEMORY_RX_ITCM] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, + .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_RW_SO] = { +@@ -534,9 +572,14 @@ static void __init 
build_mem_type_table(void) + * Mark cache clean areas and XIP ROM read only + * from SVC mode and no access from userspace. + */ +- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; +- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; +- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++#ifdef CONFIG_PAX_KERNEXEC ++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++#endif ++ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + #endif + + if (is_smp()) { +@@ -552,13 +595,17 @@ static void __init build_mem_type_table(void) + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; +- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; +- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; ++ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; ++ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; +- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; +- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED; + } + } + +@@ -569,15 +616,20 @@ static void __init build_mem_type_table(void) + if (cpu_arch >= CPU_ARCH_ARMv6) { + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { + /* Non-cacheable Normal is XCB = 001 */ +- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ++ PMD_SECT_BUFFERED; ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= + PMD_SECT_BUFFERED; + } else { + /* For both ARMv6 and non-TEX-remapping ARMv7 */ +- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ++ PMD_SECT_TEX(1); ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= + PMD_SECT_TEX(1); + } + } else { +- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; ++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; + } + + #ifdef CONFIG_ARM_LPAE +@@ -593,6 +645,8 @@ static void __init build_mem_type_table(void) + vecs_pgprot |= PTE_EXT_AF; + #endif + ++ user_pgprot |= __supported_pte_mask; ++ + for (i = 0; i < 16; i++) { + pteval_t v = pgprot_val(protection_map[i]); + protection_map[i] = __pgprot(v | user_pgprot); +@@ -610,21 +664,24 @@ static void __init build_mem_type_table(void) + + mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; + mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; +- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; +- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; ++ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; ++ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot; + 
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; ++ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd; ++ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; +- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; +- mem_types[MT_ROM].prot_sect |= cp->pmd; ++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask; ++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask; ++ mem_types[MT_ROM_RX].prot_sect |= cp->pmd; + + switch (cp->pmd) { + case PMD_SECT_WT: +- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; ++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT; + break; + case PMD_SECT_WB: + case PMD_SECT_WBWA: +- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; ++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB; + break; + } + pr_info("Memory policy: %sData cache %s\n", +@@ -842,7 +899,7 @@ static void __init create_mapping(struct map_desc *md) + return; + } + +- if ((md->type == MT_DEVICE || md->type == MT_ROM) && ++ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) && + md->virtual >= PAGE_OFFSET && + (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { + printk(KERN_WARNING "BUG: mapping for 0x%08llx" +@@ -1257,18 +1314,15 @@ void __init arm_mm_memblock_reserve(void) + * called function. This means you can't use any function or debugging + * method which may touch any device, otherwise the kernel _will_ crash. + */ ++ ++static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE); ++ + static void __init devicemaps_init(const struct machine_desc *mdesc) + { + struct map_desc map; + unsigned long addr; +- void *vectors; + +- /* +- * Allocate the vector page early. +- */ +- vectors = early_alloc(PAGE_SIZE * 2); +- +- early_trap_init(vectors); ++ early_trap_init(&vectors); + + for (addr = VMALLOC_START; addr; addr += PMD_SIZE) + pmd_clear(pmd_off_k(addr)); +@@ -1281,7 +1335,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) + map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); + map.virtual = MODULES_VADDR; + map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; +- map.type = MT_ROM; ++ map.type = MT_ROM_RX; + create_mapping(&map); + #endif + +@@ -1292,14 +1346,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) + map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); + map.virtual = FLUSH_BASE; + map.length = SZ_1M; +- map.type = MT_CACHECLEAN; ++ map.type = MT_CACHECLEAN_RO; + create_mapping(&map); + #endif + #ifdef FLUSH_BASE_MINICACHE + map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); + map.virtual = FLUSH_BASE_MINICACHE; + map.length = SZ_1M; +- map.type = MT_MINICLEAN; ++ map.type = MT_MINICLEAN_RO; + create_mapping(&map); + #endif + +@@ -1308,7 +1362,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) + * location (0xffff0000). If we aren't using high-vectors, also + * create a mapping at the low-vectors virtual address. 
+ */ +- map.pfn = __phys_to_pfn(virt_to_phys(vectors)); ++ map.pfn = __phys_to_pfn(virt_to_phys(&vectors)); + map.virtual = 0xffff0000; + map.length = PAGE_SIZE; + #ifdef CONFIG_KUSER_HELPERS +@@ -1365,8 +1419,10 @@ static void __init kmap_init(void) + static void __init map_lowmem(void) + { + struct memblock_region *reg; ++#ifndef CONFIG_PAX_KERNEXEC + unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); + unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); ++#endif + + /* Map all the lowmem memory banks. */ + for_each_memblock(memory, reg) { +@@ -1379,11 +1435,48 @@ static void __init map_lowmem(void) + if (start >= end) + break; + ++#ifdef CONFIG_PAX_KERNEXEC ++ map.pfn = __phys_to_pfn(start); ++ map.virtual = __phys_to_virt(start); ++ map.length = end - start; ++ ++ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) { ++ struct map_desc kernel; ++ struct map_desc initmap; ++ ++ /* when freeing initmem we will make this RW */ ++ initmap.pfn = __phys_to_pfn(__pa(__init_begin)); ++ initmap.virtual = (unsigned long)__init_begin; ++ initmap.length = _sdata - __init_begin; ++ initmap.type = __MT_MEMORY_RWX; ++ create_mapping(&initmap); ++ ++ /* when freeing initmem we will make this RX */ ++ kernel.pfn = __phys_to_pfn(__pa(_stext)); ++ kernel.virtual = (unsigned long)_stext; ++ kernel.length = __init_begin - _stext; ++ kernel.type = __MT_MEMORY_RWX; ++ create_mapping(&kernel); ++ ++ if (map.virtual < (unsigned long)_stext) { ++ map.length = (unsigned long)_stext - map.virtual; ++ map.type = __MT_MEMORY_RWX; ++ create_mapping(&map); ++ } ++ ++ map.pfn = __phys_to_pfn(__pa(_sdata)); ++ map.virtual = (unsigned long)_sdata; ++ map.length = end - __pa(_sdata); ++ } ++ ++ map.type = MT_MEMORY_RW; ++ create_mapping(&map); ++#else + if (end < kernel_x_start || start >= kernel_x_end) { + map.pfn = __phys_to_pfn(start); + map.virtual = __phys_to_virt(start); + map.length = end - start; +- map.type = MT_MEMORY_RWX; ++ map.type = __MT_MEMORY_RWX; + + create_mapping(&map); + } else { +@@ -1400,7 +1493,7 @@ static void __init map_lowmem(void) + map.pfn = __phys_to_pfn(kernel_x_start); + map.virtual = __phys_to_virt(kernel_x_start); + map.length = kernel_x_end - kernel_x_start; +- map.type = MT_MEMORY_RWX; ++ map.type = __MT_MEMORY_RWX; + + create_mapping(&map); + +@@ -1413,6 +1506,7 @@ static void __init map_lowmem(void) + create_mapping(&map); + } + } ++#endif + } + } + +diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c +index 5b217f4..c23f40e 100644 +--- a/arch/arm/plat-iop/setup.c ++++ b/arch/arm/plat-iop/setup.c +@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = { + .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, + .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), + .length = IOP3XX_PERIPHERAL_SIZE, +- .type = MT_UNCACHED, ++ .type = MT_UNCACHED_RW, + }, + }; + +diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c +index a5bc92d..0bb4730 100644 +--- a/arch/arm/plat-omap/sram.c ++++ b/arch/arm/plat-omap/sram.c +@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size, + * Looks like we need to preserve some bootloader code at the + * beginning of SRAM for jumping to flash for reboot to work... 
+ */ ++ pax_open_kernel(); + memset_io(omap_sram_base + omap_sram_skip, 0, + omap_sram_size - omap_sram_skip); ++ pax_close_kernel(); + } +diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h +index ce6d763..cfea917 100644 +--- a/arch/arm/plat-samsung/include/plat/dma-ops.h ++++ b/arch/arm/plat-samsung/include/plat/dma-ops.h +@@ -47,7 +47,7 @@ struct samsung_dma_ops { + int (*started)(unsigned ch); + int (*flush)(unsigned ch); + int (*stop)(unsigned ch); +-}; ++} __no_const; + + extern void *samsung_dmadev_get_ops(void); + extern void *s3c_dma_get_ops(void); +diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h +index 409ca37..10c87ad 100644 +--- a/arch/arm64/include/asm/barrier.h ++++ b/arch/arm64/include/asm/barrier.h +@@ -40,7 +40,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h +index 6c0f684..5faea9d 100644 +--- a/arch/arm64/include/asm/uaccess.h ++++ b/arch/arm64/include/asm/uaccess.h +@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs) + flag; \ + }) + ++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) + #define access_ok(type, addr, size) __range_ok(addr, size) + #define user_addr_max get_fs + +diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h +index c3a58a1..78fbf54 100644 +--- a/arch/avr32/include/asm/cache.h ++++ b/arch/avr32/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef __ASM_AVR32_CACHE_H + #define __ASM_AVR32_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h +index d232888..87c8df1 100644 +--- a/arch/avr32/include/asm/elf.h ++++ b/arch/avr32/include/asm/elf.h +@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x00001000UL ++ ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
This could be done in user space, +diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h +index 479330b..53717a8 100644 +--- a/arch/avr32/include/asm/kmap_types.h ++++ b/arch/avr32/include/asm/kmap_types.h +@@ -2,9 +2,9 @@ + #define __ASM_AVR32_KMAP_TYPES_H + + #ifdef CONFIG_DEBUG_HIGHMEM +-# define KM_TYPE_NR 29 ++# define KM_TYPE_NR 30 + #else +-# define KM_TYPE_NR 14 ++# define KM_TYPE_NR 15 + #endif + + #endif /* __ASM_AVR32_KMAP_TYPES_H */ +diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c +index 0eca933..eb78c7b 100644 +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c +@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) + + int exception_trace = 1; + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (unsigned char *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. +@@ -176,6 +193,16 @@ bad_area: + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + if (exception_trace && printk_ratelimit()) + printk("%s%s[%d]: segfault at %08lx pc %08lx " + "sp %08lx ecr %lu\n", +diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h +index 568885a..f8008df 100644 +--- a/arch/blackfin/include/asm/cache.h ++++ b/arch/blackfin/include/asm/cache.h +@@ -7,6 +7,7 @@ + #ifndef __ARCH_BLACKFIN_CACHE_H + #define __ARCH_BLACKFIN_CACHE_H + ++#include <linux/const.h> + #include <linux/linkage.h> /* for asmlinkage */ + + /* +@@ -14,7 +15,7 @@ + * Blackfin loads 32 bytes for cache + */ + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define SMP_CACHE_BYTES L1_CACHE_BYTES + + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES +diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h +index aea2718..3639a60 100644 +--- a/arch/cris/include/arch-v10/arch/cache.h ++++ b/arch/cris/include/arch-v10/arch/cache.h +@@ -1,8 +1,9 @@ + #ifndef _ASM_ARCH_CACHE_H + #define _ASM_ARCH_CACHE_H + ++#include <linux/const.h> + /* Etrax 100LX have 32-byte cache-lines. */ +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_ARCH_CACHE_H */ +diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h +index 7caf25d..ee65ac5 100644 +--- a/arch/cris/include/arch-v32/arch/cache.h ++++ b/arch/cris/include/arch-v32/arch/cache.h +@@ -1,11 +1,12 @@ + #ifndef _ASM_CRIS_ARCH_CACHE_H + #define _ASM_CRIS_ARCH_CACHE_H + ++#include <linux/const.h> + #include <arch/hwregs/dma.h> + + /* A cache-line is 32 bytes. 
*/ +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h +index b86329d..6709906 100644 +--- a/arch/frv/include/asm/atomic.h ++++ b/arch/frv/include/asm/atomic.h +@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v) + #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) + #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + { + int c, old; +diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h +index 2797163..c2a401d 100644 +--- a/arch/frv/include/asm/cache.h ++++ b/arch/frv/include/asm/cache.h +@@ -12,10 +12,11 @@ + #ifndef __ASM_CACHE_H + #define __ASM_CACHE_H + ++#include <linux/const.h> + + /* bytes per L1 cache line */ + #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) + #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) +diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h +index 43901f2..0d8b865 100644 +--- a/arch/frv/include/asm/kmap_types.h ++++ b/arch/frv/include/asm/kmap_types.h +@@ -2,6 +2,6 @@ + #ifndef _ASM_KMAP_TYPES_H + #define _ASM_KMAP_TYPES_H + +-#define KM_TYPE_NR 17 ++#define KM_TYPE_NR 18 + + #endif +diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c +index 836f147..4cf23f5 100644 +--- a/arch/frv/mm/elf-fdpic.c ++++ b/arch/frv/mm/elf-fdpic.c +@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + { + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + if (len > TASK_SIZE) + return -ENOMEM; +@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + goto success; + } + +@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + info.high_limit = (current->mm->start_stack - 0x00200000); + info.align_mask = 0; + info.align_offset = 0; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + goto success; +diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h +index f4ca594..adc72fd6 100644 +--- a/arch/hexagon/include/asm/cache.h ++++ b/arch/hexagon/include/asm/cache.h +@@ -21,9 +21,11 @@ + #ifndef __ASM_CACHE_H 
+ #define __ASM_CACHE_H + ++#include <linux/const.h> ++ + /* Bytes per L1 cache line */ +-#define L1_CACHE_SHIFT (5) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __cacheline_aligned __aligned(L1_CACHE_BYTES) + #define ____cacheline_aligned __aligned(L1_CACHE_BYTES) +diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig +index 0c8e553..112d734 100644 +--- a/arch/ia64/Kconfig ++++ b/arch/ia64/Kconfig +@@ -544,6 +544,7 @@ source "drivers/sn/Kconfig" + config KEXEC + bool "kexec system call" + depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) ++ depends on !GRKERNSEC_KMEM + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot +diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h +index 6e6fe18..a6ae668 100644 +--- a/arch/ia64/include/asm/atomic.h ++++ b/arch/ia64/include/asm/atomic.h +@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v) + #define atomic64_inc(v) atomic64_add(1, (v)) + #define atomic64_dec(v) atomic64_sub(1, (v)) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + /* Atomic operations are already serializing */ + #define smp_mb__before_atomic_dec() barrier() + #define smp_mb__after_atomic_dec() barrier() +diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h +index d0a69aa..142f878 100644 +--- a/arch/ia64/include/asm/barrier.h ++++ b/arch/ia64/include/asm/barrier.h +@@ -64,7 +64,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h +index 988254a..e1ee885 100644 +--- a/arch/ia64/include/asm/cache.h ++++ b/arch/ia64/include/asm/cache.h +@@ -1,6 +1,7 @@ + #ifndef _ASM_IA64_CACHE_H + #define _ASM_IA64_CACHE_H + ++#include <linux/const.h> + + /* + * Copyright (C) 1998-2000 Hewlett-Packard Co +@@ -9,7 +10,7 @@ + + /* Bytes per L1 (data) cache line. */ + #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #ifdef CONFIG_SMP + # define SMP_CACHE_SHIFT L1_CACHE_SHIFT +diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h +index 5a83c5c..4d7f553 100644 +--- a/arch/ia64/include/asm/elf.h ++++ b/arch/ia64/include/asm/elf.h +@@ -42,6 +42,13 @@ + */ + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) ++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 
16 : 3*PAGE_SHIFT - 13) ++#endif ++ + #define PT_IA_64_UNWIND 0x70000001 + + /* IA-64 relocations: */ +diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h +index 5767cdf..7462574 100644 +--- a/arch/ia64/include/asm/pgalloc.h ++++ b/arch/ia64/include/asm/pgalloc.h +@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) + pgd_val(*pgd_entry) = __pa(pud); + } + ++static inline void ++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) ++{ ++ pgd_populate(mm, pgd_entry, pud); ++} ++ + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) + { + return quicklist_alloc(0, GFP_KERNEL, NULL); +@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) + pud_val(*pud_entry) = __pa(pmd); + } + ++static inline void ++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) ++{ ++ pud_populate(mm, pud_entry, pmd); ++} ++ + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) + { + return quicklist_alloc(0, GFP_KERNEL, NULL); +diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h +index 7935115..c0eca6a 100644 +--- a/arch/ia64/include/asm/pgtable.h ++++ b/arch/ia64/include/asm/pgtable.h +@@ -12,7 +12,7 @@ + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +- ++#include <linux/const.h> + #include <asm/mman.h> + #include <asm/page.h> + #include <asm/processor.h> +@@ -142,6 +142,17 @@ + #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) ++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++# define PAGE_COPY_NOEXEC PAGE_COPY ++#endif ++ + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) + #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) + #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) +diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h +index 45698cd..e8e2dbc 100644 +--- a/arch/ia64/include/asm/spinlock.h ++++ b/arch/ia64/include/asm/spinlock.h +@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) + unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; + + asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); +- ACCESS_ONCE(*p) = (tmp + 2) & ~1; ++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; + } + + static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) +diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h +index 449c8c0..3d4b1e9 100644 +--- a/arch/ia64/include/asm/uaccess.h ++++ b/arch/ia64/include/asm/uaccess.h +@@ -70,6 +70,7 @@ + && ((segment).seg == KERNEL_DS.seg \ + || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ + }) ++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) + #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) + + /* +@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use + static inline unsigned long + __copy_to_user 
(void __user *to, const void *from, unsigned long count) + { ++ if (count > INT_MAX) ++ return count; ++ ++ if (!__builtin_constant_p(count)) ++ check_object_size(from, count, true); ++ + return __copy_user(to, (__force void __user *) from, count); + } + + static inline unsigned long + __copy_from_user (void *to, const void __user *from, unsigned long count) + { ++ if (count > INT_MAX) ++ return count; ++ ++ if (!__builtin_constant_p(count)) ++ check_object_size(to, count, false); ++ + return __copy_user((__force void __user *) to, from, count); + } + +@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + ({ \ + void __user *__cu_to = (to); \ + const void *__cu_from = (from); \ +- long __cu_len = (n); \ ++ unsigned long __cu_len = (n); \ + \ +- if (__access_ok(__cu_to, __cu_len, get_fs())) \ ++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \ ++ if (!__builtin_constant_p(n)) \ ++ check_object_size(__cu_from, __cu_len, true); \ + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ ++ } \ + __cu_len; \ + }) + +@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + ({ \ + void *__cu_to = (to); \ + const void __user *__cu_from = (from); \ +- long __cu_len = (n); \ ++ unsigned long __cu_len = (n); \ + \ + __chk_user_ptr(__cu_from); \ +- if (__access_ok(__cu_from, __cu_len, get_fs())) \ ++ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \ ++ if (!__builtin_constant_p(n)) \ ++ check_object_size(__cu_to, __cu_len, false); \ + __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ ++ } \ + __cu_len; \ + }) + +diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c +index 24603be..948052d 100644 +--- a/arch/ia64/kernel/module.c ++++ b/arch/ia64/kernel/module.c +@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt) + void + module_free (struct module *mod, void *module_region) + { +- if (mod && mod->arch.init_unw_table && +- module_region == mod->module_init) { ++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { + unw_remove_unwind_table(mod->arch.init_unw_table); + mod->arch.init_unw_table = NULL; + } +@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, + } + + static inline int ++in_init_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; ++} ++ ++static inline int ++in_init_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; ++} ++ ++static inline int + in_init (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_init < mod->init_size; ++ return in_init_rx(mod, addr) || in_init_rw(mod, addr); ++} ++ ++static inline int ++in_core_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; ++} ++ ++static inline int ++in_core_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; + } + + static inline int + in_core (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_core < mod->core_size; ++ return in_core_rx(mod, addr) || in_core_rw(mod, addr); + } + + static inline int +@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, + break; + + case RV_BDREL: +- val -= 
(uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); ++ if (in_init_rx(mod, val)) ++ val -= (uint64_t) mod->module_init_rx; ++ else if (in_init_rw(mod, val)) ++ val -= (uint64_t) mod->module_init_rw; ++ else if (in_core_rx(mod, val)) ++ val -= (uint64_t) mod->module_core_rx; ++ else if (in_core_rw(mod, val)) ++ val -= (uint64_t) mod->module_core_rw; + break; + + case RV_LTV: +@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind + * addresses have been selected... + */ + uint64_t gp; +- if (mod->core_size > MAX_LTOFF) ++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) + /* + * This takes advantage of fact that SHF_ARCH_SMALL gets allocated + * at the end of the module. + */ +- gp = mod->core_size - MAX_LTOFF / 2; ++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; + else +- gp = mod->core_size / 2; +- gp = (uint64_t) mod->module_core + ((gp + 7) & -8); ++ gp = (mod->core_size_rx + mod->core_size_rw) / 2; ++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); + mod->arch.gp = gp; + DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); + } +diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c +index ab33328..f39506c 100644 +--- a/arch/ia64/kernel/palinfo.c ++++ b/arch/ia64/kernel/palinfo.c +@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata palinfo_cpu_notifier = ++static struct notifier_block palinfo_cpu_notifier = + { + .notifier_call = palinfo_cpu_callback, + .priority = 0, +diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c +index 41e33f8..65180b2a 100644 +--- a/arch/ia64/kernel/sys_ia64.c ++++ b/arch/ia64/kernel/sys_ia64.c +@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + unsigned long align_mask = 0; + struct mm_struct *mm = current->mm; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + + if (len > RGN_MAP_LIMIT) + return -ENOMEM; +@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + if (REGION_NUMBER(addr) == RGN_HPAGE) + addr = 0; + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ addr = mm->free_area_cache; ++ else ++#endif ++ + if (!addr) + addr = TASK_UNMAPPED_BASE; + +@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len + info.high_limit = TASK_SIZE; + info.align_mask = align_mask; + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S +index 84f8a52..7c76178 100644 +--- a/arch/ia64/kernel/vmlinux.lds.S ++++ b/arch/ia64/kernel/vmlinux.lds.S +@@ -192,7 +192,7 @@ SECTIONS { + /* Per-cpu data: */ + . 
= ALIGN(PERCPU_PAGE_SIZE); + PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) +- __phys_per_cpu_start = __per_cpu_load; ++ __phys_per_cpu_start = per_cpu_load; + /* + * ensure percpu data fits + * into percpu page size + */ +diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c +index 7225dad..2a7c8256 100644 +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c +@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address) + return pte_present(pte); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + # define VM_READ_BIT 0 + # define VM_WRITE_BIT 1 + # define VM_EXEC_BIT 2 +@@ -151,8 +168,21 @@ retry: + if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) + goto bad_area; + +- if ((vma->vm_flags & mask) != mask) ++ if ((vma->vm_flags & mask) != mask) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) ++ goto bad_area; ++ ++ up_read(&mm->mmap_sem); ++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; ++ } + + /* + * If for any reason at all we couldn't handle the fault, make +diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c +index 76069c1..c2aa816 100644 +--- a/arch/ia64/mm/hugetlbpage.c ++++ b/arch/ia64/mm/hugetlbpage.c +@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u + unsigned long pgoff, unsigned long flags) + { + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags); + + if (len > RGN_MAP_LIMIT) + return -ENOMEM; +@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u + info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; + info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c +index 25c3502..560dae7 100644 +--- a/arch/ia64/mm/init.c ++++ b/arch/ia64/mm/init.c +@@ -120,6 +120,19 @@ ia64_init_addr_space (void) + vma->vm_start = current->thread.rbs_bot & PAGE_MASK; + vma->vm_end = vma->vm_start + PAGE_SIZE; + vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { ++ vma->vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ vma->vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + down_write(&current->mm->mmap_sem); + if (insert_vm_struct(current->mm, vma)) { +diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h +index 40b3ee9..8c2c112 100644 +--- a/arch/m32r/include/asm/cache.h ++++ b/arch/m32r/include/asm/cache.h +@@ -1,8 +1,10 @@ + #ifndef _ASM_M32R_CACHE_H + #define _ASM_M32R_CACHE_H + ++#include <linux/const.h> ++ + /* L1 cache line size */ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_M32R_CACHE_H */ 
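The two m32r usercopy hunks below add the same guard to both copy helpers: a length whose sign bit is set is rejected before access_ok() or __copy_user() ever runs, so an underflowed size can never turn into a near-4GB copy. A minimal user-space sketch of that check follows; copy_checked and MAX_COPY are illustrative names, not part of the patch.

#include <stdio.h>
#include <string.h>

/* Illustrative cap; the kernel instead range-checks against the
   user address-space limit via access_ok(). */
#define MAX_COPY (1UL << 30)

/* Mirrors the "(long)n < 0" test added to __generic_copy_to_user()
   and __generic_copy_from_user(): a huge n is almost certainly an
   underflowed length, so report every byte as uncopied. */
static unsigned long copy_checked(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0 || n > MAX_COPY)
		return n;		/* n bytes left uncopied: total failure */
	memcpy(to, from, n);
	return 0;			/* 0 bytes left uncopied: success */
}

int main(void)
{
	char dst[8], src[8] = "abc";
	unsigned long bad = 3 - 4;	/* unsigned underflow yields ULONG_MAX */

	printf("underflowed length: %lu left uncopied\n", copy_checked(dst, src, bad));
	printf("sane length: %lu left uncopied\n", copy_checked(dst, src, 4));
	return 0;
}

The cast test is deliberately cruder than access_ok(): it costs one comparison and still catches a negative length produced by subtraction even under set_fs(KERNEL_DS), where the segment-limit check would pass anything.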
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c +index 82abd15..d95ae5d 100644 +--- a/arch/m32r/lib/usercopy.c ++++ b/arch/m32r/lib/usercopy.c +@@ -14,6 +14,9 @@ + unsigned long + __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetch(from); + if (access_ok(VERIFY_WRITE, to, n)) + __copy_user(to,from,n); +@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + unsigned long + __generic_copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetchw(to); + if (access_ok(VERIFY_READ, from, n)) + __copy_user_zeroing(to,from,n); +diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h +index 0395c51..5f26031 100644 +--- a/arch/m68k/include/asm/cache.h ++++ b/arch/m68k/include/asm/cache.h +@@ -4,9 +4,11 @@ + #ifndef __ARCH_M68K_CACHE_H + #define __ARCH_M68K_CACHE_H + ++#include <linux/const.h> ++ + /* bytes per L1 cache line */ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h +index 2d6f0de..de5f5ac 100644 +--- a/arch/metag/include/asm/barrier.h ++++ b/arch/metag/include/asm/barrier.h +@@ -89,7 +89,7 @@ static inline void fence(void) + do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c +index 3c52fa6..11b2ad8 100644 +--- a/arch/metag/mm/hugetlbpage.c ++++ b/arch/metag/mm/hugetlbpage.c +@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len) + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & HUGEPT_MASK; + info.align_offset = 0; ++ info.threadstack_offset = 0; + return vm_unmapped_area(&info); + } + +diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h +index 4efe96a..60e8699 100644 +--- a/arch/microblaze/include/asm/cache.h ++++ b/arch/microblaze/include/asm/cache.h +@@ -13,11 +13,12 @@ + #ifndef _ASM_MICROBLAZE_CACHE_H + #define _ASM_MICROBLAZE_CACHE_H + ++#include <linux/const.h> + #include <asm/registers.h> + + #define L1_CACHE_SHIFT 5 + /* word-granular cache in microblaze */ +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index 95fa1f1..56a6fa2 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -2298,6 +2298,7 @@ source "kernel/Kconfig.preempt" + + config KEXEC + bool "Kexec system call" ++ depends on !GRKERNSEC_KMEM + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. 
It is like a reboot +diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c +index 02f2444..506969c 100644 +--- a/arch/mips/cavium-octeon/dma-octeon.c ++++ b/arch/mips/cavium-octeon/dma-octeon.c +@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size, + if (dma_release_from_coherent(dev, order, vaddr)) + return; + +- swiotlb_free_coherent(dev, size, vaddr, dma_handle); ++ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs); + } + + static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr) +diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h +index 7eed2f2..c4e385d 100644 +--- a/arch/mips/include/asm/atomic.h ++++ b/arch/mips/include/asm/atomic.h +@@ -21,15 +21,39 @@ + #include <asm/cmpxchg.h> + #include <asm/war.h> + ++#ifdef CONFIG_GENERIC_ATOMIC64 ++#include <asm-generic/atomic64.h> ++#endif ++ + #define ATOMIC_INIT(i) { (i) } + ++#ifdef CONFIG_64BIT ++#define _ASM_EXTABLE(from, to) \ ++" .section __ex_table,\"a\"\n" \ ++" .dword " #from ", " #to"\n" \ ++" .previous\n" ++#else ++#define _ASM_EXTABLE(from, to) \ ++" .section __ex_table,\"a\"\n" \ ++" .word " #from ", " #to"\n" \ ++" .previous\n" ++#endif ++ + /* + * atomic_read - read atomic variable + * @v: pointer of type atomic_t + * + * Atomically reads the value of @v. + */ +-#define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read(const atomic_t *v) ++{ ++ return (*(volatile const int *) &v->counter); ++} ++ ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return (*(volatile const int *) &v->counter); ++} + + /* + * atomic_set - set atomic variable +@@ -38,7 +62,15 @@ + * + * Atomically sets the value of @v to @i. + */ +-#define atomic_set(v, i) ((v)->counter = (i)) ++static inline void atomic_set(atomic_t *v, int i) ++{ ++ v->counter = i; ++} ++ ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + + /* + * atomic_add - add integer to atomic variable +@@ -47,7 +79,67 @@ + * + * Atomically adds @i to @v. + */ +-static __inline__ void atomic_add(int i, atomic_t * v) ++static __inline__ void atomic_add(int i, atomic_t *v) ++{ ++ int temp; ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %0, %1 # atomic_add \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: add %0, %2 \n" ++#else ++ " addu %0, %2 \n" ++#endif ++ " sc %0, %1 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %0, %1 # atomic_add \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: add %0, %2 \n" ++#else ++ " addu %0, %2 \n" ++#endif ++ " sc %0, %1 \n" ++ " beqz %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. 
*/ ++ "1: add %0, %1 \n" ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#else ++ " addu %0, %1 \n" ++#endif ++ : "+r" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++} ++ ++static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v) + { + if (kernel_uses_llsc && R10000_LLSC_WAR) { + int temp; +@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v) + * + * Atomically subtracts @i from @v. + */ +-static __inline__ void atomic_sub(int i, atomic_t * v) ++static __inline__ void atomic_sub(int i, atomic_t *v) ++{ ++ int temp; ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %0, %1 # atomic64_sub \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: sub %0, %2 \n" ++#else ++ " subu %0, %2 \n" ++#endif ++ " sc %0, %1 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %0, %1 # atomic64_sub \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: sub %0, %2 \n" ++#else ++ " subu %0, %2 \n" ++#endif ++ " sc %0, %1 \n" ++ " beqz %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "1: sub %0, %1 \n" ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#else ++ " subu %0, %1 \n" ++#endif ++ : "+r" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++} ++ ++static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v) + { + if (kernel_uses_llsc && R10000_LLSC_WAR) { + int temp; +@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v) + /* + * Same as above, but return the result value + */ +-static __inline__ int atomic_add_return(int i, atomic_t * v) ++static __inline__ int atomic_add_return(int i, atomic_t *v) ++{ ++ int result; ++ int temp; ++ ++ smp_mb__before_llsc(); ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %1, %2 # atomic_add_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: add %0, %1, %3 \n" ++#else ++ " addu %0, %1, %3 \n" ++#endif ++ " sc %0, %2 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " b 4f \n" ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: addu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %1, %2 # atomic_add_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: add %0, %1, %3 \n" ++#else ++ " addu %0, %1, %3 \n" ++#endif ++ " sc %0, %2 \n" ++ " bnez %0, 4f \n" ++ " b 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: addu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++ " lw %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception 
on overflow. */ ++ "1: add %0, %2 \n" ++#else ++ " addu %0, %2 \n" ++#endif ++ " sw %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Note: Dest reg is not modified on overflow */ ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#endif ++ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++ ++ smp_llsc_mb(); ++ ++ return result; ++} ++ ++static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) + { + int result; + +@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) + return result; + } + +-static __inline__ int atomic_sub_return(int i, atomic_t * v) ++static __inline__ int atomic_sub_return(int i, atomic_t *v) ++{ ++ int result; ++ int temp; ++ ++ smp_mb__before_llsc(); ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %1, %2 # atomic_sub_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: sub %0, %1, %3 \n" ++#else ++ " subu %0, %1, %3 \n" ++#endif ++ " sc %0, %2 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " b 4f \n" ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: subu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "=m" (v->counter) ++ : "Ir" (i), "m" (v->counter) ++ : "memory"); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: ll %1, %2 # atomic_sub_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: sub %0, %1, %3 \n" ++#else ++ " subu %0, %1, %3 \n" ++#endif ++ " sc %0, %2 \n" ++ " bnez %0, 4f \n" ++ " b 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: subu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++ " lw %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "1: sub %0, %2 \n" ++#else ++ " subu %0, %2 \n" ++#endif ++ " sw %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Note: Dest reg is not modified on overflow */ ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#endif ++ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++ ++ smp_llsc_mb(); ++ ++ return result; ++} ++static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v) + { + int result; + +@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) + * Atomically test @v and subtract @i if @v is greater or equal than @i. + * The function returns the old value of @v minus @i. 
+ */ +-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) ++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v) + { + int result; + +@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) + return result; + } + +-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) ++static inline int atomic_cmpxchg(atomic_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, ++ int new) ++{ ++ return cmpxchg(&(v->counter), old, new); ++} ++ ++static inline int atomic_xchg(atomic_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} ++ ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&(v->counter), new); ++} + + /** + * __atomic_add_unless - add unless the number is a given value +@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + + #define atomic_dec_return(v) atomic_sub_return(1, (v)) + #define atomic_inc_return(v) atomic_add_return(1, (v)) ++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + + /* + * atomic_sub_and_test - subtract value from variable and test result +@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + * other cases. + */ + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) ++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v) == 0; ++} + + /* + * atomic_dec_and_test - decrement by 1 and test +@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + * Atomically increments @v by 1. + */ + #define atomic_inc(v) atomic_add(1, (v)) ++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + + /* + * atomic_dec - decrement and test +@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + * Atomically decrements @v by 1. + */ + #define atomic_dec(v) atomic_sub(1, (v)) ++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + + /* + * atomic_add_negative - add and test if negative +@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + * @v: pointer of type atomic64_t + * + */ +-#define atomic64_read(v) (*(volatile long *)&(v)->counter) ++static inline long atomic64_read(const atomic64_t *v) ++{ ++ return (*(volatile const long *) &v->counter); ++} ++ ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return (*(volatile const long *) &v->counter); ++} + + /* + * atomic64_set - set atomic variable + * @v: pointer of type atomic64_t + * @i: required value + */ +-#define atomic64_set(v, i) ((v)->counter = (i)) ++static inline void atomic64_set(atomic64_t *v, long i) ++{ ++ v->counter = i; ++} ++ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} + + /* + * atomic64_add - add integer to atomic variable +@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) + * + * Atomically adds @i to @v. 
+ */ +-static __inline__ void atomic64_add(long i, atomic64_t * v) ++static __inline__ void atomic64_add(long i, atomic64_t *v) ++{ ++ long temp; ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %0, %1 # atomic64_add \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: dadd %0, %2 \n" ++#else ++ " daddu %0, %2 \n" ++#endif ++ " scd %0, %1 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %0, %1 # atomic64_add \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: dadd %0, %2 \n" ++#else ++ " daddu %0, %2 \n" ++#endif ++ " scd %0, %1 \n" ++ " beqz %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "1: dadd %0, %1 \n" ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#else ++ " daddu %0, %1 \n" ++#endif ++ : "+r" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++} ++static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) + { + if (kernel_uses_llsc && R10000_LLSC_WAR) { + long temp; +@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v) + * + * Atomically subtracts @i from @v. + */ +-static __inline__ void atomic64_sub(long i, atomic64_t * v) ++static __inline__ void atomic64_sub(long i, atomic64_t *v) ++{ ++ long temp; ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %0, %1 # atomic64_sub \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: dsub %0, %2 \n" ++#else ++ " dsubu %0, %2 \n" ++#endif ++ " scd %0, %1 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %0, %1 # atomic64_sub \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "2: dsub %0, %2 \n" ++#else ++ " dsubu %0, %2 \n" ++#endif ++ " scd %0, %1 \n" ++ " beqz %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "3: \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ " .set mips0 \n" ++ : "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. 
*/ ++ "1: dsub %0, %1 \n" ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#else ++ " dsubu %0, %1 \n" ++#endif ++ : "+r" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++} ++ ++static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) + { + if (kernel_uses_llsc && R10000_LLSC_WAR) { + long temp; +@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) + /* + * Same as above, but return the result value + */ +-static __inline__ long atomic64_add_return(long i, atomic64_t * v) ++static __inline__ long atomic64_add_return(long i, atomic64_t *v) ++{ ++ long result; ++ long temp; ++ ++ smp_mb__before_llsc(); ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %1, %2 # atomic64_add_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: dadd %0, %1, %3 \n" ++#else ++ " daddu %0, %1, %3 \n" ++#endif ++ " scd %0, %2 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " b 4f \n" ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: daddu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "+m" (v->counter) ++ : "Ir" (i)); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %1, %2 # atomic64_add_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: dadd %0, %1, %3 \n" ++#else ++ " daddu %0, %1, %3 \n" ++#endif ++ " scd %0, %2 \n" ++ " bnez %0, 4f \n" ++ " b 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: daddu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "=m" (v->counter) ++ : "Ir" (i), "m" (v->counter) ++ : "memory"); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++ " ld %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. 
*/ ++ "1: dadd %0, %2 \n" ++#else ++ " daddu %0, %2 \n" ++#endif ++ " sd %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Note: Dest reg is not modified on overflow */ ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#endif ++ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++ ++ smp_llsc_mb(); ++ ++ return result; ++} ++static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) + { + long result; + +@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) + return result; + } + +-static __inline__ long atomic64_sub_return(long i, atomic64_t * v) ++static __inline__ long atomic64_sub_return(long i, atomic64_t *v) ++{ ++ long result; ++ long temp; ++ ++ smp_mb__before_llsc(); ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ long temp; ++ ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %1, %2 # atomic64_sub_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: dsub %0, %1, %3 \n" ++#else ++ " dsubu %0, %1, %3 \n" ++#endif ++ " scd %0, %2 \n" ++ " beqzl %0, 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " b 4f \n" ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: dsubu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "=m" (v->counter) ++ : "Ir" (i), "m" (v->counter) ++ : "memory"); ++ } else if (kernel_uses_llsc) { ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1: lld %1, %2 # atomic64_sub_return \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "2: dsub %0, %1, %3 \n" ++#else ++ " dsubu %0, %1, %3 \n" ++#endif ++ " scd %0, %2 \n" ++ " bnez %0, 4f \n" ++ " b 1b \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ " .set noreorder \n" ++ "3: b 5f \n" ++ " move %0, %1 \n" ++ " .set reorder \n" ++ _ASM_EXTABLE(2b, 3b) ++#endif ++ "4: dsubu %0, %1, %3 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ "5: \n" ++#endif ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "=m" (v->counter) ++ : "Ir" (i), "m" (v->counter) ++ : "memory"); ++ } else { ++ unsigned long flags; ++ ++ raw_local_irq_save(flags); ++ __asm__ __volatile__( ++ " ld %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Exception on overflow. */ ++ "1: dsub %0, %2 \n" ++#else ++ " dsubu %0, %2 \n" ++#endif ++ " sd %0, %1 \n" ++#ifdef CONFIG_PAX_REFCOUNT ++ /* Note: Dest reg is not modified on overflow */ ++ "2: \n" ++ _ASM_EXTABLE(1b, 2b) ++#endif ++ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); ++ raw_local_irq_restore(flags); ++ } ++ ++ smp_llsc_mb(); ++ ++ return result; ++} ++ ++static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v) + { + long result; + +@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) + * Atomically test @v and subtract @i if @v is greater or equal than @i. + * The function returns the old value of @v minus @i. 
+ */ +-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) ++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v) + { + long result; + +@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) + return result; + } + +-#define atomic64_cmpxchg(v, o, n) \ +- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) +-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new))) ++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ ++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, ++ long new) ++{ ++ return cmpxchg(&(v->counter), old, new); ++} ++ ++static inline long atomic64_xchg(atomic64_t *v, long new) ++{ ++ return xchg(&v->counter, new); ++} ++ ++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) ++{ ++ return xchg(&(v->counter), new); ++} + + /** + * atomic64_add_unless - add unless the number is a given value +@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) + #define atomic64_inc_return(v) atomic64_add_return(1, (v)) ++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v)) + + /* + * atomic64_sub_and_test - subtract value from variable and test result +@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + * other cases. + */ + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) ++#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0) + + /* + * atomic64_dec_and_test - decrement by 1 and test +@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + * Atomically increments @v by 1. + */ + #define atomic64_inc(v) atomic64_add(1, (v)) ++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v)) + + /* + * atomic64_dec - decrement and test +@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) + * Atomically decrements @v by 1. 
+ */ + #define atomic64_dec(v) atomic64_sub(1, (v)) ++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v)) + + /* + * atomic64_add_negative - add and test if negative +diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h +index e1aa4e4..670b68b 100644 +--- a/arch/mips/include/asm/barrier.h ++++ b/arch/mips/include/asm/barrier.h +@@ -184,7 +184,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h +index b4db69f..8f3b093 100644 +--- a/arch/mips/include/asm/cache.h ++++ b/arch/mips/include/asm/cache.h +@@ -9,10 +9,11 @@ + #ifndef _ASM_CACHE_H + #define _ASM_CACHE_H + ++#include <linux/const.h> + #include <kmalloc.h> + + #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_SHIFT L1_CACHE_SHIFT + #define SMP_CACHE_BYTES L1_CACHE_BYTES +diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h +index d414405..6bb4ba2 100644 +--- a/arch/mips/include/asm/elf.h ++++ b/arch/mips/include/asm/elf.h +@@ -398,13 +398,16 @@ extern const char *__elf_platform; + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + #endif + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + struct linux_binprm; + extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +-struct mm_struct; +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif /* _ASM_ELF_H */ +diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h +index c1f6afa..38cc6e9 100644 +--- a/arch/mips/include/asm/exec.h ++++ b/arch/mips/include/asm/exec.h +@@ -12,6 +12,6 @@ + #ifndef _ASM_EXEC_H + #define _ASM_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* _ASM_EXEC_H */ +diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h +index 9e8ef59..1139d6b 100644 +--- a/arch/mips/include/asm/hw_irq.h ++++ b/arch/mips/include/asm/hw_irq.h +@@ -10,7 +10,7 @@ + + #include <linux/atomic.h> + +-extern atomic_t irq_err_count; ++extern atomic_unchecked_t irq_err_count; + + /* + * interrupt-retrigger: NOP for now. 
This may not be appropriate for all +diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h +index d44622c..64990d2 100644 +--- a/arch/mips/include/asm/local.h ++++ b/arch/mips/include/asm/local.h +@@ -12,15 +12,25 @@ typedef struct + atomic_long_t a; + } local_t; + ++typedef struct { ++ atomic_long_unchecked_t a; ++} local_unchecked_t; ++ + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + + #define local_read(l) atomic_long_read(&(l)->a) ++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) + #define local_set(l, i) atomic_long_set(&(l)->a, (i)) ++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) + + #define local_add(i, l) atomic_long_add((i), (&(l)->a)) ++#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a)) + #define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) ++#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a)) + #define local_inc(l) atomic_long_inc(&(l)->a) ++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) + #define local_dec(l) atomic_long_dec(&(l)->a) ++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) + + /* + * Same as above, but return the result value +@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l) + return result; + } + ++static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l) ++{ ++ unsigned long result; ++ ++ if (kernel_uses_llsc && R10000_LLSC_WAR) { ++ unsigned long temp; ++ ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1:" __LL "%1, %2 # local_add_return \n" ++ " addu %0, %1, %3 \n" ++ __SC "%0, %2 \n" ++ " beqzl %0, 1b \n" ++ " addu %0, %1, %3 \n" ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) ++ : "Ir" (i), "m" (l->a.counter) ++ : "memory"); ++ } else if (kernel_uses_llsc) { ++ unsigned long temp; ++ ++ __asm__ __volatile__( ++ " .set mips3 \n" ++ "1:" __LL "%1, %2 # local_add_return \n" ++ " addu %0, %1, %3 \n" ++ __SC "%0, %2 \n" ++ " beqz %0, 1b \n" ++ " addu %0, %1, %3 \n" ++ " .set mips0 \n" ++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) ++ : "Ir" (i), "m" (l->a.counter) ++ : "memory"); ++ } else { ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ result = l->a.counter; ++ result += i; ++ l->a.counter = result; ++ local_irq_restore(flags); ++ } ++ ++ return result; ++} ++ + static __inline__ long local_sub_return(long i, local_t * l) + { + unsigned long result; +@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l) + + #define local_cmpxchg(l, o, n) \ + ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) ++#define local_cmpxchg_unchecked(l, o, n) \ ++ ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) + #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) + + /** +diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h +index 5e08bcc..cfedefc 100644 +--- a/arch/mips/include/asm/page.h ++++ b/arch/mips/include/asm/page.h +@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, + #ifdef CONFIG_CPU_MIPS32 + typedef struct { unsigned long pte_low, pte_high; } pte_t; + #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) +- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) ++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) + #else + typedef struct { unsigned long long pte; } pte_t; + #define pte_val(x) ((x).pte) +diff --git a/arch/mips/include/asm/pgalloc.h 
b/arch/mips/include/asm/pgalloc.h +index b336037..5b874cc 100644 +--- a/arch/mips/include/asm/pgalloc.h ++++ b/arch/mips/include/asm/pgalloc.h +@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + { + set_pud(pud, __pud((unsigned long)pmd)); + } ++ ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ pud_populate(mm, pud, pmd); ++} + #endif + + /* +diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h +index 008324d..f67c239 100644 +--- a/arch/mips/include/asm/pgtable.h ++++ b/arch/mips/include/asm/pgtable.h +@@ -20,6 +20,9 @@ + #include <asm/io.h> + #include <asm/pgtable-bits.h> + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + struct mm_struct; + struct vm_area_struct; + +diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h +index 25da651..ae2a259 100644 +--- a/arch/mips/include/asm/smtc_proc.h ++++ b/arch/mips/include/asm/smtc_proc.h +@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; + + /* Count of number of recoveries of "stolen" FPU access rights on 34K */ + +-extern atomic_t smtc_fpu_recoveries; ++extern atomic_unchecked_t smtc_fpu_recoveries; + + #endif /* __ASM_SMTC_PROC_H */ +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h +index e80ae50..b93dd2e 100644 +--- a/arch/mips/include/asm/thread_info.h ++++ b/arch/mips/include/asm/thread_info.h +@@ -105,6 +105,8 @@ static inline struct thread_info *current_thread_info(void) + #define TIF_SECCOMP 4 /* secure computing */ + #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ + #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ ++/* li takes a 32bit immediate */ ++#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */ + #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ + #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ + #define TIF_NOHZ 19 /* in adaptive nohz mode */ +@@ -134,14 +136,15 @@ static inline struct thread_info *current_thread_info(void) + #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) + #define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) + + #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ + _TIF_SYSCALL_AUDIT | \ +- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_leave() */ + #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ +- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) ++ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) + + /* work to do on interrupt/exception return */ + #define _TIF_WORK_MASK \ +@@ -149,7 +152,7 @@ static inline struct thread_info *current_thread_info(void) + /* work to do on any return to u-space */ + #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \ + _TIF_WORK_SYSCALL_EXIT | \ +- _TIF_SYSCALL_TRACEPOINT) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) + + /* + * We stash processor id into a COP0 register to retrieve it fast +diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h +index f3fa375..3af6637 100644 +--- a/arch/mips/include/asm/uaccess.h ++++ b/arch/mips/include/asm/uaccess.h +@@ -128,6 +128,7 @@ extern u64 __ua_limit; + __ok == 0; \ + }) + ++#define access_ok_noprefault(type, addr, size) 
access_ok((type), (addr), (size)) + #define access_ok(type, addr, size) \ + likely(__access_ok((addr), (size), __access_mask)) + +diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c +index 1188e00..41cf144 100644 +--- a/arch/mips/kernel/binfmt_elfn32.c ++++ b/arch/mips/kernel/binfmt_elfn32.c +@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + #include <linux/module.h> + #include <linux/elfcore.h> +diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c +index 7faf5f2..f3d3cf4 100644 +--- a/arch/mips/kernel/binfmt_elfo32.c ++++ b/arch/mips/kernel/binfmt_elfo32.c +@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + + /* +diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c +index 2b91fe8..fe4f6b4 100644 +--- a/arch/mips/kernel/i8259.c ++++ b/arch/mips/kernel/i8259.c +@@ -205,7 +205,7 @@ spurious_8259A_irq: + printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); + spurious_irq_mask |= irqmask; + } +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + /* + * Theoretically we do not have to handle this IRQ, + * but in Linux this does not cause problems and is +diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c +index 44a1f79..2bd6aa3 100644 +--- a/arch/mips/kernel/irq-gt641xx.c ++++ b/arch/mips/kernel/irq-gt641xx.c +@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void) + } + } + +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + } + + void __init gt641xx_irq_init(void) +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c +index d1fea7a..2e591b0 100644 +--- a/arch/mips/kernel/irq.c ++++ b/arch/mips/kernel/irq.c +@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq) + printk("unexpected IRQ # %d\n", irq); + } + +-atomic_t irq_err_count; ++atomic_unchecked_t irq_err_count; + + int arch_show_interrupts(struct seq_file *p, int prec) + { +- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); ++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); + return 0; + } + + asmlinkage void spurious_interrupt(void) + { +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + } + + void __init init_IRQ(void) +@@ -110,7 +110,10 @@ void __init init_IRQ(void) + #endif + } + ++ + #ifdef DEBUG_STACKOVERFLOW ++extern void gr_handle_kernel_exploit(void); ++ + static inline void check_stack_overflow(void) + { + unsigned long sp; +@@ -126,6 +129,7 @@ static inline void check_stack_overflow(void) + printk("do_IRQ: stack overflow: %ld\n", + sp - sizeof(struct thread_info)); + dump_stack(); ++ gr_handle_kernel_exploit(); + } + } + #else +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c 
+index 6ae540e..b7396dc 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -562,15 +562,3 @@ unsigned long get_wchan(struct task_struct *task) + out: + return pc; + } +- +-/* +- * Don't forget that the stack pointer must be aligned on a 8 bytes +- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. +- */ +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- +- return sp & ALMASK; +-} +diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c +index 7da9b76..21578be 100644 +--- a/arch/mips/kernel/ptrace.c ++++ b/arch/mips/kernel/ptrace.c +@@ -658,6 +658,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace +@@ -674,6 +678,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) + tracehook_report_syscall_entry(regs)) + ret = -1; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[2]); + +diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c +index 07fc524..b9d7f28 100644 +--- a/arch/mips/kernel/reset.c ++++ b/arch/mips/kernel/reset.c +@@ -13,6 +13,7 @@ + #include <linux/reboot.h> + + #include <asm/reboot.h> ++#include <asm/bug.h> + + /* + * Urgs ... Too many MIPS machines to handle this in a generic way. +@@ -29,16 +30,19 @@ void machine_restart(char *command) + { + if (_machine_restart) + _machine_restart(command); ++ BUG(); + } + + void machine_halt(void) + { + if (_machine_halt) + _machine_halt(); ++ BUG(); + } + + void machine_power_off(void) + { + if (pm_power_off) + pm_power_off(); ++ BUG(); + } +diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c +index c10aa84..9ec2e60 100644 +--- a/arch/mips/kernel/smtc-proc.c ++++ b/arch/mips/kernel/smtc-proc.c +@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS]; + + struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; + +-atomic_t smtc_fpu_recoveries; ++atomic_unchecked_t smtc_fpu_recoveries; + + static int smtc_proc_show(struct seq_file *m, void *v) + { +@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v) + for(i = 0; i < NR_CPUS; i++) + seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); + seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", +- atomic_read(&smtc_fpu_recoveries)); ++ atomic_read_unchecked(&smtc_fpu_recoveries)); + return 0; + } + +@@ -73,7 +73,7 @@ void init_smtc_stats(void) + smtc_cpu_stats[i].selfipis = 0; + } + +- atomic_set(&smtc_fpu_recoveries, 0); ++ atomic_set_unchecked(&smtc_fpu_recoveries, 0); + + proc_create("smtc", 0444, NULL, &smtc_proc_fops); + } +diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c +index dfc1b91..11a2c07 100644 +--- a/arch/mips/kernel/smtc.c ++++ b/arch/mips/kernel/smtc.c +@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void) + } + smtc_ipi_qdump(); + printk("%d Recoveries of \"stolen\" FPU\n", +- atomic_read(&smtc_fpu_recoveries)); ++ atomic_read_unchecked(&smtc_fpu_recoveries)); + } + + +diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c +index c24ad5f..9983ab2 100644 +--- a/arch/mips/kernel/sync-r4k.c ++++ b/arch/mips/kernel/sync-r4k.c +@@ -20,8 +20,8 @@ + #include 
<asm/mipsregs.h> + + static atomic_t count_start_flag = ATOMIC_INIT(0); +-static atomic_t count_count_start = ATOMIC_INIT(0); +-static atomic_t count_count_stop = ATOMIC_INIT(0); ++static atomic_unchecked_t count_count_start = ATOMIC_INIT(0); ++static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0); + static atomic_t count_reference = ATOMIC_INIT(0); + + #define COUNTON 100 +@@ -68,13 +68,13 @@ void synchronise_count_master(int cpu) + + for (i = 0; i < NR_LOOPS; i++) { + /* slaves loop on '!= 2' */ +- while (atomic_read(&count_count_start) != 1) ++ while (atomic_read_unchecked(&count_count_start) != 1) + mb(); +- atomic_set(&count_count_stop, 0); ++ atomic_set_unchecked(&count_count_stop, 0); + smp_wmb(); + + /* this lets the slaves write their count register */ +- atomic_inc(&count_count_start); ++ atomic_inc_unchecked(&count_count_start); + + /* + * Everyone initialises count in the last loop: +@@ -85,11 +85,11 @@ void synchronise_count_master(int cpu) + /* + * Wait for all slaves to leave the synchronization point: + */ +- while (atomic_read(&count_count_stop) != 1) ++ while (atomic_read_unchecked(&count_count_stop) != 1) + mb(); +- atomic_set(&count_count_start, 0); ++ atomic_set_unchecked(&count_count_start, 0); + smp_wmb(); +- atomic_inc(&count_count_stop); ++ atomic_inc_unchecked(&count_count_stop); + } + /* Arrange for an interrupt in a short while */ + write_c0_compare(read_c0_count() + COUNTON); +@@ -130,8 +130,8 @@ void synchronise_count_slave(int cpu) + initcount = atomic_read(&count_reference); + + for (i = 0; i < NR_LOOPS; i++) { +- atomic_inc(&count_count_start); +- while (atomic_read(&count_count_start) != 2) ++ atomic_inc_unchecked(&count_count_start); ++ while (atomic_read_unchecked(&count_count_start) != 2) + mb(); + + /* +@@ -140,8 +140,8 @@ void synchronise_count_slave(int cpu) + if (i == NR_LOOPS-1) + write_c0_count(initcount); + +- atomic_inc(&count_count_stop); +- while (atomic_read(&count_count_stop) != 2) ++ atomic_inc_unchecked(&count_count_stop); ++ while (atomic_read_unchecked(&count_count_stop) != 2) + mb(); + } + /* Arrange for an interrupt in a short while */ +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 81e6ae0..6ab6e79 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -691,7 +691,18 @@ asmlinkage void do_ov(struct pt_regs *regs) + siginfo_t info; + + prev_state = exception_enter(); +- die_if_kernel("Integer overflow", regs); ++ if (unlikely(!user_mode(regs))) { ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (fixup_exception(regs)) { ++ pax_report_refcount_overflow(regs); ++ exception_exit(prev_state); ++ return; ++ } ++#endif ++ ++ die("Integer overflow", regs); ++ } + + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; +diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c +index becc42b..9e43d4b 100644 +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c +@@ -28,6 +28,23 @@ + #include <asm/highmem.h> /* For VMALLOC_END */ + #include <linux/kdebug.h> + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. 
It determines the address, + * and the problem, and then passes it off to one of the appropriate +@@ -199,6 +216,14 @@ bad_area: + bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) { ++ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs)); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + tsk->thread.cp0_badvaddr = address; + tsk->thread.error_code = write; + #if 0 +diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c +index f1baadd..5472dca 100644 +--- a/arch/mips/mm/mmap.c ++++ b/arch/mips/mm/mmap.c +@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + struct vm_area_struct *vma; + unsigned long addr = addr0; + int do_color_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (unlikely(len > TASK_SIZE)) +@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + do_color_align = 1; + + /* requesting a specific address */ ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + + info.length = len; + info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + + if (dir == DOWN) { + info.flags = VM_UNMAPPED_AREA_TOPDOWN; +@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + { + unsigned long random_factor = 0UL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (current->flags & PF_RANDOMIZE) { + random_factor = get_random_int(); + random_factor = random_factor << PAGE_SHIFT; +@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + mm->mmap_base = mmap_base(random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } + } + +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = get_random_int(); +- +- rnd = rnd << PAGE_SHIFT; +- /* 8MB for 32bit, 256MB for 64bit */ +- if (TASK_IS_32BIT_ADDR) +- rnd = rnd & 0x7ffffful; +- else +- rnd = rnd & 0xffffffful; +- +- return rnd; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +- + int __virt_addr_valid(const volatile void *kaddr) + { + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); +diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c +index 59cccd9..f39ac2f 100644 +--- a/arch/mips/pci/pci-octeon.c 
++++ b/arch/mips/pci/pci-octeon.c +@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, + + + static struct pci_ops octeon_pci_ops = { +- octeon_read_config, +- octeon_write_config, ++ .read = octeon_read_config, ++ .write = octeon_write_config, + }; + + static struct resource octeon_pci_mem_resource = { +diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c +index 5e36c33..eb4a17b 100644 +--- a/arch/mips/pci/pcie-octeon.c ++++ b/arch/mips/pci/pcie-octeon.c +@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn, + } + + static struct pci_ops octeon_pcie0_ops = { +- octeon_pcie0_read_config, +- octeon_pcie0_write_config, ++ .read = octeon_pcie0_read_config, ++ .write = octeon_pcie0_write_config, + }; + + static struct resource octeon_pcie0_mem_resource = { +@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = { + }; + + static struct pci_ops octeon_pcie1_ops = { +- octeon_pcie1_read_config, +- octeon_pcie1_write_config, ++ .read = octeon_pcie1_read_config, ++ .write = octeon_pcie1_write_config, + }; + + static struct resource octeon_pcie1_mem_resource = { +@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = { + }; + + static struct pci_ops octeon_dummy_ops = { +- octeon_dummy_read_config, +- octeon_dummy_write_config, ++ .read = octeon_dummy_read_config, ++ .write = octeon_dummy_write_config, + }; + + static struct resource octeon_dummy_mem_resource = { +diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c +index a2358b4..7cead4f 100644 +--- a/arch/mips/sgi-ip27/ip27-nmi.c ++++ b/arch/mips/sgi-ip27/ip27-nmi.c +@@ -187,9 +187,9 @@ void + cont_nmi_dump(void) + { + #ifndef REAL_NMI_SIGNAL +- static atomic_t nmied_cpus = ATOMIC_INIT(0); ++ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0); + +- atomic_inc(&nmied_cpus); ++ atomic_inc_unchecked(&nmied_cpus); + #endif + /* + * Only allow 1 cpu to proceed +@@ -233,7 +233,7 @@ cont_nmi_dump(void) + udelay(10000); + } + #else +- while (atomic_read(&nmied_cpus) != num_online_cpus()); ++ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus()); + #endif + + /* +diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c +index a046b30..6799527 100644 +--- a/arch/mips/sni/rm200.c ++++ b/arch/mips/sni/rm200.c +@@ -270,7 +270,7 @@ spurious_8259A_irq: + "spurious RM200 8259A interrupt: IRQ%d.\n", irq); + spurious_irq_mask |= irqmask; + } +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + /* + * Theoretically we do not have to handle this IRQ, + * but in Linux this does not cause problems and is +diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c +index 41e873b..34d33a7 100644 +--- a/arch/mips/vr41xx/common/icu.c ++++ b/arch/mips/vr41xx/common/icu.c +@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq) + + printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); + +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + + return -1; + } +diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c +index ae0e4ee..e8f0692 100644 +--- a/arch/mips/vr41xx/common/irq.c ++++ b/arch/mips/vr41xx/common/irq.c +@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq) + irq_cascade_t *cascade; + + if (irq >= NR_IRQS) { +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + return; + } + +@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq) + ret = 
cascade->get_irq(irq); + irq = ret; + if (ret < 0) +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + else + irq_dispatch(irq); + if (!irqd_irq_disabled(idata) && chip->irq_unmask) +diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h +index 967d144..db12197 100644 +--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h ++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h +@@ -11,12 +11,14 @@ + #ifndef _ASM_PROC_CACHE_H + #define _ASM_PROC_CACHE_H + ++#include <linux/const.h> ++ + /* L1 cache */ + + #define L1_CACHE_NWAYS 4 /* number of ways in caches */ + #define L1_CACHE_NENTRIES 256 /* number of entries in each way */ +-#define L1_CACHE_BYTES 16 /* bytes per entry */ + #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */ ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ + #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */ + + #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ +diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +index bcb5df2..84fabd2 100644 +--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h ++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h +@@ -16,13 +16,15 @@ + #ifndef _ASM_PROC_CACHE_H + #define _ASM_PROC_CACHE_H + ++#include <linux/const.h> ++ + /* + * L1 cache + */ + #define L1_CACHE_NWAYS 4 /* number of ways in caches */ + #define L1_CACHE_NENTRIES 128 /* number of entries in each way */ +-#define L1_CACHE_BYTES 32 /* bytes per entry */ + #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */ ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ + #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */ + + #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ +diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h +index 4ce7a01..449202a 100644 +--- a/arch/openrisc/include/asm/cache.h ++++ b/arch/openrisc/include/asm/cache.h +@@ -19,11 +19,13 @@ + #ifndef __ASM_OPENRISC_CACHE_H + #define __ASM_OPENRISC_CACHE_H + ++#include <linux/const.h> ++ + /* FIXME: How can we replace these with values from the CPU... + * they shouldn't be hard-coded! 
+ */ + +-#define L1_CACHE_BYTES 16 + #define L1_CACHE_SHIFT 4 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* __ASM_OPENRISC_CACHE_H */ +diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h +index 472886c..00e7df9 100644 +--- a/arch/parisc/include/asm/atomic.h ++++ b/arch/parisc/include/asm/atomic.h +@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) + return dec; + } + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* !CONFIG_64BIT */ + + +diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h +index 47f11c7..3420df2 100644 +--- a/arch/parisc/include/asm/cache.h ++++ b/arch/parisc/include/asm/cache.h +@@ -5,6 +5,7 @@ + #ifndef __ARCH_PARISC_CACHE_H + #define __ARCH_PARISC_CACHE_H + ++#include <linux/const.h> + + /* + * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have +@@ -15,13 +16,13 @@ + * just ruin performance. + */ + #ifdef CONFIG_PA20 +-#define L1_CACHE_BYTES 64 + #define L1_CACHE_SHIFT 6 + #else +-#define L1_CACHE_BYTES 32 + #define L1_CACHE_SHIFT 5 + #endif + ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) ++ + #ifndef __ASSEMBLY__ + + #define SMP_CACHE_BYTES L1_CACHE_BYTES +diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h +index 3391d06..c23a2cc 100644 +--- a/arch/parisc/include/asm/elf.h ++++ b/arch/parisc/include/asm/elf.h +@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */ + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. 
*/ +diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h +index f213f5b..0af3e8e 100644 +--- a/arch/parisc/include/asm/pgalloc.h ++++ b/arch/parisc/include/asm/pgalloc.h +@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) + (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); + } + ++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) ++{ ++ pgd_populate(mm, pgd, pmd); ++} ++ + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) + { + pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, +@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) + #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) + #define pmd_free(mm, x) do { } while (0) + #define pgd_populate(mm, pmd, pte) BUG() ++#define pgd_populate_kernel(mm, pmd, pte) BUG() + + #endif + +diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h +index 22b89d1..ce34230 100644 +--- a/arch/parisc/include/asm/pgtable.h ++++ b/arch/parisc/include/asm/pgtable.h +@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); + #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) + #define PAGE_COPY PAGE_EXECREAD + #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) + #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) + #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) +diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h +index 4006964..fcb3cc2 100644 +--- a/arch/parisc/include/asm/uaccess.h ++++ b/arch/parisc/include/asm/uaccess.h +@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) + { +- int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + int ret = -EFAULT; + +- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) ++ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n)) + ret = __copy_from_user(to, from, n); + else + copy_from_user_overflow(); +diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c +index 50dfafc..b9fc230 100644 +--- a/arch/parisc/kernel/module.c ++++ b/arch/parisc/kernel/module.c +@@ -98,16 +98,38 @@ + + /* three functions to determine where in the module core + * or init pieces the location is */ ++static inline int in_init_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rx && ++ loc < (me->module_init_rx + me->init_size_rx)); ++} ++ ++static inline int in_init_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rw && ++ loc < (me->module_init_rw + me->init_size_rw)); ++} ++ + static inline int in_init(struct module *me, void *loc) + { +- return (loc >= me->module_init && +- loc <= (me->module_init + me->init_size)); ++ return in_init_rx(me, 
loc) || in_init_rw(me, loc); ++} ++ ++static inline int in_core_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rx && ++ loc < (me->module_core_rx + me->core_size_rx)); ++} ++ ++static inline int in_core_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rw && ++ loc < (me->module_core_rw + me->core_size_rw)); + } + + static inline int in_core(struct module *me, void *loc) + { +- return (loc >= me->module_core && +- loc <= (me->module_core + me->core_size)); ++ return in_core_rx(me, loc) || in_core_rw(me, loc); + } + + static inline int in_local(struct module *me, void *loc) +@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, + } + + /* align things a bit */ +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.got_offset = me->core_size; +- me->core_size += gots * sizeof(struct got_entry); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += gots * sizeof(struct got_entry); + +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.fdesc_offset = me->core_size; +- me->core_size += fdescs * sizeof(Elf_Fdesc); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.fdesc_offset = me->core_size_rw; ++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc); + + me->arch.got_max = gots; + me->arch.fdesc_max = fdescs; +@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) + + BUG_ON(value == 0); + +- got = me->module_core + me->arch.got_offset; ++ got = me->module_core_rw + me->arch.got_offset; + for (i = 0; got[i].addr; i++) + if (got[i].addr == value) + goto out; +@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) + #ifdef CONFIG_64BIT + static Elf_Addr get_fdesc(struct module *me, unsigned long value) + { +- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; ++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; + + if (!value) { + printk(KERN_ERR "%s: zero OPD requested!\n", me->name); +@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) + + /* Create new one */ + fdesc->addr = value; +- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + return (Elf_Addr)fdesc; + } + #endif /* CONFIG_64BIT */ +@@ -843,7 +865,7 @@ register_unwind_table(struct module *me, + + table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; + end = table + sechdrs[me->arch.unwind_section].sh_size; +- gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + + DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", + me->arch.unwind_section, table, end, gp); +diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c +index e1ffea2..46ed66e 100644 +--- a/arch/parisc/kernel/sys_parisc.c ++++ b/arch/parisc/kernel/sys_parisc.c +@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long task_size = TASK_SIZE; + int do_color_align, last_mmap; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + if (len > task_size) + return -ENOMEM; +@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + goto found_addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if 
(do_color_align && last_mmap) + addr = COLOR_ALIGN(addr, last_mmap, pgoff); +@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.high_limit = mmap_upper_limit(); + info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; + info.align_offset = shared_align_offset(last_mmap, pgoff); ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + found_addr: +@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + unsigned long addr = addr0; + int do_color_align, last_mmap; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); + + #ifdef CONFIG_64BIT + /* This should only ever run for 32-bit processes. */ +@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + } + + /* requesting a specific address */ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align && last_mmap) + addr = COLOR_ALIGN(addr, last_mmap, pgoff); +@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; + info.align_offset = shared_align_offset(last_mmap, pgoff); ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + goto found_addr; +@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + mm->mmap_legacy_base = mmap_legacy_base(); + mm->mmap_base = mmap_upper_limit(); + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) { ++ mm->mmap_legacy_base += mm->delta_mmap; ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++ } ++#endif ++ + if (mmap_is_legacy()) { + mm->mmap_base = mm->mmap_legacy_base; + mm->get_unmapped_area = arch_get_unmapped_area; +diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c +index 47ee620..1107387 100644 +--- a/arch/parisc/kernel/traps.c ++++ b/arch/parisc/kernel/traps.c +@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm,regs->iaoq[0]); +- if (vma && (regs->iaoq[0] >= vma->vm_start) +- && (vma->vm_flags & VM_EXEC)) { +- ++ if (vma && (regs->iaoq[0] >= vma->vm_start)) { + fault_address = regs->iaoq[0]; + fault_space = regs->iasq[0]; + +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index d72197f..c017c84 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -15,6 +15,7 @@ + #include <linux/sched.h> + #include <linux/interrupt.h> + #include <linux/module.h> ++#include <linux/unistd.h> + + #include <asm/uaccess.h> + #include <asm/traps.h> +@@ -50,7 +51,7 @@ int show_unhandled_signals = 1; + static unsigned long + parisc_acctyp(unsigned long code, unsigned int inst) + { +- if (code == 6 || code == 16) ++ if (code == 6 || code == 7 || code == 16) + return VM_EXEC; + + switch (inst & 0xf0000000) { +@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when rt_sigreturn trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ 
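/*
 * [Editorial note, not patch content] The do-loop that follows implements
 * PaX's EMUPLT emulation: on an instruction-fetch fault it reads the code
 * words at the faulting PC with get_user(), compares them against the
 * fixed opcode sequence the linker emits for an unpatched PLT slot, and
 * on a match performs the dispatch to the runtime resolver in software
 * instead of executing the (non-executable) stub. A generic sketch of the
 * matching step, with hypothetical names:
 */
static int insns_match(unsigned long pc, const unsigned int *pattern, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		unsigned int insn;

		/* get_user() fails if the PC page is unmapped: no match */
		if (get_user(insn, (unsigned int __user *)pc + i))
			return 0;
		if (insn != pattern[i])
			return 0;
	}
	return 1;
}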
do { /* PaX: unpatched PLT emulation */ ++ unsigned int bl, depwi; ++ ++ err = get_user(bl, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); ++ ++ if (err) ++ break; ++ ++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { ++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; ++ ++ err = get_user(ldw, (unsigned int *)addr); ++ err |= get_user(bv, (unsigned int *)(addr+4)); ++ err |= get_user(ldw2, (unsigned int *)(addr+8)); ++ ++ if (err) ++ break; ++ ++ if (ldw == 0x0E801096U && ++ bv == 0xEAC0C000U && ++ ldw2 == 0x0E881095U) ++ { ++ unsigned int resolver, map; ++ ++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); ++ if (err) ++ break; ++ ++ regs->gr[20] = instruction_pointer(regs)+8; ++ regs->gr[21] = map; ++ regs->gr[22] = resolver; ++ regs->iaoq[0] = resolver | 3UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ return 3; ++ } ++ } ++ } while (0); ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ ++#ifndef CONFIG_PAX_EMUSIGRT ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++#endif ++ ++ do { /* PaX: rt_sigreturn emulation */ ++ unsigned int ldi1, ldi2, bel, nop; ++ ++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); ++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); ++ ++ if (err) ++ break; ++ ++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && ++ ldi2 == 0x3414015AU && ++ bel == 0xE4008200U && ++ nop == 0x08000240U) ++ { ++ regs->gr[25] = (ldi1 & 2) >> 1; ++ regs->gr[20] = __NR_rt_sigreturn; ++ regs->gr[31] = regs->iaoq[1] + 16; ++ regs->sr[0] = regs->iasq[1]; ++ regs->iaoq[0] = 0x100UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ regs->iasq[0] = regs->sr[2]; ++ regs->iasq[1] = regs->sr[2]; ++ return 2; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + int fixup_exception(struct pt_regs *regs) + { + const struct exception_table_entry *fix; +@@ -234,8 +345,33 @@ retry: + + good_area: + +- if ((vma->vm_flags & acc_type) != acc_type) ++ if ((vma->vm_flags & acc_type) != acc_type) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && ++ (address & ~3UL) == instruction_pointer(regs)) ++ { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 3: ++ return; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ case 2: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; ++ } + + /* + * If for any reason at all we couldn't handle the fault, make +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index ee3c660..afa4212 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -394,6 +394,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE + config KEXEC + bool "kexec system call" + depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) ++ depends on !GRKERNSEC_KMEM + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot +diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h +index e3b1d41..8e81edf 100644 +--- a/arch/powerpc/include/asm/atomic.h ++++ b/arch/powerpc/include/asm/atomic.h +@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v) + return t1; + } + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* __powerpc64__ */ + + #endif /* __KERNEL__ */ +diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h +index f89da80..7f5b05a 100644 +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -73,7 +73,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + __lwsync(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h +index ed0afc1..0332825 100644 +--- a/arch/powerpc/include/asm/cache.h ++++ b/arch/powerpc/include/asm/cache.h +@@ -3,6 +3,7 @@ + + #ifdef __KERNEL__ + ++#include <linux/const.h> + + /* bytes per L1 cache line */ + #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) +@@ -22,7 +23,7 @@ + #define L1_CACHE_SHIFT 7 + #endif + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define SMP_CACHE_BYTES L1_CACHE_BYTES + +diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h +index 935b5e7..7001d2d 100644 +--- a/arch/powerpc/include/asm/elf.h ++++ b/arch/powerpc/include/asm/elf.h +@@ -28,8 +28,19 @@ + the loader. 
We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) ++#define ELF_ET_DYN_BASE (0x20000000) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (0x10000000UL) ++ ++#ifdef __powerpc64__ ++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) ++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28) ++#else ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif ++#endif + + #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) + +@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, + (0x7ff >> (PAGE_SHIFT - 12)) : \ + (0x3ffff >> (PAGE_SHIFT - 12))) + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- +- + #ifdef CONFIG_SPU_BASE + /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ + #define NT_SPU 1 +diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h +index 8196e9c..d83a9f3 100644 +--- a/arch/powerpc/include/asm/exec.h ++++ b/arch/powerpc/include/asm/exec.h +@@ -4,6 +4,6 @@ + #ifndef _ASM_POWERPC_EXEC_H + #define _ASM_POWERPC_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* _ASM_POWERPC_EXEC_H */ +diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h +index 5acabbd..7ea14fa 100644 +--- a/arch/powerpc/include/asm/kmap_types.h ++++ b/arch/powerpc/include/asm/kmap_types.h +@@ -10,7 +10,7 @@ + * 2 of the License, or (at your option) any later version. + */ + +-#define KM_TYPE_NR 16 ++#define KM_TYPE_NR 17 + + #endif /* __KERNEL__ */ + #endif /* _ASM_POWERPC_KMAP_TYPES_H */ +diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h +index b8da913..60b608a 100644 +--- a/arch/powerpc/include/asm/local.h ++++ b/arch/powerpc/include/asm/local.h +@@ -9,15 +9,26 @@ typedef struct + atomic_long_t a; + } local_t; + ++typedef struct ++{ ++ atomic_long_unchecked_t a; ++} local_unchecked_t; ++ + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + + #define local_read(l) atomic_long_read(&(l)->a) ++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) + #define local_set(l,i) atomic_long_set(&(l)->a, (i)) ++#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i)) + + #define local_add(i,l) atomic_long_add((i),(&(l)->a)) ++#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a)) + #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) ++#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a)) + #define local_inc(l) atomic_long_inc(&(l)->a) ++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) + #define local_dec(l) atomic_long_dec(&(l)->a) ++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) + + static __inline__ long local_add_return(long a, local_t *l) + { +@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l) + + return t; + } ++#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a)) + + #define local_add_negative(a, l) (local_add_return((a), (l)) < 0) + +@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l) + + return t; + } ++#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a)) + + static __inline__ long 
local_inc_return(local_t *l) + { +@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l) + + #define local_cmpxchg(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) ++#define local_cmpxchg_unchecked(l, o, n) \ ++ (cmpxchg_local(&((l)->a.counter), (o), (n))) + #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) + + /** +diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h +index 8565c25..2865190 100644 +--- a/arch/powerpc/include/asm/mman.h ++++ b/arch/powerpc/include/asm/mman.h +@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) + } + #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) + +-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) ++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags) + { + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); + } +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h +index 32e4e21..62afb12 100644 +--- a/arch/powerpc/include/asm/page.h ++++ b/arch/powerpc/include/asm/page.h +@@ -230,8 +230,9 @@ extern long long virt_phys_offset; + * and needs to be executable. This means the whole heap ends + * up being executable. + */ +-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_DATA_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +@@ -259,6 +260,9 @@ extern long long virt_phys_offset; + #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) + #endif + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #ifndef CONFIG_PPC_BOOK3S_64 + /* + * Use the top bit of the higher-level page table entries to indicate whether +diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h +index 88693ce..ac6f9ab 100644 +--- a/arch/powerpc/include/asm/page_64.h ++++ b/arch/powerpc/include/asm/page_64.h +@@ -153,15 +153,18 @@ do { \ + * stack by default, so in the absence of a PT_GNU_STACK program header + * we turn execute permission off. + */ +-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_STACK_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + ++#ifndef CONFIG_PAX_PAGEEXEC + #define VM_STACK_DEFAULT_FLAGS \ + (is_32bit_task() ? 
\ + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) ++#endif + + #include <asm-generic/getorder.h> + +diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h +index 4b0be20..c15a27d 100644 +--- a/arch/powerpc/include/asm/pgalloc-64.h ++++ b/arch/powerpc/include/asm/pgalloc-64.h +@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) + #ifndef CONFIG_PPC_64K_PAGES + + #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) ++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD)) + + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) + { +@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + pud_set(pud, (unsigned long)pmd); + } + ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ pud_populate(mm, pud, pmd); ++} ++ + #define pmd_populate(mm, pmd, pte_page) \ + pmd_populate_kernel(mm, pmd, page_address(pte_page)) + #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) +@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table); + #endif + + #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) ++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) + + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h +index 3ebb188..e17dddf 100644 +--- a/arch/powerpc/include/asm/pgtable.h ++++ b/arch/powerpc/include/asm/pgtable.h +@@ -2,6 +2,7 @@ + #define _ASM_POWERPC_PGTABLE_H + #ifdef __KERNEL__ + ++#include <linux/const.h> + #ifndef __ASSEMBLY__ + #include <linux/mmdebug.h> + #include <asm/processor.h> /* For TASK_SIZE */ +diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h +index 4aad413..85d86bf 100644 +--- a/arch/powerpc/include/asm/pte-hash32.h ++++ b/arch/powerpc/include/asm/pte-hash32.h +@@ -21,6 +21,7 @@ + #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ + #define _PAGE_USER 0x004 /* usermode access allowed */ + #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ ++#define _PAGE_EXEC _PAGE_GUARDED + #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ + #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ + #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index ce17815..c5574cc 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -249,6 +249,7 @@ + #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ + #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ + #define DSISR_NOHPTE 0x40000000 /* no translation found */ ++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ + #define DSISR_PROTFAULT 0x08000000 /* protection fault */ + #define DSISR_ISSTORE 0x02000000 /* access was a store */ + #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ +diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h +index 084e080..9415a3d 100644 +--- a/arch/powerpc/include/asm/smp.h ++++ b/arch/powerpc/include/asm/smp.h +@@ -51,7 +51,7 @@ struct smp_ops_t { + int (*cpu_disable)(void); + void (*cpu_die)(unsigned int nr); + int (*cpu_bootable)(unsigned int nr); +-}; ++} __no_const; + + extern void smp_send_debugger_break(void); + extern void 
start_secondary_resume(void); +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h +index b034ecd..af7e31f 100644 +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h +@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void) + #if defined(CONFIG_PPC64) + #define TIF_ELF2ABI 18 /* function descriptors must die! */ + #endif ++/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */ ++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) + #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) + #define _TIF_NOHZ (1<<TIF_NOHZ) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) + #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ +- _TIF_NOHZ) ++ _TIF_NOHZ | _TIF_GRSEC_SETXID) + + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index 9485b43..3bd3c16 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -58,6 +58,7 @@ + + #endif + ++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) + #define access_ok(type, addr, size) \ + (__chk_user_ptr(addr), \ + __access_ok((__force unsigned long)(addr), (size), get_fs())) +@@ -318,52 +319,6 @@ do { \ + extern unsigned long __copy_tofrom_user(void __user *to, + const void __user *from, unsigned long size); + +-#ifndef __powerpc64__ +- +-static inline unsigned long copy_from_user(void *to, +- const void __user *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_tofrom_user((__force void __user *)to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user((__force void __user *)to, from, +- n - over) + over; +- } +- return n; +-} +- +-static inline unsigned long copy_to_user(void __user *to, +- const void *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_tofrom_user(to, (__force void __user *)from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, (__force void __user *)from, +- n - over) + over; +- } +- return n; +-} +- +-#else /* __powerpc64__ */ +- +-#define __copy_in_user(to, from, size) \ +- __copy_tofrom_user((to), (from), (size)) +- +-extern unsigned long copy_from_user(void *to, const void __user *from, +- unsigned long n); +-extern unsigned long copy_to_user(void __user *to, const void *from, +- unsigned long n); +-extern unsigned long copy_in_user(void __user *to, const void __user *from, +- unsigned long n); +- +-#endif /* __powerpc64__ */ +- + static inline unsigned long __copy_from_user_inatomic(void *to, + const void __user *from, unsigned long n) + { +@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ + return __copy_tofrom_user((__force void __user *)to, from, n); + } + +@@ -413,6 
+372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_tofrom_user(to, (__force const void __user *)from, n); + } + +@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to, + return __copy_to_user_inatomic(to, from, size); + } + ++#ifndef __powerpc64__ ++ ++static inline unsigned long __must_check copy_from_user(void *to, ++ const void __user *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_READ, from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ return __copy_tofrom_user((__force void __user *)to, from, n); ++ } ++ if ((unsigned long)from < TASK_SIZE) { ++ over = (unsigned long)from + n - TASK_SIZE; ++ if (!__builtin_constant_p(n - over)) ++ check_object_size(to, n - over, false); ++ return __copy_tofrom_user((__force void __user *)to, from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, ++ const void *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_WRITE, to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ return __copy_tofrom_user(to, (__force void __user *)from, n); ++ } ++ if ((unsigned long)to < TASK_SIZE) { ++ over = (unsigned long)to + n - TASK_SIZE; ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n - over, true); ++ return __copy_tofrom_user(to, (__force void __user *)from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++#else /* __powerpc64__ */ ++ ++#define __copy_in_user(to, from, size) \ ++ __copy_tofrom_user((to), (from), (size)) ++ ++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ n = __copy_from_user(to, from, n); ++ else ++ memset(to, 0, n); ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (likely(access_ok(VERIFY_WRITE, to, n))) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ n = __copy_to_user(to, from, n); ++ } ++ return n; ++} ++ ++extern unsigned long copy_in_user(void __user *to, const void __user *from, ++ unsigned long n); ++ ++#endif /* __powerpc64__ */ ++ + extern unsigned long __clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile +index fcc9a89..07be2bb 100644 +--- a/arch/powerpc/kernel/Makefile ++++ b/arch/powerpc/kernel/Makefile +@@ -26,6 +26,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog + CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog + endif + ++CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS) ++ + obj-y := cputable.o ptrace.o syscalls.o \ + irq.o align.o signal_32.o pmc.o vdso.o \ + process.o systbl.o idle.o \ +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S +index 063b65d..7a26e9d 100644 +--- a/arch/powerpc/kernel/exceptions-64e.S ++++ b/arch/powerpc/kernel/exceptions-64e.S +@@ -771,6 +771,7 @@ 
storage_fault_common: + std r14,_DAR(r1) + std r15,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + mr r4,r14 + mr r5,r15 + ld r14,PACA_EXGEN+EX_R14(r13) +@@ -779,8 +780,7 @@ storage_fault_common: + cmpdi r3,0 + bne- 1f + b .ret_from_except_lite +-1: bl .save_nvgprs +- mr r5,r3 ++1: mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + ld r4,_DAR(r1) + bl .bad_page_fault +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S +index 38d5073..f00af8d 100644 +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -1584,10 +1584,10 @@ handle_page_fault: + 11: ld r4,_DAR(r1) + ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + bl .do_page_fault + cmpdi r3,0 + beq+ 12f +- bl .save_nvgprs + mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + lwz r4,_DAR(r1) +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c +index 1d0848b..d74685f 100644 +--- a/arch/powerpc/kernel/irq.c ++++ b/arch/powerpc/kernel/irq.c +@@ -447,6 +447,8 @@ void migrate_irqs(void) + } + #endif + ++extern void gr_handle_kernel_exploit(void); ++ + static inline void check_stack_overflow(void) + { + #ifdef CONFIG_DEBUG_STACKOVERFLOW +@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void) + printk("do_IRQ: stack overflow: %ld\n", + sp - sizeof(struct thread_info)); + dump_stack(); ++ gr_handle_kernel_exploit(); + } + #endif + } +diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c +index 6cff040..74ac5d1b 100644 +--- a/arch/powerpc/kernel/module_32.c ++++ b/arch/powerpc/kernel/module_32.c +@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, + me->arch.core_plt_section = i; + } + if (!me->arch.core_plt_section || !me->arch.init_plt_section) { +- printk("Module doesn't contain .plt or .init.plt sections.\n"); ++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); + return -ENOEXEC; + } + +@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location, + + DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); + /* Init, or core PLT? */ +- if (location >= mod->module_core +- && location < mod->module_core + mod->core_size) ++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || ++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; +- else ++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || ++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; ++ else { ++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); ++ return ~0UL; ++ } + + /* Find this entry, or if that fails, the next avail. 
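/*
 * [Editorial sketch] Under the split layout used throughout this patch,
 * the single module_core/core_size pair becomes an RX half (code) and an
 * RW half (data, GOT), so module memory is never writable and executable
 * at once; classifying an address then means testing two disjoint
 * intervals, as do_plt_call() above now does (the parisc in_init()/
 * in_core() hunks earlier also tighten the old "<=" end bound to "<").
 * Hypothetical model of the check:
 */
static inline int in_range(const void *loc, const void *base, unsigned long size)
{
	const char *p = loc, *b = base;

	return p >= b && p < b + size;
}

/* e.g.: in_range(loc, mod->module_core_rx, mod->core_size_rx) ||
 *       in_range(loc, mod->module_core_rw, mod->core_size_rw)  */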
entry */ + while (entry->jump[0]) { +@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, + } + #ifdef CONFIG_DYNAMIC_FTRACE + module->arch.tramp = +- do_plt_call(module->module_core, ++ do_plt_call(module->module_core_rx, + (unsigned long)ftrace_caller, + sechdrs, module); + #endif +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index 31d0215..206af70 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -1031,8 +1031,8 @@ void show_regs(struct pt_regs * regs) + * Lookup NIP late so we have the best change of getting the + * above info out without failing + */ +- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); +- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); ++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); ++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); + #endif + show_stack(current, (unsigned long *) regs->gpr[1]); + if (!user_mode(regs)) +@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + newsp = stack[0]; + ip = stack[STACK_FRAME_LR_SAVE]; + if (!firstframe || ip != lr) { +- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); ++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((ip == rth || ip == mrth) && curr_frame >= 0) { +- printk(" (%pS)", ++ printk(" (%pA)", + (void *)current->ret_stack[curr_frame].ret); + curr_frame--; + } +@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) + struct pt_regs *regs = (struct pt_regs *) + (sp + STACK_FRAME_OVERHEAD); + lr = regs->link; +- printk("--- Exception: %lx at %pS\n LR = %pS\n", ++ printk("--- Exception: %lx at %pA\n LR = %pA\n", + regs->trap, (void *)regs->nip, (void *)lr); + firstframe = 1; + } +@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void) + mtspr(SPRN_CTRLT, ctrl); + } + #endif /* CONFIG_PPC64 */ +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- return sp & ~0xf; +-} +- +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = 0; +- +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); +- else +- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); +- +- return rnd << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +-#ifdef CONFIG_PPC_STD_MMU_64 +- /* +- * If we are using 1TB segments and we are allowed to randomise +- * the heap, we can put it above 1TB so it is backed by a 1TB +- * segment. Otherwise the heap will be in the bottom 1TB +- * which always uses 256MB segments and this may result in a +- * performance penalty. 
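/*
 * [Editorial note] The brk_rnd()/arch_randomize_brk() code being deleted
 * in this hunk drew a random page count and shifted it into a byte
 * offset: 2^(23 - PAGE_SHIFT) pages span 2^23 bytes = 8 MiB for 32-bit
 * tasks, and 2^(30 - PAGE_SHIFT) pages span 1 GiB for 64-bit tasks. The
 * patch drops it in favour of PaX's own delta_mmap/delta_stack
 * randomisation applied in arch_pick_mmap_layout(). The removed
 * arithmetic, modelled stand-alone (PAGE_SHIFT assumed 12):
 */
static unsigned long brk_rnd_span(int is_32bit)
{
	unsigned long pages = 1UL << ((is_32bit ? 23 : 30) - 12);

	return pages << 12;	/* 8 MiB or 1 GiB of brk jitter */
}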
+- */ +- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) +- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); +-#endif +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < base) +- return base; +- +- return ret; +-} +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c +index 2e3d2bf..35df241 100644 +--- a/arch/powerpc/kernel/ptrace.c ++++ b/arch/powerpc/kernel/ptrace.c +@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * We must return the syscall number to actually look up in the table. + * This can be -1L to skip running any syscall at all. +@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs) + + secure_computing_strict(regs->gpr[0]); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + /* +@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs) + { + int step; + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c +index 4e47db6..6dcc96e 100644 +--- a/arch/powerpc/kernel/signal_32.c ++++ b/arch/powerpc/kernel/signal_32.c +@@ -1013,7 +1013,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, + /* Save user registers on the stack */ + frame = &rt_sf->uc.uc_mcontext; + addr = frame; +- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + sigret = 0; + tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; + } else { +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c +index d501dc4..e5a0de0 100644 +--- a/arch/powerpc/kernel/signal_64.c ++++ b/arch/powerpc/kernel/signal_64.c +@@ -760,7 +760,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, + current->thread.fp_state.fpscr = 0; + + /* Set up to return from userspace. 
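/*
 * [Editorial sketch] The signal and vdso hunks in this patch change the
 * "no vDSO mapped" convention: vdso_base used to be 0, but 0 can itself
 * be a legitimate mapping address, so arch_setup_additional_pages() now
 * initialises vdso_base to ~0UL and every consumer tests "!= ~0UL"
 * instead of plain truth. Shape of the test (hypothetical helper):
 */
#define VDSO_UNMAPPED	(~0UL)

static unsigned long sigtramp_addr(unsigned long vdso_base,
				   unsigned long tramp_off)
{
	if (vdso_base == VDSO_UNMAPPED)
		return 0;	/* caller falls back to an on-stack trampoline */
	return vdso_base + tramp_off;
}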
*/ +- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; + } else { + err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c +index 33cd7a0..d615344 100644 +--- a/arch/powerpc/kernel/traps.c ++++ b/arch/powerpc/kernel/traps.c +@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs) + return flags; + } + ++extern void gr_handle_kernel_exploit(void); ++ + static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, + int signr) + { +@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); ++ ++ gr_handle_kernel_exploit(); ++ + do_exit(signr); + } + +diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c +index 094e45c..d82b848 100644 +--- a/arch/powerpc/kernel/vdso.c ++++ b/arch/powerpc/kernel/vdso.c +@@ -35,6 +35,7 @@ + #include <asm/vdso.h> + #include <asm/vdso_datapage.h> + #include <asm/setup.h> ++#include <asm/mman.h> + + #undef DEBUG + +@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdso_base = VDSO32_MBASE; + #endif + +- current->mm->context.vdso_base = 0; ++ current->mm->context.vdso_base = ~0UL; + + /* vDSO has a problem and was disabled, just don't "enable" it for the + * process +@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdso_base = get_unmapped_area(NULL, vdso_base, + (vdso_pages << PAGE_SHIFT) + + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), +- 0, 0); ++ 0, MAP_PRIVATE | MAP_EXECUTABLE); + if (IS_ERR_VALUE(vdso_base)) { + rc = vdso_base; + goto fail_mmapsem; +diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c +index 3cf541a..ab2d825 100644 +--- a/arch/powerpc/kvm/powerpc.c ++++ b/arch/powerpc/kvm/powerpc.c +@@ -1153,7 +1153,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param) + } + EXPORT_SYMBOL_GPL(kvmppc_init_lpid); + +-int kvm_arch_init(void *opaque) ++int kvm_arch_init(const void *opaque) + { + return 0; + } +diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c +index 5eea6f3..5d10396 100644 +--- a/arch/powerpc/lib/usercopy_64.c ++++ b/arch/powerpc/lib/usercopy_64.c +@@ -9,22 +9,6 @@ + #include <linux/module.h> + #include <asm/uaccess.h> + +-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_READ, from, n))) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; +-} +- +-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_WRITE, to, n))) +- n = __copy_to_user(to, from, n); +- return n; +-} +- + unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n) + { +@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, + return n; + } + +-EXPORT_SYMBOL(copy_from_user); +-EXPORT_SYMBOL(copy_to_user); + EXPORT_SYMBOL(copy_in_user); + +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index 51ab9e7..7d3c78b 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -33,6 +33,10 @@ + #include <linux/magic.h> + #include <linux/ratelimit.h> + #include <linux/context_tracking.h> ++#include <linux/slab.h> 
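/*
 * [Editorial note] The uaccess hunks above (powerpc uaccess.h, and the
 * usercopy_64.c deletion just shown) move copy_to/from_user inline and
 * add two guards: a sign/size check that rejects lengths produced by
 * integer-overflow bugs before any copy happens, and check_object_size(),
 * the PAX_USERCOPY hook that verifies the kernel object really is n
 * bytes big. Condensed from the patch's 64-bit copy_from_user():
 */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	if ((long)n < 0 || n > INT_MAX)	/* wrapped or absurd length: refuse */
		return n;

	if (!__builtin_constant_p(n))	/* constant sizes are checked at build time */
		check_object_size(to, n, false);

	if (likely(access_ok(VERIFY_READ, from, n)))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);	/* never leave the kernel buffer uninitialised */
	return n;
}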
++#include <linux/pagemap.h> ++#include <linux/compiler.h> ++#include <linux/unistd.h> + + #include <asm/firmware.h> + #include <asm/page.h> +@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs) + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (regs->nip = fault address) ++ * ++ * returns 1 when task should be killed ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int __user *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * Check whether the instruction at regs->nip is a store using + * an update addressing form which will update r1. +@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, + * indicate errors in DSISR but can validly be set in SRR1. + */ + if (trap == 0x400) +- error_code &= 0x48200000; ++ error_code &= 0x58200000; + else + is_write = error_code & DSISR_ISSTORE; + #else +@@ -378,7 +409,7 @@ good_area: + * "undefined". Of those that can be set, this is the only + * one which seems bad. + */ +- if (error_code & 0x10000000) ++ if (error_code & DSISR_GUARDED) + /* Guarded storage error. */ + goto bad_area; + #endif /* CONFIG_8xx */ +@@ -393,7 +424,7 @@ good_area: + * processors use the same I/D cache coherency mechanism + * as embedded. + */ +- if (error_code & DSISR_PROTFAULT) ++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) + goto bad_area; + #endif /* CONFIG_PPC_STD_MMU */ + +@@ -483,6 +514,23 @@ bad_area: + bad_area_nosemaphore: + /* User mode accesses cause a SIGSEGV */ + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++#ifdef CONFIG_PPC_STD_MMU ++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { ++#else ++ if (is_exec && regs->nip == address) { ++#endif ++ switch (pax_handle_fetch_fault(regs)) { ++ } ++ ++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + _exception(SIGSEGV, regs, code, address); + goto bail; + } +diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c +index cb8bdbe..cde4bc7 100644 +--- a/arch/powerpc/mm/mmap.c ++++ b/arch/powerpc/mm/mmap.c +@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void) + return sysctl_legacy_va_layout; + } + +-static unsigned long mmap_rnd(void) ++static unsigned long mmap_rnd(struct mm_struct *mm) + { + unsigned long rnd = 0; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (current->flags & PF_RANDOMIZE) { + /* 8MB for 32bit, 1GB for 64bit */ + if (is_32bit_task()) +@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void) + return rnd << PAGE_SHIFT; + } + +-static inline unsigned long mmap_base(void) ++static inline unsigned long mmap_base(struct mm_struct *mm) + { + unsigned long gap = rlimit(RLIMIT_STACK); + +@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void) + else if (gap > MAX_GAP) + gap = MAX_GAP; + +- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); ++ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm)); + } + + /* +@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if 
(mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + } else { +- mm->mmap_base = mmap_base(); ++ mm->mmap_base = mmap_base(mm); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } + } +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index b0c75cc..ef7fb93 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return check_heap_stack_gap(vma, addr, len, 0); + } + + static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) +@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm, + info.align_offset = 0; + + addr = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ addr += mm->delta_mmap; ++#endif ++ + while (addr < TASK_SIZE) { + info.low_limit = addr; + if (!slice_scan_available(addr, available, 1, &addr)) +@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + if (fixed && addr > (mm->task_size - len)) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) ++ addr = 0; ++#endif ++ + /* If hint, make sure it matches our alignment restrictions */ + if (!fixed && addr) { + addr = _ALIGN_UP(addr, 1ul << pshift); +diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c +index 4278acf..67fd0e6 100644 +--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c ++++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c +@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn, + } + + static struct pci_ops scc_pciex_pci_ops = { +- scc_pciex_read_config, +- scc_pciex_write_config, ++ .read = scc_pciex_read_config, ++ .write = scc_pciex_write_config, + }; + + static void pciex_clear_intr_all(unsigned int __iomem *base) +diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c +index 9098692..3d54cd1 100644 +--- a/arch/powerpc/platforms/cell/spufs/file.c ++++ b/arch/powerpc/platforms/cell/spufs/file.c +@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + return VM_FAULT_NOPAGE; + } + +-static int spufs_mem_mmap_access(struct vm_area_struct *vma, ++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma, + unsigned long address, +- void *buf, int len, int write) ++ void *buf, size_t len, int write) + { + struct spu_context *ctx = vma->vm_file->private_data; + unsigned long offset = address - vma->vm_start; +diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h +index 1d47061..0714963 100644 +--- a/arch/s390/include/asm/atomic.h ++++ b/arch/s390/include/asm/atomic.h +@@ -412,6 +412,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) + #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) 
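/*
 * [Editorial sketch] The *_unchecked aliases that appear on every
 * architecture in this patch exist because PAX_REFCOUNT instruments the
 * plain atomic ops to trap on overflow; counters that legitimately wrap
 * (statistics, cookies) use the unchecked variants instead. On
 * architectures without the instrumentation, as in these hunks, the
 * unchecked type and ops simply alias the plain ones:
 */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;

#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_add_unchecked(i, v)	atomic_add((i), (v))
#endif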
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() + #define smp_mb__before_atomic_inc() smp_mb() +diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h +index 578680f..0eb3b11 100644 +--- a/arch/s390/include/asm/barrier.h ++++ b/arch/s390/include/asm/barrier.h +@@ -36,7 +36,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h +index 4d7ccac..d03d0ad 100644 +--- a/arch/s390/include/asm/cache.h ++++ b/arch/s390/include/asm/cache.h +@@ -9,8 +9,10 @@ + #ifndef __ARCH_S390_CACHE_H + #define __ARCH_S390_CACHE_H + +-#define L1_CACHE_BYTES 256 ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 8 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + #define NET_SKB_PAD 32 + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) +diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h +index 78f4f87..598ce39 100644 +--- a/arch/s390/include/asm/elf.h ++++ b/arch/s390/include/asm/elf.h +@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) ++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26) ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
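/*
 * [Editorial note] PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN give the
 * number of random bits PaX mixes into the mmap and stack bases, in page
 * units. With 4 KiB pages, N bits of page-granular jitter cover a
 * 2^(N + 12) byte window: the 15 bits chosen above for 31-bit s390 tasks
 * span 128 MiB, the 26 bits for 64-bit tasks span 256 GiB. Illustrative
 * arithmetic:
 */
static unsigned long aslr_window(unsigned int delta_len)
{
	return 1UL << (delta_len + 12);	/* 12 == PAGE_SHIFT for 4 KiB pages */
}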
*/ +@@ -222,9 +228,6 @@ struct linux_binprm; + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + int arch_setup_additional_pages(struct linux_binprm *, int); + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + void *fill_cpu_elf_notes(void *ptr, struct save_area *sa); + + #endif +diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h +index c4a93d6..4d2a9b4 100644 +--- a/arch/s390/include/asm/exec.h ++++ b/arch/s390/include/asm/exec.h +@@ -7,6 +7,6 @@ + #ifndef __ASM_EXEC_H + #define __ASM_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + + #endif /* __ASM_EXEC_H */ +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index 79330af..254cf37 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size) + __range_ok((unsigned long)(addr), (size)); \ + }) + ++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) + #define access_ok(type, addr, size) __access_ok(addr, size) + + /* +@@ -245,6 +246,10 @@ static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + return __copy_to_user(to, from, n); + } + +@@ -268,6 +273,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n) + static inline unsigned long __must_check + __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + return uaccess.copy_from_user(n, from, to); + } + +@@ -296,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct") + static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long n) + { +- unsigned int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + + might_fault(); +- if (unlikely(sz != -1 && sz < n)) { ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (unlikely(sz != (size_t)-1 && sz < n)) { + copy_from_user_overflow(); + return n; + } +diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c +index b89b591..fd9609d 100644 +--- a/arch/s390/kernel/module.c ++++ b/arch/s390/kernel/module.c +@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + + /* Increase core size by size of got & plt and set start + offsets for got and plt. 
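/*
 * [Editorial sketch] The hunk below carves the GOT out of the RW half
 * (the loader writes GOT entries) but the PLT out of the RX half (PLT
 * slots are executable stubs), where the old code grew a single
 * module_core. A hypothetical reservation helper for that layout step:
 */
static unsigned long reserve(unsigned long *region_size, unsigned long len,
			     unsigned long align)	/* align: power of two */
{
	unsigned long off = (*region_size + align - 1) & ~(align - 1);

	*region_size = off + len;
	return off;	/* offset of the new chunk within its region */
}

/* got_offset = reserve(&core_size_rw, got_size, 4);
 * plt_offset = reserve(&core_size_rx, plt_size, 4); */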
*/ +- me->core_size = ALIGN(me->core_size, 4); +- me->arch.got_offset = me->core_size; +- me->core_size += me->arch.got_size; +- me->arch.plt_offset = me->core_size; +- me->core_size += me->arch.plt_size; ++ me->core_size_rw = ALIGN(me->core_size_rw, 4); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += me->arch.got_size; ++ me->arch.plt_offset = me->core_size_rx; ++ me->core_size_rx += me->arch.plt_size; + return 0; + } + +@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + if (info->got_initialized == 0) { + Elf_Addr *gotent; + +- gotent = me->module_core + me->arch.got_offset + ++ gotent = me->module_core_rw + me->arch.got_offset + + info->got_offset; + *gotent = val; + info->got_initialized = 1; +@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + rc = apply_rela_bits(loc, val, 0, 64, 0); + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) { +- val += (Elf_Addr) me->module_core - loc; ++ val += (Elf_Addr) me->module_core_rw - loc; + rc = apply_rela_bits(loc, val, 1, 32, 1); + } + break; +@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ + if (info->plt_initialized == 0) { + unsigned int *ip; +- ip = me->module_core + me->arch.plt_offset + ++ ip = me->module_core_rx + me->arch.plt_offset + + info->plt_offset; + #ifndef CONFIG_64BIT + ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ +@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + val - loc + 0xffffUL < 0x1ffffeUL) || + (r_type == R_390_PLT32DBL && + val - loc + 0xffffffffULL < 0x1fffffffeULL))) +- val = (Elf_Addr) me->module_core + ++ val = (Elf_Addr) me->module_core_rx + + me->arch.plt_offset + + info->plt_offset; + val += rela->r_addend - loc; +@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + case R_390_GOTOFF32: /* 32 bit offset to GOT. */ + case R_390_GOTOFF64: /* 64 bit offset to GOT. */ + val = val + rela->r_addend - +- ((Elf_Addr) me->module_core + me->arch.got_offset); ++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset); + if (r_type == R_390_GOTOFF16) + rc = apply_rela_bits(loc, val, 0, 16, 0); + else if (r_type == R_390_GOTOFF32) +@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + break; + case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ + case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
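/*
 * [Editorial note] R_390_GOTENT/GOTPCDBL store a PC-relative offset in
 * halfwords (s390 relative-long instructions such as larl address code
 * in 2-byte units), which is the ">> 1" implied by the shift argument of
 * apply_rela_bits(..., 1, 32, 1) above. With the split layout the
 * distance is measured from module_core_rw, where the GOT now lives.
 * Sketch of the encoded value:
 */
static unsigned int gotent_field(unsigned long got_entry, unsigned long loc)
{
	return (unsigned int)((got_entry - loc) >> 1);	/* halfword offset */
}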
*/ +- val = (Elf_Addr) me->module_core + me->arch.got_offset + ++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + + rela->r_addend - loc; + if (r_type == R_390_GOTPC) + rc = apply_rela_bits(loc, val, 1, 32, 0); +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c +index dd14532..1dfc145 100644 +--- a/arch/s390/kernel/process.c ++++ b/arch/s390/kernel/process.c +@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p) + } + return 0; + } +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- return sp & ~0xf; +-} +- +-static inline unsigned long brk_rnd(void) +-{ +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; +- else +- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long ret; +- +- ret = PAGE_ALIGN(mm->brk + brk_rnd()); +- return (ret > mm->brk) ? ret : mm->brk; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret; +- +- if (!(current->flags & PF_RANDOMIZE)) +- return base; +- ret = PAGE_ALIGN(base + brk_rnd()); +- return (ret > base) ? ret : base; +-} +diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c +index 9b436c2..54fbf0a 100644 +--- a/arch/s390/mm/mmap.c ++++ b/arch/s390/mm/mmap.c +@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = mmap_base_legacy(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } + } +@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + */ + if (mmap_is_legacy()) { + mm->mmap_base = mmap_base_legacy(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area_topdown; + } + } +diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h +index ae3d59f..f65f075 100644 +--- a/arch/score/include/asm/cache.h ++++ b/arch/score/include/asm/cache.h +@@ -1,7 +1,9 @@ + #ifndef _ASM_SCORE_CACHE_H + #define _ASM_SCORE_CACHE_H + ++#include <linux/const.h> ++ + #define L1_CACHE_SHIFT 4 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif /* _ASM_SCORE_CACHE_H */ +diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h +index f9f3cd5..58ff438 100644 +--- a/arch/score/include/asm/exec.h ++++ b/arch/score/include/asm/exec.h +@@ -1,6 +1,6 @@ + #ifndef _ASM_SCORE_EXEC_H + #define _ASM_SCORE_EXEC_H + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) (x) + + #endif /* _ASM_SCORE_EXEC_H */ +diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c +index a1519ad3..e8ac1ff 100644 +--- a/arch/score/kernel/process.c ++++ 
b/arch/score/kernel/process.c +@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task) + + return task_pt_regs(task)->cp0_epc; + } +- +-unsigned long arch_align_stack(unsigned long sp) +-{ +- return sp; +-} +diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h +index ef9e555..331bd29 100644 +--- a/arch/sh/include/asm/cache.h ++++ b/arch/sh/include/asm/cache.h +@@ -9,10 +9,11 @@ + #define __ASM_SH_CACHE_H + #ifdef __KERNEL__ + ++#include <linux/const.h> + #include <linux/init.h> + #include <cpu/cache.h> + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c +index 6777177..cb5e44f 100644 +--- a/arch/sh/mm/mmap.c ++++ b/arch/sh/mm/mmap.c +@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int do_colour_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { +@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (filp || (flags & MAP_SHARED)) + do_colour_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_colour_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + + info.flags = 0; + info.length = len; +- info.low_limit = TASK_UNMAPPED_BASE; ++ info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = do_colour_align ? 
(PAGE_MASK & shm_align_mask) : 0; + info.align_offset = pgoff << PAGE_SHIFT; +@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + int do_colour_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { +@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (filp || (flags & MAP_SHARED)) + do_colour_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + if (do_colour_align) +@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h +index be56a24..443328f 100644 +--- a/arch/sparc/include/asm/atomic_64.h ++++ b/arch/sparc/include/asm/atomic_64.h +@@ -14,18 +14,40 @@ + #define ATOMIC64_INIT(i) { (i) } + + #define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} + #define atomic64_read(v) (*(volatile long *)&(v)->counter) ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return v->counter; ++} + + #define atomic_set(v, i) (((v)->counter) = i) ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + #define atomic64_set(v, i) (((v)->counter) = i) ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} + + extern void atomic_add(int, atomic_t *); ++extern void atomic_add_unchecked(int, atomic_unchecked_t *); + extern void atomic64_add(long, atomic64_t *); ++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); + extern void atomic_sub(int, atomic_t *); ++extern void atomic_sub_unchecked(int, atomic_unchecked_t *); + extern void atomic64_sub(long, atomic64_t *); ++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); + + extern int atomic_add_ret(int, atomic_t *); ++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); + extern long atomic64_add_ret(long, atomic64_t *); ++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); + extern int atomic_sub_ret(int, atomic_t *); + extern long atomic64_sub_ret(long, atomic64_t *); + +@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *); + #define atomic64_dec_return(v) atomic64_sub_ret(1, v) + + #define atomic_inc_return(v) atomic_add_ret(1, v) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(1, v); ++} + #define atomic64_inc_return(v) atomic64_add_ret(1, v) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return 
atomic64_add_ret_unchecked(1, v); ++} + + #define atomic_sub_return(i, v) atomic_sub_ret(i, v) + #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) + + #define atomic_add_return(i, v) atomic_add_ret(i, v) ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(i, v); ++} + #define atomic64_add_return(i, v) atomic64_add_ret(i, v) ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ return atomic64_add_ret_unchecked(i, v); ++} + + /* + * atomic_inc_and_test - increment and test +@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *); + * other cases. + */ + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_inc_return_unchecked(v) == 0; ++} + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) + + #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) +@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *); + #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) + + #define atomic_inc(v) atomic_add(1, v) ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + #define atomic64_inc(v) atomic64_add(1, v) ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_add_unchecked(1, v); ++} + + #define atomic_dec(v) atomic_sub(1, v) ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + #define atomic64_dec(v) atomic64_sub(1, v) ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_sub_unchecked(1, v); ++} + + #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) + #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) + + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} + #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline int __atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%icc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; +@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + #define atomic64_cmpxchg(v, o, n) \ + ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) + #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) ++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) ++{ ++ return xchg(&v->counter, new); ++} + + static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%xcc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = 
atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h +index b5aad96..99d7465 100644 +--- a/arch/sparc/include/asm/barrier_64.h ++++ b/arch/sparc/include/asm/barrier_64.h +@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ + do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h +index 5bb6991..5c2132e 100644 +--- a/arch/sparc/include/asm/cache.h ++++ b/arch/sparc/include/asm/cache.h +@@ -7,10 +7,12 @@ + #ifndef _SPARC_CACHE_H + #define _SPARC_CACHE_H + ++#include <linux/const.h> ++ + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) + + #define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES 32 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #ifdef CONFIG_SPARC32 + #define SMP_CACHE_BYTES_SHIFT 5 +diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h +index a24e41f..47677ff 100644 +--- a/arch/sparc/include/asm/elf_32.h ++++ b/arch/sparc/include/asm/elf_32.h +@@ -114,6 +114,13 @@ typedef struct { + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This can NOT be done in userspace + on Sparc. */ +diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h +index 370ca1e..d4f4a98 100644 +--- a/arch/sparc/include/asm/elf_64.h ++++ b/arch/sparc/include/asm/elf_64.h +@@ -189,6 +189,13 @@ typedef struct { + #define ELF_ET_DYN_BASE 0x0000010000000000UL + #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 
15 : 29) ++#endif ++ + extern unsigned long sparc64_elf_hwcap; + #define ELF_HWCAP sparc64_elf_hwcap + +diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h +index 9b1c36d..209298b 100644 +--- a/arch/sparc/include/asm/pgalloc_32.h ++++ b/arch/sparc/include/asm/pgalloc_32.h +@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) + } + + #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) ++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD)) + + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, + unsigned long address) +diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h +index bcfe063..b333142 100644 +--- a/arch/sparc/include/asm/pgalloc_64.h ++++ b/arch/sparc/include/asm/pgalloc_64.h +@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) + } + + #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) ++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD)) + + static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) + { +diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h +index 59ba6f6..4518128 100644 +--- a/arch/sparc/include/asm/pgtable.h ++++ b/arch/sparc/include/asm/pgtable.h +@@ -5,4 +5,8 @@ + #else + #include <asm/pgtable_32.h> + #endif ++ ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #endif +diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h +index 502f632..da1917f 100644 +--- a/arch/sparc/include/asm/pgtable_32.h ++++ b/arch/sparc/include/asm/pgtable_32.h +@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void); + #define PAGE_SHARED SRMMU_PAGE_SHARED + #define PAGE_COPY SRMMU_PAGE_COPY + #define PAGE_READONLY SRMMU_PAGE_RDONLY ++#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC ++#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC ++#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC + #define PAGE_KERNEL SRMMU_PAGE_KERNEL + + /* Top-level page directory - dummy used by init-mm. 
+@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd; + + /* xwr */ + #define __P000 PAGE_NONE +-#define __P001 PAGE_READONLY +-#define __P010 PAGE_COPY +-#define __P011 PAGE_COPY ++#define __P001 PAGE_READONLY_NOEXEC ++#define __P010 PAGE_COPY_NOEXEC ++#define __P011 PAGE_COPY_NOEXEC + #define __P100 PAGE_READONLY + #define __P101 PAGE_READONLY + #define __P110 PAGE_COPY + #define __P111 PAGE_COPY + + #define __S000 PAGE_NONE +-#define __S001 PAGE_READONLY +-#define __S010 PAGE_SHARED +-#define __S011 PAGE_SHARED ++#define __S001 PAGE_READONLY_NOEXEC ++#define __S010 PAGE_SHARED_NOEXEC ++#define __S011 PAGE_SHARED_NOEXEC + #define __S100 PAGE_READONLY + #define __S101 PAGE_READONLY + #define __S110 PAGE_SHARED +diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h +index 79da178..c2eede8 100644 +--- a/arch/sparc/include/asm/pgtsrmmu.h ++++ b/arch/sparc/include/asm/pgtsrmmu.h +@@ -115,6 +115,11 @@ + SRMMU_EXEC | SRMMU_REF) + #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_EXEC | SRMMU_REF) ++ ++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) ++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++ + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ + SRMMU_DIRTY | SRMMU_REF) + +diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h +index 9689176..63c18ea 100644 +--- a/arch/sparc/include/asm/spinlock_64.h ++++ b/arch/sparc/include/asm/spinlock_64.h +@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla + + /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
*/ + +-static void inline arch_read_lock(arch_rwlock_t *lock) ++static inline void arch_read_lock(arch_rwlock_t *lock) + { + unsigned long tmp1, tmp2; + + __asm__ __volatile__ ( + "1: ldsw [%2], %0\n" + " brlz,pn %0, 2f\n" +-"4: add %0, 1, %1\n" ++"4: addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock) + " .previous" + : "=&r" (tmp1), "=&r" (tmp2) + : "r" (lock) +- : "memory"); ++ : "memory", "cc"); + } + +-static int inline arch_read_trylock(arch_rwlock_t *lock) ++static inline int arch_read_trylock(arch_rwlock_t *lock) + { + int tmp1, tmp2; + +@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) + "1: ldsw [%2], %0\n" + " brlz,a,pn %0, 2f\n" + " mov 0, %0\n" +-" add %0, 1, %1\n" ++" addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) + return tmp1; + } + +-static void inline arch_read_unlock(arch_rwlock_t *lock) ++static inline void arch_read_unlock(arch_rwlock_t *lock) + { + unsigned long tmp1, tmp2; + + __asm__ __volatile__( + "1: lduw [%2], %0\n" +-" sub %0, 1, %1\n" ++" subcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%xcc, 1b\n" +@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) + : "memory"); + } + +-static void inline arch_write_lock(arch_rwlock_t *lock) ++static inline void arch_write_lock(arch_rwlock_t *lock) + { + unsigned long mask, tmp1, tmp2; + +@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock) + : "memory"); + } + +-static void inline arch_write_unlock(arch_rwlock_t *lock) ++static inline void arch_write_unlock(arch_rwlock_t *lock) + { + __asm__ __volatile__( + " stw %%g0, [%0]" +@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock) + : "memory"); + } + +-static int inline arch_write_trylock(arch_rwlock_t *lock) ++static inline int arch_write_trylock(arch_rwlock_t *lock) + { + unsigned long mask, tmp1, tmp2, result; + +diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h +index 96efa7a..16858bf 100644 +--- a/arch/sparc/include/asm/thread_info_32.h ++++ b/arch/sparc/include/asm/thread_info_32.h +@@ -49,6 +49,8 @@ struct thread_info { + unsigned long w_saved; + + struct restart_block restart_block; ++ ++ unsigned long lowest_stack; + }; + + /* +diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h +index a5f01ac..703b554 100644 +--- a/arch/sparc/include/asm/thread_info_64.h ++++ b/arch/sparc/include/asm/thread_info_64.h +@@ -63,6 +63,8 @@ struct thread_info { + struct pt_regs *kern_una_regs; + unsigned int kern_una_insn; + ++ unsigned long lowest_stack; ++ + unsigned long fpregs[0] __attribute__ ((aligned(64))); + }; + +@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6"); + #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ + /* flag bit 4 is available */ + #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ +-/* flag bit 6 is available */ ++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */ + #define TIF_32BIT 7 /* 32-bit binary */ + #define TIF_NOHZ 8 /* in adaptive nohz mode */ + #define 
TIF_SECCOMP 9 /* secure computing */ + #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ ++ + /* NOTE: Thread flags >= 12 should be ones we have no interest + * in using in assembly, else we can't use the mask as + * an immediate value in instructions such as andcc. +@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6"); + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) ++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) + + #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \ + _TIF_DO_NOTIFY_RESUME_MASK | \ + _TIF_NEED_RESCHED) + #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING) + ++#define _TIF_WORK_SYSCALL \ ++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \ ++ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID) ++ ++ + /* + * Thread-synchronous status. + * +diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h +index 0167d26..767bb0c 100644 +--- a/arch/sparc/include/asm/uaccess.h ++++ b/arch/sparc/include/asm/uaccess.h +@@ -1,5 +1,6 @@ + #ifndef ___ASM_SPARC_UACCESS_H + #define ___ASM_SPARC_UACCESS_H ++ + #if defined(__sparc__) && defined(__arch64__) + #include <asm/uaccess_64.h> + #else +diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h +index 53a28dd..50c38c3 100644 +--- a/arch/sparc/include/asm/uaccess_32.h ++++ b/arch/sparc/include/asm/uaccess_32.h +@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig + + static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) to, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); + return __copy_user(to, (__force void __user *) from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_user(to, (__force void __user *) from, n); + } + + static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) from, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); + return __copy_user((__force void __user *) to, from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + return __copy_user((__force void __user *) to, from, n); + } + +diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h +index ad7e178..c9e7423 100644 +--- a/arch/sparc/include/asm/uaccess_64.h ++++ b/arch/sparc/include/asm/uaccess_64.h +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <linux/string.h> + #include <linux/thread_info.h> ++#include <linux/kernel.h> + #include <asm/asi.h> + #include <asm/spitfire.h> + #include <asm-generic/uaccess-unaligned.h> +@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, + 
static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long size) + { +- unsigned long ret = ___copy_from_user(to, from, size); ++ unsigned long ret; + ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(to, size, false); ++ ++ ret = ___copy_from_user(to, from, size); + if (unlikely(ret)) + ret = copy_from_user_fixup(to, from, size); + +@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from, + static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long size) + { +- unsigned long ret = ___copy_to_user(to, from, size); ++ unsigned long ret; + ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(from, size, true); ++ ++ ret = ___copy_to_user(to, from, size); + if (unlikely(ret)) + ret = copy_to_user_fixup(to, from, size); + return ret; +diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile +index d15cc17..d0ae796 100644 +--- a/arch/sparc/kernel/Makefile ++++ b/arch/sparc/kernel/Makefile +@@ -4,7 +4,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + extra-y := head_$(BITS).o + +diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c +index 510baec..9ff2607 100644 +--- a/arch/sparc/kernel/process_32.c ++++ b/arch/sparc/kernel/process_32.c +@@ -115,14 +115,14 @@ void show_regs(struct pt_regs *r) + + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", + r->psr, r->pc, r->npc, r->y, print_tainted()); +- printk("PC: <%pS>\n", (void *) r->pc); ++ printk("PC: <%pA>\n", (void *) r->pc); + printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], + r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); + printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], + r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); +- printk("RPC: <%pS>\n", (void *) r->u_regs[15]); ++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]); + + printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], +@@ -159,7 +159,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + rw = (struct reg_window32 *) fp; + pc = rw->ins[7]; + printk("[%08lx : ", pc); +- printk("%pS ] ", (void *) pc); ++ printk("%pA ] ", (void *) pc); + fp = rw->ins[6]; + } while (++count < 16); + printk("\n"); +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c +index d7b4967..2edf827 100644 +--- a/arch/sparc/kernel/process_64.c ++++ b/arch/sparc/kernel/process_64.c +@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs) + printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", + rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); + if (regs->tstate & TSTATE_PRIV) +- printk("I7: <%pS>\n", (void *) rwk->ins[7]); ++ printk("I7: <%pA>\n", (void *) rwk->ins[7]); + } + + void show_regs(struct pt_regs *regs) +@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs) + + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, + regs->tpc, regs->tnpc, regs->y, print_tainted()); +- printk("TPC: <%pS>\n", (void *) regs->tpc); ++ printk("TPC: <%pA>\n", (void *) regs->tpc); + printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", + regs->u_regs[0], regs->u_regs[1], 
regs->u_regs[2], + regs->u_regs[3]); +@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs) + printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", + regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], + regs->u_regs[15]); +- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); ++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]); + show_regwindow(regs); + show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); + } +@@ -272,7 +272,7 @@ void arch_trigger_all_cpu_backtrace(void) + ((tp && tp->task) ? tp->task->pid : -1)); + + if (gp->tstate & TSTATE_PRIV) { +- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", ++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n", + (void *) gp->tpc, + (void *) gp->o7, + (void *) gp->i7, +diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c +index 79cc0d1..ec62734 100644 +--- a/arch/sparc/kernel/prom_common.c ++++ b/arch/sparc/kernel/prom_common.c +@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf) + + unsigned int prom_early_allocated __initdata; + +-static struct of_pdt_ops prom_sparc_ops __initdata = { ++static struct of_pdt_ops prom_sparc_ops __initconst = { + .nextprop = prom_common_nextprop, + .getproplen = prom_getproplen, + .getproperty = prom_getproperty, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index c13c9f2..d572c34 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request, + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + asmlinkage int syscall_trace_enter(struct pt_regs *regs) + { + int ret = 0; +@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = tracehook_report_syscall_entry(regs); + +@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c +index 8416d7f..f83823c 100644 +--- a/arch/sparc/kernel/smp_64.c ++++ b/arch/sparc/kernel/smp_64.c +@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah; + extern unsigned long xcall_flush_dcache_page_spitfire; + + #ifdef CONFIG_DEBUG_DCFLUSH +-extern atomic_t dcpage_flushes; +-extern atomic_t dcpage_flushes_xcall; ++extern atomic_unchecked_t dcpage_flushes; ++extern atomic_unchecked_t dcpage_flushes_xcall; + #endif + + static inline void __local_flush_dcache_page(struct page *page) +@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) + return; + + #ifdef CONFIG_DEBUG_DCFLUSH +- atomic_inc(&dcpage_flushes); ++ atomic_inc_unchecked(&dcpage_flushes); + #endif + + this_cpu = get_cpu(); +@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) + xcall_deliver(data0, __pa(pg_addr), + (u64) pg_addr, cpumask_of(cpu)); + #ifdef CONFIG_DEBUG_DCFLUSH +- atomic_inc(&dcpage_flushes_xcall); ++ atomic_inc_unchecked(&dcpage_flushes_xcall); + #endif + } + 
} +@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) + preempt_disable(); + + #ifdef CONFIG_DEBUG_DCFLUSH +- atomic_inc(&dcpage_flushes); ++ atomic_inc_unchecked(&dcpage_flushes); + #endif + data0 = 0; + pg_addr = page_address(page); +@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) + xcall_deliver(data0, __pa(pg_addr), + (u64) pg_addr, cpu_online_mask); + #ifdef CONFIG_DEBUG_DCFLUSH +- atomic_inc(&dcpage_flushes_xcall); ++ atomic_inc_unchecked(&dcpage_flushes_xcall); + #endif + } + __local_flush_dcache_page(page); +diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c +index 3a8d184..49498a8 100644 +--- a/arch/sparc/kernel/sys_sparc_32.c ++++ b/arch/sparc/kernel/sys_sparc_32.c +@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (len > TASK_SIZE - PAGE_SIZE) + return -ENOMEM; + if (!addr) +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + + info.flags = 0; + info.length = len; +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c +index beb0b5a..5a153f7 100644 +--- a/arch/sparc/kernel/sys_sparc_64.c ++++ b/arch/sparc/kernel/sys_sparc_64.c +@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + struct vm_area_struct * vma; + unsigned long task_size = TASK_SIZE; + int do_color_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOR_ALIGN(addr, pgoff); +@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + + info.flags = 0; + info.length = len; +- info.low_limit = TASK_UNMAPPED_BASE; ++ info.low_limit = mm->mmap_base; + info.high_limit = min(task_size, VA_EXCLUDE_START); + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { + VM_BUG_ON(addr != -ENOMEM); + info.low_limit = VA_EXCLUDE_END; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = task_size; + addr = vm_unmapped_area(&info); + } +@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + unsigned long task_size = STACK_TOP32; + unsigned long addr = addr0; + int do_color_align; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + struct vm_unmapped_area_info info; + + /* This should only ever run for 32-bit processes. 
*/ +@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + if (do_color_align) +@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = STACK_TOP32; + addr = vm_unmapped_area(&info); + } +@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u + EXPORT_SYMBOL(get_fb_unmapped_area); + + /* Essentially the same as PowerPC. 
*/ +-static unsigned long mmap_rnd(void) ++static unsigned long mmap_rnd(struct mm_struct *mm) + { + unsigned long rnd = 0UL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (current->flags & PF_RANDOMIZE) { + unsigned long val = get_random_int(); + if (test_thread_flag(TIF_32BIT)) +@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void) + + void arch_pick_mmap_layout(struct mm_struct *mm) + { +- unsigned long random_factor = mmap_rnd(); ++ unsigned long random_factor = mmap_rnd(mm); + unsigned long gap; + + /* +@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + gap == RLIM_INFINITY || + sysctl_legacy_va_layout) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + /* We know it's 32-bit */ +@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) + gap = (task_size / 6 * 5); + + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } + } +diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S +index 33a17e7..d87fb1f 100644 +--- a/arch/sparc/kernel/syscalls.S ++++ b/arch/sparc/kernel/syscalls.S +@@ -52,7 +52,7 @@ sys32_rt_sigreturn: + #endif + .align 32 + 1: ldx [%g6 + TI_FLAGS], %l5 +- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 ++ andcc %l5, _TIF_WORK_SYSCALL, %g0 + be,pt %icc, rtrap + nop + call syscall_trace_leave +@@ -184,7 +184,7 @@ linux_sparc_syscall32: + + srl %i3, 0, %o3 ! IEU0 + srl %i2, 0, %o2 ! IEU0 Group +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + bne,pn %icc, linux_syscall_trace32 ! CTI + mov %i0, %l5 ! IEU1 + 5: call %l7 ! CTI Group brk forced +@@ -208,7 +208,7 @@ linux_sparc_syscall: + + mov %i3, %o3 ! IEU1 + mov %i4, %o4 ! IEU0 Group +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + bne,pn %icc, linux_syscall_trace ! CTI Group + mov %i0, %l5 ! IEU0 + 2: call %l7 ! CTI Group brk forced +@@ -223,7 +223,7 @@ ret_sys_call: + + cmp %o0, -ERESTART_RESTARTBLOCK + bgeu,pn %xcc, 1f +- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 ++ andcc %l0, _TIF_WORK_SYSCALL, %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! 
pc = npc + + 2: +diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c +index 6629829..036032d 100644 +--- a/arch/sparc/kernel/traps_32.c ++++ b/arch/sparc/kernel/traps_32.c +@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc) + #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") + #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") + ++extern void gr_handle_kernel_exploit(void); ++ + void die_if_kernel(char *str, struct pt_regs *regs) + { + static int die_counter; +@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs) + count++ < 30 && + (((unsigned long) rw) >= PAGE_OFFSET) && + !(((unsigned long) rw) & 0x7)) { +- printk("Caller[%08lx]: %pS\n", rw->ins[7], ++ printk("Caller[%08lx]: %pA\n", rw->ins[7], + (void *) rw->ins[7]); + rw = (struct reg_window32 *)rw->ins[6]; + } + } + printk("Instruction DUMP:"); + instruction_dump ((unsigned long *) regs->pc); +- if(regs->psr & PSR_PS) ++ if(regs->psr & PSR_PS) { ++ gr_handle_kernel_exploit(); + do_exit(SIGKILL); ++ } + do_exit(SIGSEGV); + } + +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c +index 4ced92f..965eeed 100644 +--- a/arch/sparc/kernel/traps_64.c ++++ b/arch/sparc/kernel/traps_64.c +@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) + i + 1, + p->trapstack[i].tstate, p->trapstack[i].tpc, + p->trapstack[i].tnpc, p->trapstack[i].tt); +- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); ++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc); + } + } + +@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl) + + lvl -= 0x100; + if (regs->tstate & TSTATE_PRIV) { ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + sprintf(buffer, "Kernel bad sw trap %lx", lvl); + die_if_kernel(buffer, regs); + } +@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl) + void bad_trap_tl1(struct pt_regs *regs, long lvl) + { + char buffer[32]; +- ++ + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) + return; + ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + sprintf (buffer, "Bad trap %lx at tl>0", lvl); +@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in + regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); + printk("%s" "ERROR(%d): ", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); +- printk("TPC<%pS>\n", (void *) regs->tpc); ++ printk("TPC<%pA>\n", (void *) regs->tpc); + printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), + (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, +@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) + smp_processor_id(), + (type & 0x1) ? 'I' : 'D', + regs->tpc); +- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc); + panic("Irrecoverable Cheetah+ parity error."); + } + +@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) + smp_processor_id(), + (type & 0x1) ? 
'I' : 'D', + regs->tpc); +- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc); + } + + struct sun4v_error_entry { +@@ -1837,8 +1848,8 @@ struct sun4v_error_entry { + /*0x38*/u64 reserved_5; + }; + +-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); +-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); ++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); ++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); + + static const char *sun4v_err_type_to_str(u8 type) + { +@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs) + } + + static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, +- int cpu, const char *pfx, atomic_t *ocnt) ++ int cpu, const char *pfx, atomic_unchecked_t *ocnt) + { + u64 *raw_ptr = (u64 *) ent; + u32 attrs; +@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, + + show_regs(regs); + +- if ((cnt = atomic_read(ocnt)) != 0) { +- atomic_set(ocnt, 0); ++ if ((cnt = atomic_read_unchecked(ocnt)) != 0) { ++ atomic_set_unchecked(ocnt, 0); + wmb(); + printk("%s: Queue overflowed %d times.\n", + pfx, cnt); +@@ -2046,7 +2057,7 @@ out: + */ + void sun4v_resum_overflow(struct pt_regs *regs) + { +- atomic_inc(&sun4v_resum_oflow_cnt); ++ atomic_inc_unchecked(&sun4v_resum_oflow_cnt); + } + + /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. +@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs) + /* XXX Actually even this can make not that much sense. Perhaps + * XXX we should just pull the plug and panic directly from here? + */ +- atomic_inc(&sun4v_nonresum_oflow_cnt); ++ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt); + } + + unsigned long sun4v_err_itlb_vaddr; +@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) + + printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); +- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); +- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", ++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", +@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) + + printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); +- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); ++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); +- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", ++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n", + (void *) regs->u_regs[UREG_I7]); + printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", +@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + fp = (unsigned long)sf->fp + STACK_BIAS; + } + +- printk(" [%016lx] %pS\n", pc, (void *) pc); ++ printk(" [%016lx] %pA\n", pc, (void *) pc); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + if ((pc + 8UL) == (unsigned long) &return_to_handler) { + int index = tsk->curr_ret_stack; + if (tsk->ret_stack && index >= graph) { + pc = tsk->ret_stack[index - graph].ret; +- printk(" [%016lx] %pS\n", pc, (void *) pc); ++ printk(" [%016lx] %pA\n", pc, (void *) 
pc); + graph++; + } + } +@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) + return (struct reg_window *) (fp + STACK_BIAS); + } + ++extern void gr_handle_kernel_exploit(void); ++ + void die_if_kernel(char *str, struct pt_regs *regs) + { + static int die_counter; +@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) + while (rw && + count++ < 30 && + kstack_valid(tp, (unsigned long) rw)) { +- printk("Caller[%016lx]: %pS\n", rw->ins[7], ++ printk("Caller[%016lx]: %pA\n", rw->ins[7], + (void *) rw->ins[7]); + + rw = kernel_stack_up(rw); +@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs) + } + user_instruction_dump ((unsigned int __user *) regs->tpc); + } +- if (regs->tstate & TSTATE_PRIV) ++ if (regs->tstate & TSTATE_PRIV) { ++ gr_handle_kernel_exploit(); + do_exit(SIGKILL); ++ } + do_exit(SIGSEGV); + } + EXPORT_SYMBOL(die_if_kernel); +diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c +index 35ab8b6..9046547 100644 +--- a/arch/sparc/kernel/unaligned_64.c ++++ b/arch/sparc/kernel/unaligned_64.c +@@ -295,7 +295,7 @@ static void log_unaligned(struct pt_regs *regs) + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); + + if (__ratelimit(&ratelimit)) { +- printk("Kernel unaligned access at TPC[%lx] %pS\n", ++ printk("Kernel unaligned access at TPC[%lx] %pA\n", + regs->tpc, (void *) regs->tpc); + } + } +diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile +index dbe119b..089c7c1 100644 +--- a/arch/sparc/lib/Makefile ++++ b/arch/sparc/lib/Makefile +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi -DST_DIV0=0x02 +-ccflags-y := -Werror ++#ccflags-y := -Werror + + lib-$(CONFIG_SPARC32) += ashrdi3.o + lib-$(CONFIG_SPARC32) += memcpy.o memset.o +diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S +index 85c233d..68500e0 100644 +--- a/arch/sparc/lib/atomic_64.S ++++ b/arch/sparc/lib/atomic_64.S +@@ -17,7 +17,12 @@ + ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic_add) + ++ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ add %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic_add_unchecked) ++ + ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic_sub) + ++ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ sub %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic_sub_unchecked) ++ + ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs 
%icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic_add_ret) + ++ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ add %g7, %o0, %g7 ++ sra %g7, 0, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic_add_ret_unchecked) ++ + ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, BACKOFF_LABEL(2f, 1b) +@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret) + ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic64_add) + ++ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic64_add_unchecked) ++ + ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic64_sub) + ++ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ subcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic64_sub_unchecked) ++ + ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ + 2: BACKOFF_SPIN(%o2, %o3, 1b) + ENDPROC(atomic64_add_ret) + ++ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ add %g7, %o0, %g7 ++ mov %g7, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ENDPROC(atomic64_add_ret_unchecked) ++ + ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) +diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c +index 323335b..ed85ea2 100644 +--- a/arch/sparc/lib/ksyms.c ++++ b/arch/sparc/lib/ksyms.c +@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user); 
+ + /* Atomic counter implementation. */ + EXPORT_SYMBOL(atomic_add); ++EXPORT_SYMBOL(atomic_add_unchecked); + EXPORT_SYMBOL(atomic_add_ret); ++EXPORT_SYMBOL(atomic_add_ret_unchecked); + EXPORT_SYMBOL(atomic_sub); ++EXPORT_SYMBOL(atomic_sub_unchecked); + EXPORT_SYMBOL(atomic_sub_ret); + EXPORT_SYMBOL(atomic64_add); ++EXPORT_SYMBOL(atomic64_add_unchecked); + EXPORT_SYMBOL(atomic64_add_ret); ++EXPORT_SYMBOL(atomic64_add_ret_unchecked); + EXPORT_SYMBOL(atomic64_sub); ++EXPORT_SYMBOL(atomic64_sub_unchecked); + EXPORT_SYMBOL(atomic64_sub_ret); + EXPORT_SYMBOL(atomic64_dec_if_positive); + +diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile +index 30c3ecc..736f015 100644 +--- a/arch/sparc/mm/Makefile ++++ b/arch/sparc/mm/Makefile +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o + obj-y += fault_$(BITS).o +diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c +index 59dbd46..1dd7f5e 100644 +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c +@@ -21,6 +21,9 @@ + #include <linux/perf_event.h> + #include <linux/interrupt.h> + #include <linux/kdebug.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) + return safe_compute_effective_address(regs, insn); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->pc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->pc); ++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned int addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 
0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->pc); ++ ++ if (err) ++ break; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { ++ unsigned int addr; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ else ++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, bajmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && ++ nop == 0x01000000U) ++ { ++ unsigned int addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) ++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ else ++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(ba, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned int addr, save, call; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ else ++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(¤t->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(¤t->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(¤t->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(¤t->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->pc = 
call_dl_resolve; ++ regs->npc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->pc-4)); ++ err |= get_user(call, (unsigned int *)regs->pc); ++ err |= get_user(nop, (unsigned int *)(regs->pc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); ++ ++ regs->u_regs[UREG_RETPC] = regs->pc; ++ regs->pc = dl_resolve; ++ regs->npc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, + int text_fault) + { +@@ -229,6 +503,24 @@ good_area: + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c +index 4ced3fc..234f1e4 100644 +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -22,6 +22,9 @@ + #include <linux/kdebug.h> + #include <linux/percpu.h> + #include <linux/context_tracking.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) + printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", + regs->tpc); + printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); +- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); ++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); + dump_stack(); + unhandled_fault(regs->tpc, current, regs); +@@ -281,6 +284,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) + show_regs(regs); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ 
if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->tpc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->tpc); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->tpc); ++ ++ if (err) ++ break; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { ++ unsigned long addr; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ else ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, bajmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) ++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ else ++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #4 */ 
++ unsigned int sethi, mov1, call, mov2; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(call, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ mov1 == 0x8210000FU && ++ (call & 0xC0000000U) == 0x40000000U && ++ mov2 == 0x9E100001U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; ++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #5 */ ++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(or1, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or2, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+28)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x82106000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x83287020U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #6 */ ++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+24)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ sllx == 0x83287020U && ++ (or & 0xFFFFE000U) == 0x8A116000U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop 
== 0x01000000U) ++ { ++ unsigned long addr; ++ unsigned int save, call; ++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ else ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(¤t->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(¤t->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(¤t->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(¤t->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->tpc = call_dl_resolve; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ ++ /* PaX: 64-bit PLT stub */ ++ err = get_user(sethi1, (unsigned int *)addr); ++ err |= get_user(sethi2, (unsigned int *)(addr+4)); ++ err |= get_user(or1, (unsigned int *)(addr+8)); ++ err |= get_user(or2, (unsigned int *)(addr+12)); ++ err |= get_user(sllx, (unsigned int *)(addr+16)); ++ err |= get_user(add, (unsigned int *)(addr+20)); ++ err |= get_user(jmpl, (unsigned int *)(addr+24)); ++ err |= get_user(nop, (unsigned int *)(addr+28)); ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x09000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x88112000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x89293020U && ++ add == 0x8A010005U && ++ jmpl == 0x89C14000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G4] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; ++ regs->u_regs[UREG_G4] = addr + 24; ++ addr = regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ 
regs->tnpc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->tpc-4)); ++ err |= get_user(call, (unsigned int *)regs->tpc); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ dl_resolve &= 0xFFFFFFFFUL; ++ ++ regs->u_regs[UREG_RETPC] = regs->tpc; ++ regs->tpc = dl_resolve; ++ regs->tnpc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (ba & 0xFFF00000U) == 0x30600000U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + { + enum ctx_state prev_state = exception_enter(); +@@ -352,6 +815,29 @@ retry: + if (!vma) + goto bad_area; + ++#ifdef CONFIG_PAX_PAGEEXEC ++ /* PaX: detect ITLB misses on non-exec pages */ ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && ++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) ++ { ++ if (address != regs->tpc) ++ goto good_area; ++ ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Pure DTLB misses do not tell us whether the fault causing + * load/store/atomic was a write or not, it only says that there + * was no match. 
So in such a case we (carefully) read the +diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c +index d329537..2c3746a 100644 +--- a/arch/sparc/mm/hugetlbpage.c ++++ b/arch/sparc/mm/hugetlbpage.c +@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, +- unsigned long flags) ++ unsigned long flags, ++ unsigned long offset) + { ++ struct mm_struct *mm = current->mm; + unsigned long task_size = TASK_SIZE; + struct vm_unmapped_area_info info; + +@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, + + info.flags = 0; + info.length = len; +- info.low_limit = TASK_UNMAPPED_BASE; ++ info.low_limit = mm->mmap_base; + info.high_limit = min(task_size, VA_EXCLUDE_START); + info.align_mask = PAGE_MASK & ~HPAGE_MASK; + info.align_offset = 0; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { + VM_BUG_ON(addr != -ENOMEM); + info.low_limit = VA_EXCLUDE_END; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = task_size; + addr = vm_unmapped_area(&info); + } +@@ -55,7 +64,8 @@ static unsigned long + hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, + const unsigned long pgoff, +- const unsigned long flags) ++ const unsigned long flags, ++ const unsigned long offset) + { + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; +@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = PAGE_MASK & ~HPAGE_MASK; + info.align_offset = 0; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = STACK_TOP32; + addr = vm_unmapped_area(&info); + } +@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; ++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; +@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + return addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, +- pgoff, flags); ++ pgoff, flags, offset); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, +- pgoff, flags); ++ pgoff, flags, offset); + } + + pte_t *huge_pte_alloc(struct mm_struct *mm, +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c +index 9686224..dfbdb10 100644 +--- a/arch/sparc/mm/init_64.c ++++ b/arch/sparc/mm/init_64.c +@@ 
-188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly; + int num_kernel_image_mappings; + + #ifdef CONFIG_DEBUG_DCFLUSH +-atomic_t dcpage_flushes = ATOMIC_INIT(0); ++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0); + #ifdef CONFIG_SMP +-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); ++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0); + #endif + #endif + +@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page) + { + BUG_ON(tlb_type == hypervisor); + #ifdef CONFIG_DEBUG_DCFLUSH +- atomic_inc(&dcpage_flushes); ++ atomic_inc_unchecked(&dcpage_flushes); + #endif + + #ifdef DCACHE_ALIASING_POSSIBLE +@@ -470,10 +470,10 @@ void mmu_info(struct seq_file *m) + + #ifdef CONFIG_DEBUG_DCFLUSH + seq_printf(m, "DCPageFlushes\t: %d\n", +- atomic_read(&dcpage_flushes)); ++ atomic_read_unchecked(&dcpage_flushes)); + #ifdef CONFIG_SMP + seq_printf(m, "DCPageFlushesXC\t: %d\n", +- atomic_read(&dcpage_flushes_xcall)); ++ atomic_read_unchecked(&dcpage_flushes_xcall)); + #endif /* CONFIG_SMP */ + #endif /* CONFIG_DEBUG_DCFLUSH */ + } +diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig +index b3692ce..e4517c9 100644 +--- a/arch/tile/Kconfig ++++ b/arch/tile/Kconfig +@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz" + + config KEXEC + bool "kexec system call" ++ depends on !GRKERNSEC_KMEM + ---help--- + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot +diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h +index ad220ee..2f537b3 100644 +--- a/arch/tile/include/asm/atomic_64.h ++++ b/arch/tile/include/asm/atomic_64.h +@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + /* Atomic dec and inc don't implement barrier, so provide them if needed. 
*/ + #define smp_mb__before_atomic_dec() smp_mb() + #define smp_mb__after_atomic_dec() smp_mb() +diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h +index 6160761..00cac88 100644 +--- a/arch/tile/include/asm/cache.h ++++ b/arch/tile/include/asm/cache.h +@@ -15,11 +15,12 @@ + #ifndef _ASM_TILE_CACHE_H + #define _ASM_TILE_CACHE_H + ++#include <linux/const.h> + #include <arch/chip.h> + + /* bytes per L1 data cache line */ + #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* bytes per L2 cache line */ + #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() +diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h +index b6cde32..c0cb736 100644 +--- a/arch/tile/include/asm/uaccess.h ++++ b/arch/tile/include/asm/uaccess.h +@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) + { +- int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + +- if (likely(sz == -1 || sz >= n)) ++ if (likely(sz == (size_t)-1 || sz >= n)) + n = _copy_from_user(to, from, n); + else + copy_from_user_overflow(); +diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c +index e514899..f8743c4 100644 +--- a/arch/tile/mm/hugetlbpage.c ++++ b/arch/tile/mm/hugetlbpage.c +@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; ++ info.threadstack_offset = 0; + return vm_unmapped_area(&info); + } + +@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; ++ info.threadstack_offset = 0; + addr = vm_unmapped_area(&info); + + /* +diff --git a/arch/um/Makefile b/arch/um/Makefile +index 36e658a..71a5c5a 100644 +--- a/arch/um/Makefile ++++ b/arch/um/Makefile +@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\ + $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \ + $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include + ++ifdef CONSTIFY_PLUGIN ++USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif ++ + #This will adjust *FLAGS accordingly to the platform. + include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS) + +diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h +index 19e1bdd..3665b77 100644 +--- a/arch/um/include/asm/cache.h ++++ b/arch/um/include/asm/cache.h +@@ -1,6 +1,7 @@ + #ifndef __UM_CACHE_H + #define __UM_CACHE_H + ++#include <linux/const.h> + + #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) + # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +@@ -12,6 +13,6 @@ + # define L1_CACHE_SHIFT 5 + #endif + +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #endif +diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h +index 2e0a6b1..a64d0f5 100644 +--- a/arch/um/include/asm/kmap_types.h ++++ b/arch/um/include/asm/kmap_types.h +@@ -8,6 +8,6 @@ + + /* No more #include "asm/arch/kmap_types.h" ! 
*/ + +-#define KM_TYPE_NR 14 ++#define KM_TYPE_NR 15 + + #endif +diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h +index 5ff53d9..5850cdf 100644 +--- a/arch/um/include/asm/page.h ++++ b/arch/um/include/asm/page.h +@@ -14,6 +14,9 @@ + #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) + #define PAGE_MASK (~(PAGE_SIZE-1)) + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #ifndef __ASSEMBLY__ + + struct page; +diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h +index 0032f92..cd151e0 100644 +--- a/arch/um/include/asm/pgtable-3level.h ++++ b/arch/um/include/asm/pgtable-3level.h +@@ -58,6 +58,7 @@ + #define pud_present(x) (pud_val(x) & _PAGE_PRESENT) + #define pud_populate(mm, pud, pmd) \ + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) ++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) + + #ifdef CONFIG_64BIT + #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval)) +diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c +index eecc414..48adb87 100644 +--- a/arch/um/kernel/process.c ++++ b/arch/um/kernel/process.c +@@ -356,22 +356,6 @@ int singlestepping(void * t) + return 2; + } + +-/* +- * Only x86 and x86_64 have an arch_align_stack(). +- * All other arches have "#define arch_align_stack(x) (x)" +- * in their asm/system.h +- * As this is included in UML from asm-um/system-generic.h, +- * we can use it to behave as the subarch does. +- */ +-#ifndef arch_align_stack +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() % 8192; +- return sp & ~0xf; +-} +-#endif +- + unsigned long get_wchan(struct task_struct *p) + { + unsigned long stack_page, sp, ip; +diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h +index ad8f795..2c7eec6 100644 +--- a/arch/unicore32/include/asm/cache.h ++++ b/arch/unicore32/include/asm/cache.h +@@ -12,8 +12,10 @@ + #ifndef __UNICORE_CACHE_H__ + #define __UNICORE_CACHE_H__ + +-#define L1_CACHE_SHIFT (5) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#include <linux/const.h> ++ ++#define L1_CACHE_SHIFT 5 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + /* + * Memory returned by kmalloc() may be used for DMA, so we must make +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index e409891..d64a8f7 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -126,7 +126,7 @@ config X86 + select RTC_LIB + select HAVE_DEBUG_STACKOVERFLOW + select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 +- select HAVE_CC_STACKPROTECTOR ++ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF + select ARCH_SUPPORTS_ATOMIC_RMW + + config INSTRUCTION_DECODER +@@ -252,7 +252,7 @@ config X86_HT + + config X86_32_LAZY_GS + def_bool y +- depends on X86_32 && !CC_STACKPROTECTOR ++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF + + config ARCH_HWEIGHT_CFLAGS + string +@@ -590,6 +590,7 @@ config SCHED_OMIT_FRAME_POINTER + + menuconfig HYPERVISOR_GUEST + bool "Linux guest support" ++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN) + ---help--- + Say Y here to enable options for running Linux under various hyper- + visors. 
This option enables basic hypervisor detection and platform +@@ -1129,7 +1130,7 @@ choice + + config NOHIGHMEM + bool "off" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Linux can use up to 64 Gigabytes of physical memory on x86 systems. + However, the address space of 32-bit x86 processors is only 4 +@@ -1166,7 +1167,7 @@ config NOHIGHMEM + + config HIGHMEM4G + bool "4GB" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Select this if you have a 32-bit processor and between 1 and 4 + gigabytes of physical RAM. +@@ -1219,7 +1220,7 @@ config PAGE_OFFSET + hex + default 0xB0000000 if VMSPLIT_3G_OPT + default 0x80000000 if VMSPLIT_2G +- default 0x78000000 if VMSPLIT_2G_OPT ++ default 0x70000000 if VMSPLIT_2G_OPT + default 0x40000000 if VMSPLIT_1G + default 0xC0000000 + depends on X86_32 +@@ -1624,6 +1625,7 @@ source kernel/Kconfig.hz + + config KEXEC + bool "kexec system call" ++ depends on !GRKERNSEC_KMEM + ---help--- + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot +@@ -1775,7 +1777,9 @@ config X86_NEED_RELOCS + + config PHYSICAL_ALIGN + hex "Alignment value to which kernel should be aligned" +- default "0x200000" ++ default "0x1000000" ++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE ++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE + range 0x2000 0x1000000 if X86_32 + range 0x200000 0x1000000 if X86_64 + ---help--- +@@ -1855,9 +1859,10 @@ config DEBUG_HOTPLUG_CPU0 + If unsure, say N. + + config COMPAT_VDSO +- def_bool y ++ def_bool n + prompt "Compat VDSO support" + depends on X86_32 || IA32_EMULATION ++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF + ---help--- + Map the 32-bit VDSO to the predictable old-style address too. + +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu +index f3aaf23..a1d3c49 100644 +--- a/arch/x86/Kconfig.cpu ++++ b/arch/x86/Kconfig.cpu +@@ -319,7 +319,7 @@ config X86_PPRO_FENCE + + config X86_F00F_BUG + def_bool y +- depends on M586MMX || M586TSC || M586 || M486 ++ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC + + config X86_INVD_BUG + def_bool y +@@ -327,7 +327,7 @@ config X86_INVD_BUG + + config X86_ALIGNMENT_16 + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + + config X86_INTEL_USERCOPY + def_bool y +@@ -369,7 +369,7 @@ config X86_CMPXCHG64 + # generates cmov. 
+ config X86_CMOV + def_bool y +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) ++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + + config X86_MINIMUM_CPU_FAMILY + int +diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug +index 321a52c..3d51a5e 100644 +--- a/arch/x86/Kconfig.debug ++++ b/arch/x86/Kconfig.debug +@@ -84,7 +84,7 @@ config X86_PTDUMP + config DEBUG_RODATA + bool "Write protect kernel read-only data structures" + default y +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && BROKEN + ---help--- + Mark the kernel read-only data as write-protected in the pagetables, + in order to catch accidental (and incorrect) writes to such const +@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST + + config DEBUG_SET_MODULE_RONX + bool "Set loadable kernel module data as NX and text as RO" +- depends on MODULES ++ depends on MODULES && BROKEN + ---help--- + This option helps catch unintended modifications to loadable + kernel module's text and read-only data. It also prevents execution +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 0dd99ea..4a63d82 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y) + # CPU-specific tuning. Anything which can be shared with UML should go here. + include $(srctree)/arch/x86/Makefile_32.cpu + KBUILD_CFLAGS += $(cflags-y) +- +- # temporary until string.h is fixed +- KBUILD_CFLAGS += -ffreestanding + else + BITS := 64 + UTS_MACHINE := x86_64 +@@ -112,6 +109,9 @@ else + KBUILD_CFLAGS += -maccumulate-outgoing-args + endif + ++# temporary until string.h is fixed ++KBUILD_CFLAGS += -ffreestanding ++ + # Make sure compiler does not have buggy stack-protector support. + ifdef CONFIG_CC_STACKPROTECTOR + cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh +@@ -269,3 +269,12 @@ define archhelp + echo ' FDINITRD=file initrd for the booted kernel' + echo ' kvmconfig - Enable additional options for guest kernel support' + endef ++ ++define OLD_LD ++ ++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. 
++*** Please upgrade your binutils to 2.18 or newer ++endef ++ ++archprepare: ++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile +index 878df7e..a803913 100644 +--- a/arch/x86/boot/Makefile ++++ b/arch/x86/boot/Makefile +@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE + # --------------------------------------------------------------------------- + + KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n + +diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h +index 878e4b9..20537ab 100644 +--- a/arch/x86/boot/bitops.h ++++ b/arch/x86/boot/bitops.h +@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr) + u8 v; + const u32 *p = (const u32 *)addr; + +- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); ++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + return v; + } + +@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr) + + static inline void set_bit(int nr, void *addr) + { +- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); ++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + } + + #endif /* BOOT_BITOPS_H */ +diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h +index 50f8c5e..4f84fff 100644 +--- a/arch/x86/boot/boot.h ++++ b/arch/x86/boot/boot.h +@@ -84,7 +84,7 @@ static inline void io_delay(void) + static inline u16 ds(void) + { + u16 seg; +- asm("movw %%ds,%0" : "=rm" (seg)); ++ asm volatile("movw %%ds,%0" : "=rm" (seg)); + return seg; + } + +@@ -180,7 +180,7 @@ static inline void wrgs32(u32 v, addr_t addr) + static inline int memcmp(const void *s1, const void *s2, size_t len) + { + u8 diff; +- asm("repe; cmpsb; setnz %0" ++ asm volatile("repe; cmpsb; setnz %0" + : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); + return diff; + } +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 0fcd913..3bb5c42 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y) + KBUILD_CFLAGS += -mno-mmx -mno-sse + KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S +index a53440e..c3dbf1e 100644 +--- a/arch/x86/boot/compressed/efi_stub_32.S ++++ b/arch/x86/boot/compressed/efi_stub_32.S +@@ -46,16 +46,13 @@ ENTRY(efi_call_phys) + * parameter 2, ..., param n. To make things easy, we save the return + * address of efi_call_phys in a global variable. + */ +- popl %ecx +- movl %ecx, saved_return_addr(%edx) +- /* get the function pointer into ECX*/ +- popl %ecx +- movl %ecx, efi_rt_function_ptr(%edx) ++ popl saved_return_addr(%edx) ++ popl efi_rt_function_ptr(%edx) + + /* + * 3. Call the physical function. + */ +- call *%ecx ++ call *efi_rt_function_ptr(%edx) + + /* + * 4. Balance the stack. And because EAX contain the return value, +@@ -67,15 +64,12 @@ ENTRY(efi_call_phys) + 1: popl %edx + subl $1b, %edx + +- movl efi_rt_function_ptr(%edx), %ecx +- pushl %ecx ++ pushl efi_rt_function_ptr(%edx) + + /* + * 10. 
Push the saved return address onto the stack and return. + */ +- movl saved_return_addr(%edx), %ecx +- pushl %ecx +- ret ++ jmpl *saved_return_addr(%edx) + ENDPROC(efi_call_phys) + .previous + +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index f45ab7a..ebc015f 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -119,10 +119,10 @@ preferred_addr: + addl %eax, %ebx + notl %eax + andl %eax, %ebx +- cmpl $LOAD_PHYSICAL_ADDR, %ebx ++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx + jge 1f + #endif +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + 1: + + /* Target address to relocate to for decompression */ +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index b10fa66..5ee0472 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -94,10 +94,10 @@ ENTRY(startup_32) + addl %eax, %ebx + notl %eax + andl %eax, %ebx +- cmpl $LOAD_PHYSICAL_ADDR, %ebx ++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx + jge 1f + #endif +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + 1: + + /* Target address to relocate to for decompression */ +@@ -268,10 +268,10 @@ preferred_addr: + addq %rax, %rbp + notq %rax + andq %rax, %rbp +- cmpq $LOAD_PHYSICAL_ADDR, %rbp ++ cmpq $____LOAD_PHYSICAL_ADDR, %rbp + jge 1f + #endif +- movq $LOAD_PHYSICAL_ADDR, %rbp ++ movq $____LOAD_PHYSICAL_ADDR, %rbp + 1: + + /* Target address to relocate to for decompression */ +@@ -363,8 +363,8 @@ gdt: + .long gdt + .word 0 + .quad 0x0000000000000000 /* NULL descriptor */ +- .quad 0x00af9a000000ffff /* __KERNEL_CS */ +- .quad 0x00cf92000000ffff /* __KERNEL_DS */ ++ .quad 0x00af9b000000ffff /* __KERNEL_CS */ ++ .quad 0x00cf93000000ffff /* __KERNEL_DS */ + .quad 0x0080890000000000 /* TS descriptor */ + .quad 0x0000000000000000 /* TS continued */ + gdt_end: +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index 196eaf3..c96716d 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -218,7 +218,7 @@ void __putstr(const char *s) + + void *memset(void *s, int c, size_t n) + { +- int i; ++ size_t i; + char *ss = s; + + for (i = 0; i < n; i++) +@@ -277,7 +277,7 @@ static void handle_relocations(void *output, unsigned long output_len) + * Calculate the delta between where vmlinux was linked to load + * and where it was actually loaded. + */ +- delta = min_addr - LOAD_PHYSICAL_ADDR; ++ delta = min_addr - ____LOAD_PHYSICAL_ADDR; + if (!delta) { + debug_putstr("No relocation needed... 
"); + return; +@@ -347,7 +347,7 @@ static void parse_elf(void *output) + Elf32_Ehdr ehdr; + Elf32_Phdr *phdrs, *phdr; + #endif +- void *dest; ++ void *dest, *prev; + int i; + + memcpy(&ehdr, output, sizeof(ehdr)); +@@ -374,13 +374,16 @@ static void parse_elf(void *output) + case PT_LOAD: + #ifdef CONFIG_RELOCATABLE + dest = output; +- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); ++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); + #else + dest = (void *)(phdr->p_paddr); + #endif + memcpy(dest, + output + phdr->p_offset, + phdr->p_filesz); ++ if (i) ++ memset(prev, 0xff, dest - prev); ++ prev = dest + phdr->p_filesz; + break; + default: /* Ignore other PT_* */ break; + } +@@ -430,7 +433,7 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap, + error("Destination address too large"); + #endif + #ifndef CONFIG_RELOCATABLE +- if ((unsigned long)output != LOAD_PHYSICAL_ADDR) ++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) + error("Wrong destination address"); + #endif + +diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c +index 100a9a1..bb3bdb0 100644 +--- a/arch/x86/boot/cpucheck.c ++++ b/arch/x86/boot/cpucheck.c +@@ -117,9 +117,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 ecx = MSR_K7_HWCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax &= ~(1 << 15); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + get_cpuflags(); /* Make sure it really did something */ + err = check_cpuflags(); +@@ -132,9 +132,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 ecx = MSR_VIA_FCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax |= (1<<1)|(1<<7); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + set_bit(X86_FEATURE_CX8, cpu.flags); + err = check_cpuflags(); +@@ -145,12 +145,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) + u32 eax, edx; + u32 level = 1; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); +- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); +- asm("cpuid" ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); ++ asm volatile("cpuid" + : "+a" (level), "=d" (cpu.flags[0]) + : : "ecx", "ebx"); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + err = check_cpuflags(); + } +diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S +index 04da6c2..a151f55 100644 +--- a/arch/x86/boot/header.S ++++ b/arch/x86/boot/header.S +@@ -434,10 +434,14 @@ setup_data: .quad 0 # 64-bit physical pointer to + # single linked list of + # struct setup_data + +-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr ++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr + + #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR) ++#else + #define VO_INIT_SIZE (VO__end - VO__text) ++#endif + #if ZO_INIT_SIZE > VO_INIT_SIZE + #define INIT_SIZE ZO_INIT_SIZE + #else +diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c +index 
db75d07..8e6d0af 100644 +--- a/arch/x86/boot/memory.c ++++ b/arch/x86/boot/memory.c +@@ -19,7 +19,7 @@ + + static int detect_memory_e820(void) + { +- int count = 0; ++ unsigned int count = 0; + struct biosregs ireg, oreg; + struct e820entry *desc = boot_params.e820_map; + static struct e820entry buf; /* static so it is zeroed */ +diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c +index 11e8c6e..fdbb1ed 100644 +--- a/arch/x86/boot/video-vesa.c ++++ b/arch/x86/boot/video-vesa.c +@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) + + boot_params.screen_info.vesapm_seg = oreg.es; + boot_params.screen_info.vesapm_off = oreg.di; ++ boot_params.screen_info.vesapm_size = oreg.cx; + } + + /* +diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c +index 43eda28..5ab5fdb 100644 +--- a/arch/x86/boot/video.c ++++ b/arch/x86/boot/video.c +@@ -96,7 +96,7 @@ static void store_mode_params(void) + static unsigned int get_entry(void) + { + char entry_buf[4]; +- int i, len = 0; ++ unsigned int i, len = 0; + int key; + unsigned int v; + +diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S +index 9105655..41779c1 100644 +--- a/arch/x86/crypto/aes-x86_64-asm_64.S ++++ b/arch/x86/crypto/aes-x86_64-asm_64.S +@@ -8,6 +8,8 @@ + * including this sentence is retained in full. + */ + ++#include <asm/alternative-asm.h> ++ + .extern crypto_ft_tab + .extern crypto_it_tab + .extern crypto_fl_tab +@@ -70,6 +72,8 @@ + je B192; \ + leaq 32(r9),r9; + ++#define ret pax_force_retaddr; ret ++ + #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \ + movq r1,r2; \ + movq r3,r4; \ +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S +index 477e9d7..c92c7d8 100644 +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -31,6 +31,7 @@ + + #include <linux/linkage.h> + #include <asm/inst.h> ++#include <asm/alternative-asm.h> + + #ifdef __x86_64__ + .data +@@ -205,7 +206,7 @@ enc: .octa 0x2 + * num_initial_blocks = b mod 4 + * encrypt the initial num_initial_blocks blocks and apply ghash on + * the ciphertext +-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers ++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers + * are clobbered + * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified + */ +@@ -214,8 +215,8 @@ enc: .octa 0x2 + .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ + XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation + mov arg7, %r10 # %r10 = AAD +- mov arg8, %r12 # %r12 = aadLen +- mov %r12, %r11 ++ mov arg8, %r15 # %r15 = aadLen ++ mov %r15, %r11 + pxor %xmm\i, %xmm\i + _get_AAD_loop\num_initial_blocks\operation: + movd (%r10), \TMP1 +@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation: + psrldq $4, %xmm\i + pxor \TMP1, %xmm\i + add $4, %r10 +- sub $4, %r12 ++ sub $4, %r15 + jne _get_AAD_loop\num_initial_blocks\operation + cmp $16, %r11 + je _get_AAD_loop2_done\num_initial_blocks\operation +- mov $16, %r12 ++ mov $16, %r15 + _get_AAD_loop2\num_initial_blocks\operation: + psrldq $4, %xmm\i +- sub $4, %r12 +- cmp %r11, %r12 ++ sub $4, %r15 ++ cmp %r11, %r15 + jne _get_AAD_loop2\num_initial_blocks\operation + _get_AAD_loop2_done\num_initial_blocks\operation: + movdqa SHUF_MASK(%rip), %xmm14 +@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation: + * num_initial_blocks = b mod 4 + * encrypt the initial num_initial_blocks blocks and apply ghash on + * the ciphertext +-* %r10, %r11, %r12, %rax, 
%xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers ++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers + * are clobbered + * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified + */ +@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation: + .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ + XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation + mov arg7, %r10 # %r10 = AAD +- mov arg8, %r12 # %r12 = aadLen +- mov %r12, %r11 ++ mov arg8, %r15 # %r15 = aadLen ++ mov %r15, %r11 + pxor %xmm\i, %xmm\i + _get_AAD_loop\num_initial_blocks\operation: + movd (%r10), \TMP1 +@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation: + psrldq $4, %xmm\i + pxor \TMP1, %xmm\i + add $4, %r10 +- sub $4, %r12 ++ sub $4, %r15 + jne _get_AAD_loop\num_initial_blocks\operation + cmp $16, %r11 + je _get_AAD_loop2_done\num_initial_blocks\operation +- mov $16, %r12 ++ mov $16, %r15 + _get_AAD_loop2\num_initial_blocks\operation: + psrldq $4, %xmm\i +- sub $4, %r12 +- cmp %r11, %r12 ++ sub $4, %r15 ++ cmp %r11, %r15 + jne _get_AAD_loop2\num_initial_blocks\operation + _get_AAD_loop2_done\num_initial_blocks\operation: + movdqa SHUF_MASK(%rip), %xmm14 +@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst + * + *****************************************************************************/ + ENTRY(aesni_gcm_dec) +- push %r12 ++ push %r15 + push %r13 + push %r14 + mov %rsp, %r14 +@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec) + */ + sub $VARIABLE_OFFSET, %rsp + and $~63, %rsp # align rsp to 64 bytes +- mov %arg6, %r12 +- movdqu (%r12), %xmm13 # %xmm13 = HashKey ++ mov %arg6, %r15 ++ movdqu (%r15), %xmm13 # %xmm13 = HashKey + movdqa SHUF_MASK(%rip), %xmm2 + PSHUFB_XMM %xmm2, %xmm13 + +@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec) + movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly) + mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext + and $-16, %r13 # %r13 = %r13 - (%r13 mod 16) +- mov %r13, %r12 +- and $(3<<4), %r12 ++ mov %r13, %r15 ++ and $(3<<4), %r15 + jz _initial_num_blocks_is_0_decrypt +- cmp $(2<<4), %r12 ++ cmp $(2<<4), %r15 + jb _initial_num_blocks_is_1_decrypt + je _initial_num_blocks_is_2_decrypt + _initial_num_blocks_is_3_decrypt: +@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt: + sub $16, %r11 + add %r13, %r11 + movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block +- lea SHIFT_MASK+16(%rip), %r12 +- sub %r13, %r12 ++ lea SHIFT_MASK+16(%rip), %r15 ++ sub %r13, %r15 + # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes + # (%r13 is the number of bytes in plaintext mod 16) +- movdqu (%r12), %xmm2 # get the appropriate shuffle mask ++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask + PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes + + movdqa %xmm1, %xmm2 + pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) +- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 ++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1 + # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 + pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 + pand %xmm1, %xmm2 +@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt: + sub $1, %r13 + jne _less_than_8_bytes_left_decrypt + _multiple_of_16_bytes_decrypt: +- mov arg8, %r12 # %r13 = aadLen (number of bytes) +- shl $3, %r12 # convert into number of bits +- movd %r12d, %xmm15 # len(A) in %xmm15 ++ mov arg8, %r15 # %r13 = aadLen (number of bytes) ++ shl $3, %r15 # convert into number of bits ++ movd %r15d, %xmm15 # len(A) in %xmm15 + shl $3, %arg4 # len(C) in bits (*128) + 
MOVQ_R64_XMM %arg4, %xmm1 + pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 +@@ -1440,7 +1441,8 @@ _return_T_done_decrypt: + mov %r14, %rsp + pop %r14 + pop %r13 +- pop %r12 ++ pop %r15 ++ pax_force_retaddr + ret + ENDPROC(aesni_gcm_dec) + +@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec) + * poly = x^128 + x^127 + x^126 + x^121 + 1 + ***************************************************************************/ + ENTRY(aesni_gcm_enc) +- push %r12 ++ push %r15 + push %r13 + push %r14 + mov %rsp, %r14 +@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc) + # + sub $VARIABLE_OFFSET, %rsp + and $~63, %rsp +- mov %arg6, %r12 +- movdqu (%r12), %xmm13 ++ mov %arg6, %r15 ++ movdqu (%r15), %xmm13 + movdqa SHUF_MASK(%rip), %xmm2 + PSHUFB_XMM %xmm2, %xmm13 + +@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc) + movdqa %xmm13, HashKey(%rsp) + mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly) + and $-16, %r13 +- mov %r13, %r12 ++ mov %r13, %r15 + + # Encrypt first few blocks + +- and $(3<<4), %r12 ++ and $(3<<4), %r15 + jz _initial_num_blocks_is_0_encrypt +- cmp $(2<<4), %r12 ++ cmp $(2<<4), %r15 + jb _initial_num_blocks_is_1_encrypt + je _initial_num_blocks_is_2_encrypt + _initial_num_blocks_is_3_encrypt: +@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt: + sub $16, %r11 + add %r13, %r11 + movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks +- lea SHIFT_MASK+16(%rip), %r12 +- sub %r13, %r12 ++ lea SHIFT_MASK+16(%rip), %r15 ++ sub %r13, %r15 + # adjust the shuffle mask pointer to be able to shift 16-r13 bytes + # (%r13 is the number of bytes in plaintext mod 16) +- movdqu (%r12), %xmm2 # get the appropriate shuffle mask ++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask + PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte + pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) +- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 ++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1 + # get the appropriate mask to mask out top 16-r13 bytes of xmm0 + pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 + movdqa SHUF_MASK(%rip), %xmm10 +@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt: + sub $1, %r13 + jne _less_than_8_bytes_left_encrypt + _multiple_of_16_bytes_encrypt: +- mov arg8, %r12 # %r12 = addLen (number of bytes) +- shl $3, %r12 +- movd %r12d, %xmm15 # len(A) in %xmm15 ++ mov arg8, %r15 # %r15 = addLen (number of bytes) ++ shl $3, %r15 ++ movd %r15d, %xmm15 # len(A) in %xmm15 + shl $3, %arg4 # len(C) in bits (*128) + MOVQ_R64_XMM %arg4, %xmm1 + pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000 +@@ -1704,7 +1706,8 @@ _return_T_done_encrypt: + mov %r14, %rsp + pop %r14 + pop %r13 +- pop %r12 ++ pop %r15 ++ pax_force_retaddr + ret + ENDPROC(aesni_gcm_enc) + +@@ -1722,6 +1725,7 @@ _key_expansion_256a: + pxor %xmm1, %xmm0 + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr + ret + ENDPROC(_key_expansion_128) + ENDPROC(_key_expansion_256a) +@@ -1748,6 +1752,7 @@ _key_expansion_192a: + shufps $0b01001110, %xmm2, %xmm1 + movaps %xmm1, 0x10(TKEYP) + add $0x20, TKEYP ++ pax_force_retaddr + ret + ENDPROC(_key_expansion_192a) + +@@ -1768,6 +1773,7 @@ _key_expansion_192b: + + movaps %xmm0, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr + ret + ENDPROC(_key_expansion_192b) + +@@ -1781,6 +1787,7 @@ _key_expansion_256b: + pxor %xmm1, %xmm2 + movaps %xmm2, (TKEYP) + add $0x10, TKEYP ++ pax_force_retaddr + ret + ENDPROC(_key_expansion_256b) + +@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key) + #ifndef __x86_64__ + popl KEYP + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_set_key) + +@@ -1916,6 +1924,7 @@ 
ENTRY(aesni_enc) + popl KLEN + popl KEYP + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_enc) + +@@ -1974,6 +1983,7 @@ _aesni_enc1: + AESENC KEY STATE + movaps 0x70(TKEYP), KEY + AESENCLAST KEY STATE ++ pax_force_retaddr + ret + ENDPROC(_aesni_enc1) + +@@ -2083,6 +2093,7 @@ _aesni_enc4: + AESENCLAST KEY STATE2 + AESENCLAST KEY STATE3 + AESENCLAST KEY STATE4 ++ pax_force_retaddr + ret + ENDPROC(_aesni_enc4) + +@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec) + popl KLEN + popl KEYP + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_dec) + +@@ -2164,6 +2176,7 @@ _aesni_dec1: + AESDEC KEY STATE + movaps 0x70(TKEYP), KEY + AESDECLAST KEY STATE ++ pax_force_retaddr + ret + ENDPROC(_aesni_dec1) + +@@ -2273,6 +2286,7 @@ _aesni_dec4: + AESDECLAST KEY STATE2 + AESDECLAST KEY STATE3 + AESDECLAST KEY STATE4 ++ pax_force_retaddr + ret + ENDPROC(_aesni_dec4) + +@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc) + popl KEYP + popl LEN + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_ecb_enc) + +@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec) + popl KEYP + popl LEN + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_ecb_dec) + +@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc) + popl LEN + popl IVP + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_cbc_enc) + +@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec) + popl LEN + popl IVP + #endif ++ pax_force_retaddr + ret + ENDPROC(aesni_cbc_dec) + +@@ -2550,6 +2568,7 @@ _aesni_inc_init: + mov $1, TCTR_LOW + MOVQ_R64_XMM TCTR_LOW INC + MOVQ_R64_XMM CTR TCTR_LOW ++ pax_force_retaddr + ret + ENDPROC(_aesni_inc_init) + +@@ -2579,6 +2598,7 @@ _aesni_inc: + .Linc_low: + movaps CTR, IV + PSHUFB_XMM BSWAP_MASK IV ++ pax_force_retaddr + ret + ENDPROC(_aesni_inc) + +@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc) + .Lctr_enc_ret: + movups IV, (IVP) + .Lctr_enc_just_ret: ++ pax_force_retaddr + ret + ENDPROC(aesni_ctr_enc) + +@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8) + pxor INC, STATE4 + movdqu STATE4, 0x70(OUTP) + ++ pax_force_retaddr + ret + ENDPROC(aesni_xts_crypt8) + +diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S +index 246c670..466e2d6 100644 +--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S ++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S +@@ -21,6 +21,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "blowfish-x86_64-asm.S" + .text +@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk) + jnz .L__enc_xor; + + write_block(); ++ pax_force_retaddr + ret; + .L__enc_xor: + xor_block(); ++ pax_force_retaddr + ret; + ENDPROC(__blowfish_enc_blk) + +@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk) + + movq %r11, %rbp; + ++ pax_force_retaddr + ret; + ENDPROC(blowfish_dec_blk) + +@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way) + + popq %rbx; + popq %rbp; ++ pax_force_retaddr + ret; + + .L__enc_xor4: +@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way) + + popq %rbx; + popq %rbp; ++ pax_force_retaddr + ret; + ENDPROC(__blowfish_enc_blk_4way) + +@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way) + popq %rbx; + popq %rbp; + ++ pax_force_retaddr + ret; + ENDPROC(blowfish_dec_blk_4way) +diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S +index ce71f92..1dce7ec 100644 +--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S ++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S +@@ -16,6 +16,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + #define CAMELLIA_TABLE_BYTE_LEN 272 + +@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: + 
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, + %rcx, (%r9)); ++ pax_force_retaddr + ret; + ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) + +@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: + roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, + %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, + %rax, (%r9)); ++ pax_force_retaddr + ret; + ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) + +@@ -780,6 +783,7 @@ __camellia_enc_blk16: + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); + ++ pax_force_retaddr + ret; + + .align 8 +@@ -865,6 +869,7 @@ __camellia_dec_blk16: + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); + ++ pax_force_retaddr + ret; + + .align 8 +@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way) + %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, + %xmm8, %rsi); + ++ pax_force_retaddr + ret; + ENDPROC(camellia_ecb_enc_16way) + +@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way) + %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, + %xmm8, %rsi); + ++ pax_force_retaddr + ret; + ENDPROC(camellia_ecb_dec_16way) + +@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way) + %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, + %xmm8, %rsi); + ++ pax_force_retaddr + ret; + ENDPROC(camellia_cbc_dec_16way) + +@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way) + %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, + %xmm8, %rsi); + ++ pax_force_retaddr + ret; + ENDPROC(camellia_ctr_16way) + +@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way: + %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, + %xmm8, %rsi); + ++ pax_force_retaddr + ret; + ENDPROC(camellia_xts_crypt_16way) + +diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +index 0e0b886..5a3123c 100644 +--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S ++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +@@ -11,6 +11,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + #define CAMELLIA_TABLE_BYTE_LEN 272 + +@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: + roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, + %rcx, (%r9)); ++ pax_force_retaddr + ret; + ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) + +@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: + roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, + %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, + %rax, (%r9)); ++ pax_force_retaddr + ret; + ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) + +@@ -820,6 +823,7 @@ __camellia_enc_blk32: + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); + ++ pax_force_retaddr + ret; + + .align 8 +@@ -905,6 +909,7 @@ __camellia_dec_blk32: + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); + ++ pax_force_retaddr + ret; + + .align 8 +@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(camellia_ecb_enc_32way) + +@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way) + + vzeroupper; + 
++ pax_force_retaddr + ret; + ENDPROC(camellia_ecb_dec_32way) + +@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(camellia_cbc_dec_32way) + +@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(camellia_ctr_32way) + +@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way: + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(camellia_xts_crypt_32way) + +diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S +index 310319c..db3d7b5 100644 +--- a/arch/x86/crypto/camellia-x86_64-asm_64.S ++++ b/arch/x86/crypto/camellia-x86_64-asm_64.S +@@ -21,6 +21,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "camellia-x86_64-asm_64.S" + .text +@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk) + enc_outunpack(mov, RT1); + + movq RRBP, %rbp; ++ pax_force_retaddr + ret; + + .L__enc_xor: + enc_outunpack(xor, RT1); + + movq RRBP, %rbp; ++ pax_force_retaddr + ret; + ENDPROC(__camellia_enc_blk) + +@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk) + dec_outunpack(); + + movq RRBP, %rbp; ++ pax_force_retaddr + ret; + ENDPROC(camellia_dec_blk) + +@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way) + + movq RRBP, %rbp; + popq %rbx; ++ pax_force_retaddr + ret; + + .L__enc2_xor: +@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way) + + movq RRBP, %rbp; + popq %rbx; ++ pax_force_retaddr + ret; + ENDPROC(__camellia_enc_blk_2way) + +@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way) + + movq RRBP, %rbp; + movq RXOR, %rbx; ++ pax_force_retaddr + ret; + ENDPROC(camellia_dec_blk_2way) +diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +index c35fd5d..2d8c7db 100644 +--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "cast5-avx-x86_64-asm_64.S" + +@@ -281,6 +282,7 @@ __cast5_enc_blk16: + outunpack_blocks(RR3, RL3, RTMP, RX, RKM); + outunpack_blocks(RR4, RL4, RTMP, RX, RKM); + ++ pax_force_retaddr + ret; + ENDPROC(__cast5_enc_blk16) + +@@ -352,6 +354,7 @@ __cast5_dec_blk16: + outunpack_blocks(RR3, RL3, RTMP, RX, RKM); + outunpack_blocks(RR4, RL4, RTMP, RX, RKM); + ++ pax_force_retaddr + ret; + + .L__skip_dec: +@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way) + vmovdqu RR4, (6*4*4)(%r11); + vmovdqu RL4, (7*4*4)(%r11); + ++ pax_force_retaddr + ret; + ENDPROC(cast5_ecb_enc_16way) + +@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way) + vmovdqu RR4, (6*4*4)(%r11); + vmovdqu RL4, (7*4*4)(%r11); + ++ pax_force_retaddr + ret; + ENDPROC(cast5_ecb_dec_16way) + +@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way) + * %rdx: src + */ + +- pushq %r12; ++ pushq %r14; + + movq %rsi, %r11; +- movq %rdx, %r12; ++ movq %rdx, %r14; + + vmovdqu (0*16)(%rdx), RL1; + vmovdqu (1*16)(%rdx), RR1; +@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way) + call __cast5_dec_blk16; + + /* xor with src */ +- vmovq (%r12), RX; ++ vmovq (%r14), RX; + vpshufd $0x4f, RX, RX; + vpxor RX, RR1, RR1; +- vpxor 0*16+8(%r12), RL1, RL1; +- vpxor 1*16+8(%r12), RR2, RR2; +- vpxor 2*16+8(%r12), RL2, RL2; +- vpxor 3*16+8(%r12), RR3, RR3; +- vpxor 4*16+8(%r12), RL3, RL3; +- vpxor 5*16+8(%r12), RR4, RR4; +- vpxor 6*16+8(%r12), RL4, RL4; ++ vpxor 0*16+8(%r14), RL1, RL1; ++ vpxor 1*16+8(%r14), RR2, RR2; ++ vpxor 2*16+8(%r14), RL2, RL2; ++ vpxor 3*16+8(%r14), RR3, RR3; ++ vpxor 4*16+8(%r14), RL3, RL3; ++ vpxor 5*16+8(%r14), RR4, RR4; 
++ vpxor 6*16+8(%r14), RL4, RL4; + + vmovdqu RR1, (0*16)(%r11); + vmovdqu RL1, (1*16)(%r11); +@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way) + vmovdqu RR4, (6*16)(%r11); + vmovdqu RL4, (7*16)(%r11); + +- popq %r12; ++ popq %r14; + ++ pax_force_retaddr + ret; + ENDPROC(cast5_cbc_dec_16way) + +@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way) + * %rcx: iv (big endian, 64bit) + */ + +- pushq %r12; ++ pushq %r14; + + movq %rsi, %r11; +- movq %rdx, %r12; ++ movq %rdx, %r14; + + vpcmpeqd RTMP, RTMP, RTMP; + vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */ +@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way) + call __cast5_enc_blk16; + + /* dst = src ^ iv */ +- vpxor (0*16)(%r12), RR1, RR1; +- vpxor (1*16)(%r12), RL1, RL1; +- vpxor (2*16)(%r12), RR2, RR2; +- vpxor (3*16)(%r12), RL2, RL2; +- vpxor (4*16)(%r12), RR3, RR3; +- vpxor (5*16)(%r12), RL3, RL3; +- vpxor (6*16)(%r12), RR4, RR4; +- vpxor (7*16)(%r12), RL4, RL4; ++ vpxor (0*16)(%r14), RR1, RR1; ++ vpxor (1*16)(%r14), RL1, RL1; ++ vpxor (2*16)(%r14), RR2, RR2; ++ vpxor (3*16)(%r14), RL2, RL2; ++ vpxor (4*16)(%r14), RR3, RR3; ++ vpxor (5*16)(%r14), RL3, RL3; ++ vpxor (6*16)(%r14), RR4, RR4; ++ vpxor (7*16)(%r14), RL4, RL4; + vmovdqu RR1, (0*16)(%r11); + vmovdqu RL1, (1*16)(%r11); + vmovdqu RR2, (2*16)(%r11); +@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way) + vmovdqu RR4, (6*16)(%r11); + vmovdqu RL4, (7*16)(%r11); + +- popq %r12; ++ popq %r14; + ++ pax_force_retaddr + ret; + ENDPROC(cast5_ctr_16way) +diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +index e3531f8..e123f35 100644 +--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx.S" + + .file "cast6-avx-x86_64-asm_64.S" +@@ -295,6 +296,7 @@ __cast6_enc_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); + ++ pax_force_retaddr + ret; + ENDPROC(__cast6_enc_blk8) + +@@ -340,6 +342,7 @@ __cast6_dec_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); + ++ pax_force_retaddr + ret; + ENDPROC(__cast6_dec_blk8) + +@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way) + + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(cast6_ecb_enc_8way) + +@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way) + + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(cast6_ecb_dec_8way) + +@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way) + * %rdx: src + */ + +- pushq %r12; ++ pushq %r14; + + movq %rsi, %r11; +- movq %rdx, %r12; ++ movq %rdx, %r14; + + load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + + call __cast6_dec_blk8; + +- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); ++ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + +- popq %r12; ++ popq %r14; + ++ pax_force_retaddr + ret; + ENDPROC(cast6_cbc_dec_8way) + +@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way) + * %rcx: iv (little endian, 128bit) + */ + +- pushq %r12; ++ pushq %r14; + + movq %rsi, %r11; +- movq %rdx, %r12; ++ movq %rdx, %r14; + + load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, + RD2, RX, RKR, RKM); + + call __cast6_enc_blk8; + +- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); ++ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, 
RD2); + +- popq %r12; ++ popq %r14; + ++ pax_force_retaddr + ret; + ENDPROC(cast6_ctr_8way) + +@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way) + /* dst <= regs xor IVs(in dst) */ + store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(cast6_xts_enc_8way) + +@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way) + /* dst <= regs xor IVs(in dst) */ + store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(cast6_xts_dec_8way) +diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +index dbc4339..de6e120 100644 +--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S ++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +@@ -45,6 +45,7 @@ + + #include <asm/inst.h> + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction + +@@ -312,6 +313,7 @@ do_return: + popq %rsi + popq %rdi + popq %rbx ++ pax_force_retaddr + ret + + ################################################################ +diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S +index 185fad4..ff4cd36 100644 +--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S ++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S +@@ -18,6 +18,7 @@ + + #include <linux/linkage.h> + #include <asm/inst.h> ++#include <asm/alternative-asm.h> + + .data + +@@ -89,6 +90,7 @@ __clmul_gf128mul_ble: + psrlq $1, T2 + pxor T2, T1 + pxor T1, DATA ++ pax_force_retaddr + ret + ENDPROC(__clmul_gf128mul_ble) + +@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul) + call __clmul_gf128mul_ble + PSHUFB_XMM BSWAP DATA + movups DATA, (%rdi) ++ pax_force_retaddr + ret + ENDPROC(clmul_ghash_mul) + +@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update) + PSHUFB_XMM BSWAP DATA + movups DATA, (%rdi) + .Lupdate_just_ret: ++ pax_force_retaddr + ret + ENDPROC(clmul_ghash_update) +diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S +index 9279e0b..c4b3d2c 100644 +--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S ++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S +@@ -1,4 +1,5 @@ + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + # enter salsa20_encrypt_bytes + ENTRY(salsa20_encrypt_bytes) +@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes) + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret + # bytesatleast65: + ._bytesatleast65: +@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup) + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret + ENDPROC(salsa20_keysetup) + +@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup) + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx ++ pax_force_retaddr + ret + ENDPROC(salsa20_ivsetup) +diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +index 2f202f4..d9164d6 100644 +--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx.S" + + .file "serpent-avx-x86_64-asm_64.S" +@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx: + write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_enc_blk8_avx) + +@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx: + write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + 
ENDPROC(__serpent_dec_blk8_avx) + +@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx) + + store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ecb_enc_8way_avx) + +@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx) + + store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ecb_dec_8way_avx) + +@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx) + + store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_cbc_dec_8way_avx) + +@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx) + + store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ctr_8way_avx) + +@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx) + /* dst <= regs xor IVs(in dst) */ + store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_xts_enc_8way_avx) + +@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx) + /* dst <= regs xor IVs(in dst) */ + store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_xts_dec_8way_avx) +diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S +index b222085..abd483c 100644 +--- a/arch/x86/crypto/serpent-avx2-asm_64.S ++++ b/arch/x86/crypto/serpent-avx2-asm_64.S +@@ -15,6 +15,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx2.S" + + .file "serpent-avx2-asm_64.S" +@@ -610,6 +611,7 @@ __serpent_enc_blk16: + write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_enc_blk16) + +@@ -664,6 +666,7 @@ __serpent_dec_blk16: + write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_dec_blk16) + +@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ecb_enc_16way) + +@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ecb_dec_16way) + +@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(serpent_cbc_dec_16way) + +@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(serpent_ctr_16way) + +@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(serpent_xts_enc_16way) + +@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way) + + vzeroupper; + ++ pax_force_retaddr + ret; + ENDPROC(serpent_xts_dec_16way) +diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +index acc066c..1559cc4 100644 +--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S ++++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +@@ -25,6 +25,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "serpent-sse2-x86_64-asm_64.S" + .text +@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way) + write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + + .L__enc_xor8: + xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); + xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__serpent_enc_blk_8way) + 
+@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way) + write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(serpent_dec_blk_8way) +diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S +index a410950..9dfe7ad 100644 +--- a/arch/x86/crypto/sha1_ssse3_asm.S ++++ b/arch/x86/crypto/sha1_ssse3_asm.S +@@ -29,6 +29,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + #define CTX %rdi // arg1 + #define BUF %rsi // arg2 +@@ -75,9 +76,9 @@ + + push %rbx + push %rbp +- push %r12 ++ push %r14 + +- mov %rsp, %r12 ++ mov %rsp, %r14 + sub $64, %rsp # allocate workspace + and $~15, %rsp # align stack + +@@ -99,11 +100,12 @@ + xor %rax, %rax + rep stosq + +- mov %r12, %rsp # deallocate workspace ++ mov %r14, %rsp # deallocate workspace + +- pop %r12 ++ pop %r14 + pop %rbp + pop %rbx ++ pax_force_retaddr + ret + + ENDPROC(\name) +diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S +index 642f156..51a513c 100644 +--- a/arch/x86/crypto/sha256-avx-asm.S ++++ b/arch/x86/crypto/sha256-avx-asm.S +@@ -49,6 +49,7 @@ + + #ifdef CONFIG_AS_AVX + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + ## assume buffers not aligned + #define VMOVDQ vmovdqu +@@ -460,6 +461,7 @@ done_hash: + popq %r13 + popq %rbp + popq %rbx ++ pax_force_retaddr + ret + ENDPROC(sha256_transform_avx) + +diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S +index 9e86944..3795e6a 100644 +--- a/arch/x86/crypto/sha256-avx2-asm.S ++++ b/arch/x86/crypto/sha256-avx2-asm.S +@@ -50,6 +50,7 @@ + + #ifdef CONFIG_AS_AVX2 + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + ## assume buffers not aligned + #define VMOVDQ vmovdqu +@@ -720,6 +721,7 @@ done_hash: + popq %r12 + popq %rbp + popq %rbx ++ pax_force_retaddr + ret + ENDPROC(sha256_transform_rorx) + +diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S +index f833b74..8c62a9e 100644 +--- a/arch/x86/crypto/sha256-ssse3-asm.S ++++ b/arch/x86/crypto/sha256-ssse3-asm.S +@@ -47,6 +47,7 @@ + ######################################################################## + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + ## assume buffers not aligned + #define MOVDQ movdqu +@@ -471,6 +472,7 @@ done_hash: + popq %rbp + popq %rbx + ++ pax_force_retaddr + ret + ENDPROC(sha256_transform_ssse3) + +diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S +index 974dde9..a823ff9 100644 +--- a/arch/x86/crypto/sha512-avx-asm.S ++++ b/arch/x86/crypto/sha512-avx-asm.S +@@ -49,6 +49,7 @@ + + #ifdef CONFIG_AS_AVX + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .text + +@@ -364,6 +365,7 @@ updateblock: + mov frame_RSPSAVE(%rsp), %rsp + + nowork: ++ pax_force_retaddr + ret + ENDPROC(sha512_transform_avx) + +diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S +index 568b961..ed20c37 100644 +--- a/arch/x86/crypto/sha512-avx2-asm.S ++++ b/arch/x86/crypto/sha512-avx2-asm.S +@@ -51,6 +51,7 @@ + + #ifdef CONFIG_AS_AVX2 + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .text + +@@ -678,6 +679,7 @@ done_hash: + + # Restore Stack Pointer + mov frame_RSPSAVE(%rsp), %rsp ++ pax_force_retaddr + ret + ENDPROC(sha512_transform_rorx) + +diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S +index fb56855..6edd768 100644 +--- 
a/arch/x86/crypto/sha512-ssse3-asm.S ++++ b/arch/x86/crypto/sha512-ssse3-asm.S +@@ -48,6 +48,7 @@ + ######################################################################## + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .text + +@@ -363,6 +364,7 @@ updateblock: + mov frame_RSPSAVE(%rsp), %rsp + + nowork: ++ pax_force_retaddr + ret + ENDPROC(sha512_transform_ssse3) + +diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +index 0505813..b067311 100644 +--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +@@ -24,6 +24,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + #include "glue_helper-asm-avx.S" + + .file "twofish-avx-x86_64-asm_64.S" +@@ -284,6 +285,7 @@ __twofish_enc_blk8: + outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); + outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__twofish_enc_blk8) + +@@ -324,6 +326,7 @@ __twofish_dec_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); + outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); + ++ pax_force_retaddr + ret; + ENDPROC(__twofish_dec_blk8) + +@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way) + + store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + ++ pax_force_retaddr + ret; + ENDPROC(twofish_ecb_enc_8way) + +@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way) + + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(twofish_ecb_dec_8way) + +@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way) + * %rdx: src + */ + +- pushq %r12; ++ pushq %r14; + + movq %rsi, %r11; +- movq %rdx, %r12; ++ movq %rdx, %r14; + + load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + + call __twofish_dec_blk8; + +- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); ++ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + +- popq %r12; ++ popq %r14; + ++ pax_force_retaddr + ret; + ENDPROC(twofish_cbc_dec_8way) + +@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way) + * %rcx: iv (little endian, 128bit) + */ + +- pushq %r12; ++ pushq %r14; + + movq %rsi, %r11; +- movq %rdx, %r12; ++ movq %rdx, %r14; + + load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, + RD2, RX0, RX1, RY0); + + call __twofish_enc_blk8; + +- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); ++ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + +- popq %r12; ++ popq %r14; + ++ pax_force_retaddr + ret; + ENDPROC(twofish_ctr_8way) + +@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way) + /* dst <= regs xor IVs(in dst) */ + store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + ++ pax_force_retaddr + ret; + ENDPROC(twofish_xts_enc_8way) + +@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way) + /* dst <= regs xor IVs(in dst) */ + store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + ++ pax_force_retaddr + ret; + ENDPROC(twofish_xts_dec_8way) +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +index 1c3b7ce..02f578d 100644 +--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +@@ -21,6 +21,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + .file "twofish-x86_64-asm-3way.S" + .text +@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way) + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr + ret; + + 
.L__enc_xor3: +@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way) + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr + ret; + ENDPROC(__twofish_enc_blk_3way) + +@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way) + popq %r13; + popq %r14; + popq %r15; ++ pax_force_retaddr + ret; + ENDPROC(twofish_dec_blk_3way) +diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S +index a039d21..524b8b2 100644 +--- a/arch/x86/crypto/twofish-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S +@@ -22,6 +22,7 @@ + + #include <linux/linkage.h> + #include <asm/asm-offsets.h> ++#include <asm/alternative-asm.h> + + #define a_offset 0 + #define b_offset 4 +@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk) + + popq R1 + movq $1,%rax ++ pax_force_retaddr + ret + ENDPROC(twofish_enc_blk) + +@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk) + + popq R1 + movq $1,%rax ++ pax_force_retaddr + ret + ENDPROC(twofish_dec_blk) +diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c +index d21ff89..6da8e6e 100644 +--- a/arch/x86/ia32/ia32_aout.c ++++ b/arch/x86/ia32/ia32_aout.c +@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm) + unsigned long dump_start, dump_size; + struct user32 dump; + ++ memset(&dump, 0, sizeof(dump)); ++ + fs = get_fs(); + set_fs(KERNEL_DS); + has_dumped = 1; +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c +index 2206757..85cbcfa 100644 +--- a/arch/x86/ia32/ia32_signal.c ++++ b/arch/x86/ia32/ia32_signal.c +@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void) + if (__get_user(set.sig[0], &frame->sc.oldmask) + || (_COMPAT_NSIG_WORDS > 1 + && __copy_from_user((((char *) &set.sig) + 4), +- &frame->extramask, ++ frame->extramask, + sizeof(frame->extramask)))) + goto badframe; + +@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, + sp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + return (void __user *) sp; + } + +@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, + restorer = VDSO32_SYMBOL(current->mm->context.vdso, + sigreturn); + else +- restorer = &frame->retcode; ++ restorer = frame->retcode; + } + + put_user_try { +@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, + * These are actually not used anymore, but left because some + * gdb versions depend on them as a marker. + */ +- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); + } put_user_catch(err); + + if (err) +@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, + 0xb8, + __NR_ia32_rt_sigreturn, + 0x80cd, +- 0, ++ 0 + }; + + frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); +@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, + + if (ksig->ka.sa.sa_flags & SA_RESTORER) + restorer = ksig->ka.sa.sa_restorer; ++ else if (current->mm->context.vdso) ++ /* Return stub is in 32bit vsyscall page */ ++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + else +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, +- rt_sigreturn); ++ restorer = frame->retcode; + put_user_ex(ptr_to_compat(restorer), &frame->pretcode); + + /* + * Not actually used anymore, but left because some gdb + * versions need it. 
+ */ +- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); + } put_user_catch(err); + + err |= copy_siginfo_to_user32(&frame->info, &ksig->info); +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index 4299eb0..c0687a7 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -15,8 +15,10 @@ + #include <asm/irqflags.h> + #include <asm/asm.h> + #include <asm/smap.h> ++#include <asm/pgtable.h> + #include <linux/linkage.h> + #include <linux/err.h> ++#include <asm/alternative-asm.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ + #include <linux/elf-em.h> +@@ -62,12 +64,12 @@ + */ + .macro LOAD_ARGS32 offset, _r9=0 + .if \_r9 +- movl \offset+16(%rsp),%r9d ++ movl \offset+R9(%rsp),%r9d + .endif +- movl \offset+40(%rsp),%ecx +- movl \offset+48(%rsp),%edx +- movl \offset+56(%rsp),%esi +- movl \offset+64(%rsp),%edi ++ movl \offset+RCX(%rsp),%ecx ++ movl \offset+RDX(%rsp),%edx ++ movl \offset+RSI(%rsp),%esi ++ movl \offset+RDI(%rsp),%edi + movl %eax,%eax /* zero extension */ + .endm + +@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit) + ENDPROC(native_irq_enable_sysexit) + #endif + ++ .macro pax_enter_kernel_user ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ .endm ++ ++ .macro pax_exit_kernel_user ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushq %rax ++ pushq %r11 ++ call pax_randomize_kstack ++ popq %r11 ++ popq %rax ++#endif ++ .endm ++ ++ .macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++ .endm ++ + /* + * 32bit SYSENTER instruction entry. + * +@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target) + CFI_REGISTER rsp,rbp + SWAPGS_UNSAFE_STACK + movq PER_CPU_VAR(kernel_stack), %rsp +- addq $(KERNEL_STACK_OFFSET),%rsp +- /* +- * No need to follow this irqs on/off section: the syscall +- * disabled irqs, here we enable it straight after entry: +- */ +- ENABLE_INTERRUPTS(CLBR_NONE) + movl %ebp,%ebp /* zero extension */ + pushq_cfi $__USER32_DS + /*CFI_REL_OFFSET ss,0*/ +@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target) + CFI_REL_OFFSET rsp,0 + pushfq_cfi + /*CFI_REL_OFFSET rflags,0*/ +- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d +- CFI_REGISTER rip,r10 ++ orl $X86_EFLAGS_IF,(%rsp) ++ GET_THREAD_INFO(%r11) ++ movl TI_sysenter_return(%r11), %r11d ++ CFI_REGISTER rip,r11 + pushq_cfi $__USER32_CS + /*CFI_REL_OFFSET cs,0*/ + movl %eax, %eax +- pushq_cfi %r10 ++ pushq_cfi %r11 + CFI_REL_OFFSET rip,0 + pushq_cfi %rax + cld + SAVE_ARGS 0,1,0 ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ ++ /* ++ * No need to follow this irqs on/off section: the syscall ++ * disabled irqs, here we enable it straight after entry: ++ */ ++ ENABLE_INTERRUPTS(CLBR_NONE) + /* no need to do an access_ok check here because rbp has been + 32bit zero extended */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ addq pax_user_shadow_base,%rbp ++ ASM_PAX_OPEN_USERLAND ++#endif ++ + ASM_STAC + 1: movl (%rbp),%ebp + _ASM_EXTABLE(1b,ia32_badarg) + ASM_CLAC +- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ ASM_PAX_CLOSE_USERLAND ++#endif ++ ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + 
CFI_REMEMBER_STATE + jnz sysenter_tracesys + cmpq $(IA32_NR_syscalls-1),%rax +@@ -162,15 +209,18 @@ sysenter_do_call: + sysenter_dispatch: + call *ia32_sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) ++ GET_THREAD_INFO(%r11) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) + jnz sysexit_audit + sysexit_from_sys_call: +- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ pax_exit_kernel_user ++ pax_erase_kstack ++ andl $~TS_COMPAT,TI_status(%r11) + /* clear IF, that popfq doesn't enable interrupts early */ +- andl $~0x200,EFLAGS-R11(%rsp) +- movl RIP-R11(%rsp),%edx /* User %eip */ ++ andl $~X86_EFLAGS_IF,EFLAGS(%rsp) ++ movl RIP(%rsp),%edx /* User %eip */ + CFI_REGISTER rip,rdx + RESTORE_ARGS 0,24,0,0,0,0 + xorq %r8,%r8 +@@ -193,6 +243,9 @@ sysexit_from_sys_call: + movl %eax,%esi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ + call __audit_syscall_entry ++ ++ pax_erase_kstack ++ + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ + cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys +@@ -204,7 +257,7 @@ sysexit_from_sys_call: + .endm + + .macro auditsys_exit exit +- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jnz ia32_ret_from_sys_call + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +@@ -215,11 +268,12 @@ sysexit_from_sys_call: + 1: setbe %al /* 1 if error, 0 if not */ + movzbl %al,%edi /* zero-extend that into %edi */ + call __audit_syscall_exit ++ GET_THREAD_INFO(%r11) + movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ + movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl %edi,TI_flags(%r11) + jz \exit + CLEAR_RREGS -ARGOFFSET + jmp int_with_check +@@ -237,7 +291,7 @@ sysexit_audit: + + sysenter_tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jz sysenter_auditsys + #endif + SAVE_REST +@@ -249,6 +303,9 @@ sysenter_tracesys: + RESTORE_REST + cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ ++ ++ pax_erase_kstack ++ + jmp sysenter_do_call + CFI_ENDPROC + ENDPROC(ia32_sysenter_target) +@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target) + ENTRY(ia32_cstar_target) + CFI_STARTPROC32 simple + CFI_SIGNAL_FRAME +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET ++ CFI_DEF_CFA rsp,0 + CFI_REGISTER rip,rcx + /*CFI_REGISTER rflags,r11*/ + SWAPGS_UNSAFE_STACK + movl %esp,%r8d + CFI_REGISTER rsp,r8 + movq PER_CPU_VAR(kernel_stack),%rsp ++ SAVE_ARGS 8*6,0,0 ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + /* + * No need to follow this irqs on/off section: the syscall + * disabled irqs and here we enable it straight after entry: + */ + ENABLE_INTERRUPTS(CLBR_NONE) +- SAVE_ARGS 8,0,0 + movl %eax,%eax /* zero extension */ + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rcx,RIP-ARGOFFSET(%rsp) +@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target) + /* no need to do an access_ok check here because r8 has been + 32bit zero extended */ + /* hardware stack frame is complete now */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ ASM_PAX_OPEN_USERLAND ++ movq 
pax_user_shadow_base,%r8 ++ addq RSP-ARGOFFSET(%rsp),%r8 ++#endif ++ + ASM_STAC + 1: movl (%r8),%r9d + _ASM_EXTABLE(1b,ia32_badarg) + ASM_CLAC +- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ ASM_PAX_CLOSE_USERLAND ++#endif ++ ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + CFI_REMEMBER_STATE + jnz cstar_tracesys + cmpq $IA32_NR_syscalls-1,%rax +@@ -319,13 +395,16 @@ cstar_do_call: + cstar_dispatch: + call *ia32_sys_call_table(,%rax,8) + movq %rax,RAX-ARGOFFSET(%rsp) ++ GET_THREAD_INFO(%r11) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) + jnz sysretl_audit + sysretl_from_sys_call: +- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- RESTORE_ARGS 0,-ARG_SKIP,0,0,0 ++ pax_exit_kernel_user ++ pax_erase_kstack ++ andl $~TS_COMPAT,TI_status(%r11) ++ RESTORE_ARGS 0,-ORIG_RAX,0,0,0 + movl RIP-ARGOFFSET(%rsp),%ecx + CFI_REGISTER rip,rcx + movl EFLAGS-ARGOFFSET(%rsp),%r11d +@@ -352,7 +431,7 @@ sysretl_audit: + + cstar_tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) + jz cstar_auditsys + #endif + xchgl %r9d,%ebp +@@ -366,11 +445,19 @@ cstar_tracesys: + xchgl %ebp,%r9d + cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ ++ ++ pax_erase_kstack ++ + jmp cstar_do_call + END(ia32_cstar_target) + + ia32_badarg: + ASM_CLAC ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ ASM_PAX_CLOSE_USERLAND ++#endif ++ + movq $-EFAULT,%rax + jmp ia32_sysret + CFI_ENDPROC +@@ -407,19 +494,26 @@ ENTRY(ia32_syscall) + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME + SWAPGS +- /* +- * No need to follow this irqs on/off section: the syscall +- * disabled irqs and here we enable it straight after entry: +- */ +- ENABLE_INTERRUPTS(CLBR_NONE) + movl %eax,%eax + pushq_cfi %rax + cld + /* note the registers are not zero extended to the sf. + this could be a problem. 
*/ + SAVE_ARGS 0,1,0 +- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ ++ /* ++ * No need to follow this irqs on/off section: the syscall ++ * disabled irqs and here we enable it straight after entry: ++ */ ++ ENABLE_INTERRUPTS(CLBR_NONE) ++ GET_THREAD_INFO(%r11) ++ orl $TS_COMPAT,TI_status(%r11) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) + jnz ia32_tracesys + cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys +@@ -442,6 +536,9 @@ ia32_tracesys: + RESTORE_REST + cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ ++ ++ pax_erase_kstack ++ + jmp ia32_do_call + END(ia32_syscall) + +diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c +index 8e0ceec..af13504 100644 +--- a/arch/x86/ia32/sys_ia32.c ++++ b/arch/x86/ia32/sys_ia32.c +@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, + */ + static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) + { +- typeof(ubuf->st_uid) uid = 0; +- typeof(ubuf->st_gid) gid = 0; ++ typeof(((struct stat64 *)0)->st_uid) uid = 0; ++ typeof(((struct stat64 *)0)->st_gid) gid = 0; + SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); + SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); + if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || +diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h +index 372231c..51b537d 100644 +--- a/arch/x86/include/asm/alternative-asm.h ++++ b/arch/x86/include/asm/alternative-asm.h +@@ -18,6 +18,45 @@ + .endm + #endif + ++#ifdef KERNEXEC_PLUGIN ++ .macro pax_force_retaddr_bts rip=0 ++ btsq $63,\rip(%rsp) ++ .endm ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ .macro pax_force_retaddr rip=0, reload=0 ++ btsq $63,\rip(%rsp) ++ .endm ++ .macro pax_force_fptr ptr ++ btsq $63,\ptr ++ .endm ++ .macro pax_set_fptr_mask ++ .endm ++#endif ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ .macro pax_force_retaddr rip=0, reload=0 ++ .if \reload ++ pax_set_fptr_mask ++ .endif ++ orq %r12,\rip(%rsp) ++ .endm ++ .macro pax_force_fptr ptr ++ orq %r12,\ptr ++ .endm ++ .macro pax_set_fptr_mask ++ movabs $0x8000000000000000,%r12 ++ .endm ++#endif ++#else ++ .macro pax_force_retaddr rip=0, reload=0 ++ .endm ++ .macro pax_force_fptr ptr ++ .endm ++ .macro pax_force_retaddr_bts rip=0 ++ .endm ++ .macro pax_set_fptr_mask ++ .endm ++#endif ++ + .macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . 
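
The alternative-asm.h hunk above defines the pax_force_retaddr machinery that the crypto and entry-code hunks in this patch place before every ret. Both KERNEXEC variants set bit 63 of the saved return address: btsq $63,\rip(%rsp) directly, or orq %r12,\rip(%rsp) after pax_set_fptr_mask preloads %r12 with 0x8000000000000000. Reserving %r12 for that mask would also explain why the crypto hunks earlier in this patch migrate their scratch uses of %r12 over to %r14/%r15. The rationale, inferred from the macro bodies rather than stated anywhere in the patch, is that kernel text lives in the upper canonical half of the x86-64 address space, so the operation is a no-op for a genuine kernel return address but turns an overwritten userland address into a non-canonical one that faults on ret. The standalone C sketch below models only that bit arithmetic; the helper name and the sample addresses are illustrative, not taken from the patch.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of the CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR variant above:
     * orq %r12,(%rsp) with %r12 preloaded to 0x8000000000000000, i.e.
     * set bit 63 of the saved return address on the stack. */
    static uint64_t force_retaddr(uint64_t retaddr)
    {
        return retaddr | UINT64_C(0x8000000000000000);
    }

    int main(void)
    {
        /* Sample addresses, illustrative only. */
        uint64_t kernel_ret = UINT64_C(0xffffffff81234567); /* bit 63 already set */
        uint64_t user_ret   = UINT64_C(0x00007f1234567890); /* bit 63 clear */

        printf("kernel ret %#018" PRIx64 " -> %#018" PRIx64 " (unchanged)\n",
               kernel_ret, force_retaddr(kernel_ret));
        printf("user   ret %#018" PRIx64 " -> %#018" PRIx64 " (non-canonical, faults on ret)\n",
               user_ret, force_retaddr(user_ret));
        return 0;
    }
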
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h +index 0a3f9c9..c9d081d 100644 +--- a/arch/x86/include/asm/alternative.h ++++ b/arch/x86/include/asm/alternative.h +@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end) + ".pushsection .discard,\"aw\",@progbits\n" \ + DISCARD_ENTRY(1) \ + ".popsection\n" \ +- ".pushsection .altinstr_replacement, \"ax\"\n" \ ++ ".pushsection .altinstr_replacement, \"a\"\n" \ + ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ + ".popsection" + +@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end) + DISCARD_ENTRY(1) \ + DISCARD_ENTRY(2) \ + ".popsection\n" \ +- ".pushsection .altinstr_replacement, \"ax\"\n" \ ++ ".pushsection .altinstr_replacement, \"a\"\n" \ + ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ + ".popsection" +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 1d2091a..f5074c1 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void) + + #ifdef CONFIG_X86_LOCAL_APIC + +-extern unsigned int apic_verbosity; ++extern int apic_verbosity; + extern int local_apic_timer_c2_ok; + + extern int disable_apic; +diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h +index 20370c6..a2eb9b0 100644 +--- a/arch/x86/include/asm/apm.h ++++ b/arch/x86/include/asm/apm.h +@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%al\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%bl\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h +index b17f4f4..9620151 100644 +--- a/arch/x86/include/asm/atomic.h ++++ b/arch/x86/include/asm/atomic.h +@@ -23,7 +23,18 @@ + */ + static inline int atomic_read(const atomic_t *v) + { +- return (*(volatile int *)&(v)->counter); ++ return (*(volatile const int *)&(v)->counter); ++} ++ ++/** ++ * atomic_read_unchecked - read atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ */ ++static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return (*(volatile const int *)&(v)->counter); + } + + /** +@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i) + } + + /** ++ * atomic_set_unchecked - set atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. 
++ */ ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t +@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i) + */ + static inline void atomic_add(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "addl %1,%0" ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_add_unchecked - add integer to atomic variable ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v) + */ + static inline void atomic_sub(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "subl %1,%0" ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_sub_unchecked - subtract integer from atomic variable ++ * @i: integer value to subtract ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically subtracts @i from @v. ++ */ ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v) + */ + static inline int atomic_sub_and_test(int i, atomic_t *v) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); ++ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e"); + } + + /** +@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) + */ + static inline void atomic_inc(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "incl %0" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_inc_unchecked - increment atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "incl %0\n" + : "+m" (v->counter)); + } + +@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v) + */ + static inline void atomic_dec(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "decl %0" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_dec_unchecked - decrement atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically decrements @v by 1. 
++ */ ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decl %0\n" + : "+m" (v->counter)); + } + +@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v) + */ + static inline int atomic_dec_and_test(atomic_t *v) + { +- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); ++ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e"); + } + + /** +@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v) + */ + static inline int atomic_inc_and_test(atomic_t *v) + { +- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); ++ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e"); ++} ++ ++/** ++ * atomic_inc_and_test_unchecked - increment and test ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1 ++ * and returns true if the result is zero, or false for all ++ * other cases. ++ */ ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) ++{ ++ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e"); + } + + /** +@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v) + */ + static inline int atomic_add_negative(int i, atomic_t *v) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); ++ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s"); + } + + /** +@@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v) + */ + static inline int atomic_add_return(int i, atomic_t *v) + { ++ return i + xadd_check_overflow(&v->counter, i); ++} ++ ++/** ++ * atomic_add_return_unchecked - add integer and return ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ + return i + xadd(&v->counter, i); + } + +@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v) + } + + #define atomic_inc_return(v) (atomic_add_return(1, v)) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + #define atomic_dec_return(v) (atomic_sub_return(1, v)) + +-static inline int atomic_cmpxchg(atomic_t *v, int old, int new) ++static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) + { + return cmpxchg(&v->counter, old, new); + } +@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new) + return xchg(&v->counter, new); + } + ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) ++{ ++ return xchg(&v->counter, new); ++} ++ + /** + * __atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t +@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new) + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns the old value of @v. 
+ */ +-static inline int __atomic_add_unless(atomic_t *v, int a, int u) ++static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "subl %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; +@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + } + + /** ++ * atomic_inc_not_zero_hint - increment if not null ++ * @v: pointer of type atomic_t ++ * @hint: probable value of the atomic before the increment ++ * ++ * This version of atomic_inc_not_zero() gives a hint of probable ++ * value of the atomic. This helps processor to not read the memory ++ * before doing the atomic read/modify/write cycle, lowering ++ * number of bus transactions on some arches. ++ * ++ * Returns: 0 if increment was not done, 1 otherwise. ++ */ ++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint ++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) ++{ ++ int val, c = hint, new; ++ ++ /* sanity test, should be removed by compiler if hint is a constant */ ++ if (!hint) ++ return __atomic_add_unless(v, 1, 0); ++ ++ do { ++ asm volatile("incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "decl %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c)); ++ ++ val = atomic_cmpxchg(v, c, new); ++ if (val == c) ++ return 1; ++ c = val; ++ } while (c); ++ ++ return 0; ++} ++ ++/** + * atomic_inc_short - increment of a short integer + * @v: pointer to type int + * +@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) + #endif + + /* These are x86-specific, used by some header files */ +-#define atomic_clear_mask(mask, addr) \ +- asm volatile(LOCK_PREFIX "andl %0,%1" \ +- : : "r" (~(mask)), "m" (*(addr)) : "memory") ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) ++{ ++ asm volatile(LOCK_PREFIX "andl %1,%0" ++ : "+m" (v->counter) ++ : "r" (~(mask)) ++ : "memory"); ++} + +-#define atomic_set_mask(mask, addr) \ +- asm volatile(LOCK_PREFIX "orl %0,%1" \ +- : : "r" ((unsigned)(mask)), "m" (*(addr)) \ +- : "memory") ++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "andl %1,%0" ++ : "+m" (v->counter) ++ : "r" (~(mask)) ++ : "memory"); ++} ++ ++static inline void atomic_set_mask(unsigned int mask, atomic_t *v) ++{ ++ asm volatile(LOCK_PREFIX "orl %1,%0" ++ : "+m" (v->counter) ++ : "r" (mask) ++ : "memory"); ++} ++ ++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "orl %1,%0" ++ : "+m" (v->counter) ++ : "r" (mask) ++ : "memory"); ++} + + /* Atomic operations are already serializing on x86 */ + #define smp_mb__before_atomic_dec() barrier() +diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h +index b154de7..bf18a5a 100644 +--- a/arch/x86/include/asm/atomic64_32.h ++++ b/arch/x86/include/asm/atomic64_32.h +@@ -12,6 +12,14 @@ typedef struct { + u64 __aligned(8) counter; + } atomic64_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ u64 __aligned(8) counter; ++} 
atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif ++ + #define ATOMIC64_INIT(val) { (val) } + + #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) +@@ -37,21 +45,31 @@ typedef struct { + ATOMIC64_DECL_ONE(sym##_386) + + ATOMIC64_DECL_ONE(add_386); ++ATOMIC64_DECL_ONE(add_unchecked_386); + ATOMIC64_DECL_ONE(sub_386); ++ATOMIC64_DECL_ONE(sub_unchecked_386); + ATOMIC64_DECL_ONE(inc_386); ++ATOMIC64_DECL_ONE(inc_unchecked_386); + ATOMIC64_DECL_ONE(dec_386); ++ATOMIC64_DECL_ONE(dec_unchecked_386); + #endif + + #define alternative_atomic64(f, out, in...) \ + __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in) + + ATOMIC64_DECL(read); ++ATOMIC64_DECL(read_unchecked); + ATOMIC64_DECL(set); ++ATOMIC64_DECL(set_unchecked); + ATOMIC64_DECL(xchg); + ATOMIC64_DECL(add_return); ++ATOMIC64_DECL(add_return_unchecked); + ATOMIC64_DECL(sub_return); ++ATOMIC64_DECL(sub_return_unchecked); + ATOMIC64_DECL(inc_return); ++ATOMIC64_DECL(inc_return_unchecked); + ATOMIC64_DECL(dec_return); ++ATOMIC64_DECL(dec_return_unchecked); + ATOMIC64_DECL(dec_if_positive); + ATOMIC64_DECL(inc_not_zero); + ATOMIC64_DECL(add_unless); +@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n + } + + /** ++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable ++ * @p: pointer to type atomic64_unchecked_t ++ * @o: expected value ++ * @n: new value ++ * ++ * Atomically sets @v to @n if it was equal to @o and returns ++ * the old value. ++ */ ++ ++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) ++{ ++ return cmpxchg64(&v->counter, o, n); ++} ++ ++/** + * atomic64_xchg - xchg atomic64 variable + * @v: pointer to type atomic64_t + * @n: value to assign +@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i) + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @n: value to assign ++ * ++ * Atomically sets the value of @v to @n. ++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) ++{ ++ unsigned high = (unsigned)(i >> 32); ++ unsigned low = (unsigned)i; ++ alternative_atomic64(set, /* no output */, ++ "S" (v), "b" (low), "c" (high) ++ : "eax", "edx", "memory"); ++} ++ ++/** + * atomic64_read - read atomic64 variable + * @v: pointer to type atomic64_t + * +@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v) + } + + /** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v and returns it. 
++ */ ++static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v) ++{ ++ long long r; ++ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); ++ return r; ++ } ++ ++/** + * atomic64_add_return - add and return + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) + return i; + } + ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + *@v ++ */ ++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) ++{ ++ alternative_atomic64(add_return_unchecked, ++ ASM_OUTPUT2("+A" (i), "+c" (v)), ++ ASM_NO_INPUT_CLOBBER("memory")); ++ return i; ++} ++ + /* + * Other variants with different arithmetic operators: + */ +@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v) + return a; + } + ++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ long long a; ++ alternative_atomic64(inc_return_unchecked, "=&A" (a), ++ "S" (v) : "memory", "ecx"); ++ return a; ++} ++ + static inline long long atomic64_dec_return(atomic64_t *v) + { + long long a; +@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v) + } + + /** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) ++{ ++ __alternative_atomic64(add_unchecked, add_return_unchecked, ++ ASM_OUTPUT2("+A" (i), "+c" (v)), ++ ASM_NO_INPUT_CLOBBER("memory")); ++ return i; ++} ++ ++/** + * atomic64_sub - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_t +diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h +index 46e9052..ae45136 100644 +--- a/arch/x86/include/asm/atomic64_64.h ++++ b/arch/x86/include/asm/atomic64_64.h +@@ -18,7 +18,19 @@ + */ + static inline long atomic64_read(const atomic64_t *v) + { +- return (*(volatile long *)&(v)->counter); ++ return (*(volatile const long *)&(v)->counter); ++} ++ ++/** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer of type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ * Doesn't imply a read memory barrier. ++ */ ++static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return (*(volatile const long *)&(v)->counter); + } + + /** +@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. 
++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic64_add - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i) + */ + static inline void atomic64_add(long i, atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "addq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v) + */ + static inline void atomic64_sub(long i, atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "subq %1,%0" ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_sub_unchecked - subtract the atomic64 variable ++ * @i: integer value to subtract ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically subtracts @i from @v. ++ */ ++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); + } +@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) + */ + static inline int atomic64_sub_and_test(long i, atomic64_t *v) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e"); ++ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e"); + } + + /** +@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) + */ + static inline void atomic64_inc(atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_inc_unchecked - increment atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); +@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v) + */ + static inline void atomic64_dec(atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "decq %0" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incq %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_dec_unchecked - decrement atomic64 variable ++ * @v: pointer to type atomic64_t ++ * ++ * Atomically decrements @v by 1. 
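
The "jno; undo; int $4" sequence that the patch threads through atomic64_add(), atomic64_sub(), atomic64_inc() and atomic64_dec() above is the heart of PAX_REFCOUNT: perform the locked operation, and if the signed result overflowed, roll it back and raise the x86 overflow exception, which the kernel catches through the _ASM_EXTABLE entry and resumes at label 0. A minimal user-space model of the same idea, assuming GCC on x86-64 (refcount_inc_sketch is an illustrative name; ud2 stands in for the kernel-only int $4 recovery path):

/* Sketch of the PAX_REFCOUNT overflow trap.  The kernel raises int $4
 * (#OF) and resumes via its exception table; user space has no such
 * table, so ud2 (SIGILL) marks the trap site instead. */
static inline void refcount_inc_sketch(long *counter)
{
	asm volatile("lock incq %0\n\t"
		     "jno 0f\n\t"	/* no signed overflow: done     */
		     "lock decq %0\n\t"	/* overflow: undo the increment */
		     "ud2\n"		/* kernel: int $4 + extable     */
		     "0:"
		     : "+m" (*counter) : : "cc", "memory");
}
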
++ */ ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decq %0\n" + : "=m" (v->counter) + : "m" (v->counter)); + } +@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v) + */ + static inline int atomic64_dec_and_test(atomic64_t *v) + { +- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e"); ++ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e"); + } + + /** +@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v) + */ + static inline int atomic64_inc_and_test(atomic64_t *v) + { +- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e"); ++ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e"); + } + + /** +@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v) + */ + static inline int atomic64_add_negative(long i, atomic64_t *v) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s"); ++ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s"); + } + + /** +@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) + */ + static inline long atomic64_add_return(long i, atomic64_t *v) + { ++ return i + xadd_check_overflow(&v->counter, i); ++} ++ ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ + return i + xadd(&v->counter, i); + } + +@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) + } + + #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return atomic64_add_return_unchecked(1, v); ++} + #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) + + static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) + return cmpxchg(&v->counter, old, new); + } + ++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) ++{ ++ return cmpxchg(&v->counter, old, new); ++} ++ + static inline long atomic64_xchg(atomic64_t *v, long new) + { + return xchg(&v->counter, new); +@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new) + */ + static inline int atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("add %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "sub %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index 69bbb48..32517fe 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -107,7 +107,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +@@ -124,7 
+124,7 @@ do { \ + do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h +index 9fc1af7..776d75a 100644 +--- a/arch/x86/include/asm/bitops.h ++++ b/arch/x86/include/asm/bitops.h +@@ -49,7 +49,7 @@ + * a mask operation on a byte. + */ + #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) +-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) ++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) + #define CONST_MASK(nr) (1 << ((nr) & 7)) + + /** +@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) + */ + static inline int test_and_set_bit(long nr, volatile unsigned long *addr) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); ++ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); + } + + /** +@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) + */ + static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); ++ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); + } + + /** +@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) + */ + static inline int test_and_change_bit(long nr, volatile unsigned long *addr) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); ++ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); + } + + static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) +@@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr); + * + * Undefined if no bit exists, so code should check against 0 first. + */ +-static inline unsigned long __ffs(unsigned long word) ++static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word) + { + asm("rep; bsf %1,%0" + : "=r" (word) +@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word) + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +-static inline unsigned long ffz(unsigned long word) ++static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word) + { + asm("rep; bsf %1,%0" + : "=r" (word) +@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word) + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +-static inline unsigned long __fls(unsigned long word) ++static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word) + { + asm("bsr %1,%0" + : "=r" (word) +@@ -436,7 +436,7 @@ static inline int ffs(int x) + * set bit if value is nonzero. The last (most significant) bit is + * at position 32. + */ +-static inline int fls(int x) ++static inline int __intentional_overflow(-1) fls(int x) + { + int r; + +@@ -478,7 +478,7 @@ static inline int fls(int x) + * at position 64. 
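
The __intentional_overflow(-1) markers added to __ffs(), ffz(), __fls(), fls() and fls64() above are annotations for PaX's size_overflow GCC plugin: these helpers wrap or scan bits on purpose, so the plugin must not instrument their arithmetic. For reference, the contracts being annotated, modeled portably (a sketch using GCC builtins rather than the kernel's bsf/bsr asm):

/* Portable models of the annotated bit-scan helpers (sketch). */
static inline int fls_sketch(int x)	/* 1-based MSB; fls(0) == 0 */
{
	return x ? 32 - __builtin_clz((unsigned int)x) : 0;
}

static inline unsigned long __ffs_sketch(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(word);	/* undefined for 0 */
}
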
+ */ + #ifdef CONFIG_X86_64 +-static __always_inline int fls64(__u64 x) ++static __always_inline __intentional_overflow(-1) int fls64(__u64 x) + { + int bitpos = -1; + /* +diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h +index 4fa687a..60f2d39 100644 +--- a/arch/x86/include/asm/boot.h ++++ b/arch/x86/include/asm/boot.h +@@ -6,10 +6,15 @@ + #include <uapi/asm/boot.h> + + /* Physical address where kernel should be loaded. */ +-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ ++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + + (CONFIG_PHYSICAL_ALIGN - 1)) \ + & ~(CONFIG_PHYSICAL_ALIGN - 1)) + ++#ifndef __ASSEMBLY__ ++extern unsigned char __LOAD_PHYSICAL_ADDR[]; ++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) ++#endif ++ + /* Minimum kernel alignment, as a power of two */ + #ifdef CONFIG_X86_64 + #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT +diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h +index 48f99f1..d78ebf9 100644 +--- a/arch/x86/include/asm/cache.h ++++ b/arch/x86/include/asm/cache.h +@@ -5,12 +5,13 @@ + + /* L1 cache line size */ + #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) ++#define __read_only __attribute__((__section__(".data..read_only"))) + + #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT +-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) ++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT) + + #ifdef CONFIG_X86_VSMP + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h +index 9863ee3..4a1f8e1 100644 +--- a/arch/x86/include/asm/cacheflush.h ++++ b/arch/x86/include/asm/cacheflush.h +@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg) + unsigned long pg_flags = pg->flags & _PGMT_MASK; + + if (pg_flags == _PGMT_DEFAULT) +- return -1; ++ return ~0UL; + else if (pg_flags == _PGMT_WC) + return _PAGE_CACHE_WC; + else if (pg_flags == _PGMT_UC_MINUS) +diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h +index cb4c73b..c473c29 100644 +--- a/arch/x86/include/asm/calling.h ++++ b/arch/x86/include/asm/calling.h +@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with + #define RSP 152 + #define SS 160 + +-#define ARGOFFSET R11 +-#define SWFRAME ORIG_RAX ++#define ARGOFFSET R15 + + .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1 +- subq $9*8+\addskip, %rsp +- CFI_ADJUST_CFA_OFFSET 9*8+\addskip +- movq_cfi rdi, 8*8 +- movq_cfi rsi, 7*8 +- movq_cfi rdx, 6*8 ++ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp ++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip ++ movq_cfi rdi, RDI ++ movq_cfi rsi, RSI ++ movq_cfi rdx, RDX + + .if \save_rcx +- movq_cfi rcx, 5*8 ++ movq_cfi rcx, RCX + .endif + +- movq_cfi rax, 4*8 ++ movq_cfi rax, RAX + + .if \save_r891011 +- movq_cfi r8, 3*8 +- movq_cfi r9, 2*8 +- movq_cfi r10, 1*8 +- movq_cfi r11, 0*8 ++ movq_cfi r8, R8 ++ movq_cfi r9, R9 ++ movq_cfi r10, R10 ++ movq_cfi r11, R11 + .endif + ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ movq_cfi r12, R12 ++#endif ++ + .endm + +-#define ARG_SKIP (9*8) ++#define ARG_SKIP ORIG_RAX + + .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \ + rstor_r8910=1, rstor_rdx=1 ++ ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ movq_cfi_restore R12, r12 ++#endif ++ + .if 
\rstor_r11 +- movq_cfi_restore 0*8, r11 ++ movq_cfi_restore R11, r11 + .endif + + .if \rstor_r8910 +- movq_cfi_restore 1*8, r10 +- movq_cfi_restore 2*8, r9 +- movq_cfi_restore 3*8, r8 ++ movq_cfi_restore R10, r10 ++ movq_cfi_restore R9, r9 ++ movq_cfi_restore R8, r8 + .endif + + .if \rstor_rax +- movq_cfi_restore 4*8, rax ++ movq_cfi_restore RAX, rax + .endif + + .if \rstor_rcx +- movq_cfi_restore 5*8, rcx ++ movq_cfi_restore RCX, rcx + .endif + + .if \rstor_rdx +- movq_cfi_restore 6*8, rdx ++ movq_cfi_restore RDX, rdx + .endif + +- movq_cfi_restore 7*8, rsi +- movq_cfi_restore 8*8, rdi ++ movq_cfi_restore RSI, rsi ++ movq_cfi_restore RDI, rdi + +- .if ARG_SKIP+\addskip > 0 +- addq $ARG_SKIP+\addskip, %rsp +- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) ++ .if ORIG_RAX+\addskip > 0 ++ addq $ORIG_RAX+\addskip, %rsp ++ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip) + .endif + .endm + +- .macro LOAD_ARGS offset, skiprax=0 +- movq \offset(%rsp), %r11 +- movq \offset+8(%rsp), %r10 +- movq \offset+16(%rsp), %r9 +- movq \offset+24(%rsp), %r8 +- movq \offset+40(%rsp), %rcx +- movq \offset+48(%rsp), %rdx +- movq \offset+56(%rsp), %rsi +- movq \offset+64(%rsp), %rdi ++ .macro LOAD_ARGS skiprax=0 ++ movq R11(%rsp), %r11 ++ movq R10(%rsp), %r10 ++ movq R9(%rsp), %r9 ++ movq R8(%rsp), %r8 ++ movq RCX(%rsp), %rcx ++ movq RDX(%rsp), %rdx ++ movq RSI(%rsp), %rsi ++ movq RDI(%rsp), %rdi + .if \skiprax + .else +- movq \offset+72(%rsp), %rax ++ movq RAX(%rsp), %rax + .endif + .endm + +-#define REST_SKIP (6*8) +- + .macro SAVE_REST +- subq $REST_SKIP, %rsp +- CFI_ADJUST_CFA_OFFSET REST_SKIP +- movq_cfi rbx, 5*8 +- movq_cfi rbp, 4*8 +- movq_cfi r12, 3*8 +- movq_cfi r13, 2*8 +- movq_cfi r14, 1*8 +- movq_cfi r15, 0*8 ++ movq_cfi rbx, RBX ++ movq_cfi rbp, RBP ++ ++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ movq_cfi r12, R12 ++#endif ++ ++ movq_cfi r13, R13 ++ movq_cfi r14, R14 ++ movq_cfi r15, R15 + .endm + + .macro RESTORE_REST +- movq_cfi_restore 0*8, r15 +- movq_cfi_restore 1*8, r14 +- movq_cfi_restore 2*8, r13 +- movq_cfi_restore 3*8, r12 +- movq_cfi_restore 4*8, rbp +- movq_cfi_restore 5*8, rbx +- addq $REST_SKIP, %rsp +- CFI_ADJUST_CFA_OFFSET -(REST_SKIP) ++ movq_cfi_restore R15, r15 ++ movq_cfi_restore R14, r14 ++ movq_cfi_restore R13, r13 ++ ++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ movq_cfi_restore R12, r12 ++#endif ++ ++ movq_cfi_restore RBP, rbp ++ movq_cfi_restore RBX, rbx + .endm + + .macro SAVE_ALL +diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h +index f50de69..2b0a458 100644 +--- a/arch/x86/include/asm/checksum_32.h ++++ b/arch/x86/include/asm/checksum_32.h +@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + ++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ ++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ + /* + * Note: when you get a NULL pointer exception here this means someone + * passed in an incorrect kernel address to one of these functions. 
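
The calling.h rework above drops the hand-counted 0*8..8*8 stack slots in favor of the pt_regs offset names (RDI, RSI, ..., R15) defined at the top of the file, so SAVE_ARGS, LOAD_ARGS and RESTORE_ARGS always agree with the pt_regs layout; it also saves %r12, which the KERNEXEC "OR" plugin reserves. The named offsets are just field offsets into the saved-register frame, as this stand-alone sketch shows (ptregs_sketch mirrors the x86-64 save order; it is illustrative, not the kernel's struct):

#include <stddef.h>
#include <stdio.h>

struct ptregs_sketch {
	unsigned long r15, r14, r13, r12, rbp, rbx;
	unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
	unsigned long orig_rax, rip, cs, eflags, rsp, ss;
};

int main(void)
{
	/* matches the patch's numbering: RDI 112, RSP 152, SS 160 */
	printf("rdi=%zu rsp=%zu ss=%zu\n",
	       offsetof(struct ptregs_sketch, rdi),
	       offsetof(struct ptregs_sketch, rsp),
	       offsetof(struct ptregs_sketch, ss));
	return 0;
}
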
+@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, + + might_sleep(); + stac(); +- ret = csum_partial_copy_generic((__force void *)src, dst, ++ ret = csum_partial_copy_generic_from_user((__force void *)src, dst, + len, sum, err_ptr, NULL); + clac(); + +@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, + might_sleep(); + if (access_ok(VERIFY_WRITE, dst, len)) { + stac(); +- ret = csum_partial_copy_generic(src, (__force void *)dst, ++ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst, + len, sum, NULL, err_ptr); + clac(); + return ret; +diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h +index d47786a..2d8883e 100644 +--- a/arch/x86/include/asm/cmpxchg.h ++++ b/arch/x86/include/asm/cmpxchg.h +@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void) + __compiletime_error("Bad argument size for cmpxchg"); + extern void __xadd_wrong_size(void) + __compiletime_error("Bad argument size for xadd"); ++extern void __xadd_check_overflow_wrong_size(void) ++ __compiletime_error("Bad argument size for xadd_check_overflow"); + extern void __add_wrong_size(void) + __compiletime_error("Bad argument size for add"); ++extern void __add_check_overflow_wrong_size(void) ++ __compiletime_error("Bad argument size for add_check_overflow"); + + /* + * Constants for operation sizes. On 32-bit, the 64-bit size it set to +@@ -67,6 +71,38 @@ extern void __add_wrong_size(void) + __ret; \ + }) + ++#ifdef CONFIG_PAX_REFCOUNT ++#define __xchg_op_check_overflow(ptr, arg, op, lock) \ ++ ({ \ ++ __typeof__ (*(ptr)) __ret = (arg); \ ++ switch (sizeof(*(ptr))) { \ ++ case __X86_CASE_L: \ ++ asm volatile (lock #op "l %0, %1\n" \ ++ "jno 0f\n" \ ++ "mov %0,%1\n" \ ++ "int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ : "+r" (__ret), "+m" (*(ptr)) \ ++ : : "memory", "cc"); \ ++ break; \ ++ case __X86_CASE_Q: \ ++ asm volatile (lock #op "q %q0, %1\n" \ ++ "jno 0f\n" \ ++ "mov %0,%1\n" \ ++ "int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ : "+r" (__ret), "+m" (*(ptr)) \ ++ : : "memory", "cc"); \ ++ break; \ ++ default: \ ++ __ ## op ## _check_overflow_wrong_size(); \ ++ } \ ++ __ret; \ ++ }) ++#else ++#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock) ++#endif ++ + /* + * Note: no "lock" prefix even on SMP: xchg always implies lock anyway. 
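
__xchg_op_check_overflow() applies the same protocol to xadd: perform the locked exchange-and-add, and on signed overflow write the pre-add value back and raise int $4. Its contract in plain C (a sketch built on GCC atomics; the real macro is lock-prefixed asm, with the trap recovered through the exception table):

/* C model of xadd_check_overflow() (sketch, GCC builtins). */
static long xadd_check_overflow_sketch(long *ptr, long inc)
{
	long old = __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
	long sum;

	if (__builtin_add_overflow(old, inc, &sum)) {
		__atomic_store_n(ptr, old, __ATOMIC_SEQ_CST);	/* undo */
		__builtin_trap();	/* kernel: int $4, then recover */
	}
	return old;		/* xadd returns the previous value */
}
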
+ * Since this is generally used to protect other memory information, we +@@ -167,6 +203,9 @@ extern void __add_wrong_size(void) + #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") + #define xadd_local(ptr, inc) __xadd((ptr), (inc), "") + ++#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock) ++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX) ++ + #define __add(ptr, inc, lock) \ + ({ \ + __typeof__ (*(ptr)) __ret = (inc); \ +diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h +index 59c6c40..5e0b22c 100644 +--- a/arch/x86/include/asm/compat.h ++++ b/arch/x86/include/asm/compat.h +@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64; + typedef u32 compat_uint_t; + typedef u32 compat_ulong_t; + typedef u64 __attribute__((aligned(4))) compat_u64; +-typedef u32 compat_uptr_t; ++typedef u32 __user compat_uptr_t; + + struct compat_timespec { + compat_time_t tv_sec; +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index 5f12968..a383517 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -203,7 +203,7 @@ + #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ + #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ + #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ +- ++#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */ + + /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ + #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ +@@ -211,7 +211,7 @@ + #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */ + #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */ + #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */ +-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ ++#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */ + #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */ + #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ + #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ +@@ -358,6 +358,7 @@ extern const char * const x86_power_flags[32]; + #undef cpu_has_centaur_mcr + #define cpu_has_centaur_mcr 0 + ++#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID) + #endif /* CONFIG_X86_64 */ + + #if __GNUC__ >= 4 +@@ -410,7 +411,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) + + #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS + t_warn: +- warn_pre_alternatives(); ++ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID) ++ warn_pre_alternatives(); + return false; + #endif + +@@ -430,7 +432,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) + ".section .discard,\"aw\",@progbits\n" + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ + ".previous\n" +- ".section .altinstr_replacement,\"ax\"\n" ++ ".section .altinstr_replacement,\"a\"\n" + "3: movb $1,%0\n" + "4:\n" + ".previous\n" +@@ -467,7 +469,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) + " .byte 2b - 1b\n" /* src len */ + " .byte 4f - 3f\n" /* repl len */ + ".previous\n" +- ".section .altinstr_replacement,\"ax\"\n" ++ ".section .altinstr_replacement,\"a\"\n" + "3: .byte 0xe9\n .long %l[t_no] - 2b\n" + "4:\n" + ".previous\n" +@@ -500,7 +502,7 @@ static __always_inline __pure bool 
_static_cpu_has_safe(u16 bit) + ".section .discard,\"aw\",@progbits\n" + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ + ".previous\n" +- ".section .altinstr_replacement,\"ax\"\n" ++ ".section .altinstr_replacement,\"a\"\n" + "3: movb $0,%0\n" + "4:\n" + ".previous\n" +@@ -514,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) + ".section .discard,\"aw\",@progbits\n" + " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ + ".previous\n" +- ".section .altinstr_replacement,\"ax\"\n" ++ ".section .altinstr_replacement,\"a\"\n" + "5: movb $1,%0\n" + "6:\n" + ".previous\n" +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h +index 50d033a..37deb26 100644 +--- a/arch/x86/include/asm/desc.h ++++ b/arch/x86/include/asm/desc.h +@@ -4,6 +4,7 @@ + #include <asm/desc_defs.h> + #include <asm/ldt.h> + #include <asm/mmu.h> ++#include <asm/pgtable.h> + + #include <linux/smp.h> + #include <linux/percpu.h> +@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in + + desc->type = (info->read_exec_only ^ 1) << 1; + desc->type |= info->contents << 2; ++ desc->type |= info->seg_not_present ^ 1; + + desc->s = 1; + desc->dpl = 0x3; +@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in + } + + extern struct desc_ptr idt_descr; +-extern gate_desc idt_table[]; +-extern struct desc_ptr debug_idt_descr; +-extern gate_desc debug_idt_table[]; +- +-struct gdt_page { +- struct desc_struct gdt[GDT_ENTRIES]; +-} __attribute__((aligned(PAGE_SIZE))); +- +-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); ++extern gate_desc idt_table[IDT_ENTRIES]; ++extern const struct desc_ptr debug_idt_descr; ++extern gate_desc debug_idt_table[IDT_ENTRIES]; + ++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; + static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) + { +- return per_cpu(gdt_page, cpu).gdt; ++ return cpu_gdt_table[cpu]; + } + + #ifdef CONFIG_X86_64 +@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type, + unsigned long base, unsigned dpl, unsigned flags, + unsigned short seg) + { +- gate->a = (seg << 16) | (base & 0xffff); +- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8); ++ gate->gate.offset_low = base; ++ gate->gate.seg = seg; ++ gate->gate.reserved = 0; ++ gate->gate.type = type; ++ gate->gate.s = 0; ++ gate->gate.dpl = dpl; ++ gate->gate.p = 1; ++ gate->gate.offset_high = base >> 16; + } + + #endif +@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) + + static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) + { ++ pax_open_kernel(); + memcpy(&idt[entry], gate, sizeof(*gate)); ++ pax_close_kernel(); + } + + static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) + { ++ pax_open_kernel(); + memcpy(&ldt[entry], desc, 8); ++ pax_close_kernel(); + } + + static inline void +@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int + default: size = sizeof(*gdt); break; + } + ++ pax_open_kernel(); + memcpy(&gdt[entry], desc, size); ++ pax_close_kernel(); + } + + static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, +@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) + + static inline void native_load_tr_desc(void) + { ++ 
pax_open_kernel(); + asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); ++ pax_close_kernel(); + } + + static inline void native_load_gdt(const struct desc_ptr *dtr) +@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) + struct desc_struct *gdt = get_cpu_gdt_table(cpu); + unsigned int i; + ++ pax_open_kernel(); + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; ++ pax_close_kernel(); + } + + #define _LDT_empty(info) \ +@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc) + preempt_enable(); + } + +-static inline unsigned long get_desc_base(const struct desc_struct *desc) ++static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc) + { + return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); + } +@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) + } + + #ifdef CONFIG_X86_64 +-static inline void set_nmi_gate(int gate, void *addr) ++static inline void set_nmi_gate(int gate, const void *addr) + { + gate_desc s; + +@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr) + #endif + + #ifdef CONFIG_TRACING +-extern struct desc_ptr trace_idt_descr; +-extern gate_desc trace_idt_table[]; ++extern const struct desc_ptr trace_idt_descr; ++extern gate_desc trace_idt_table[IDT_ENTRIES]; + static inline void write_trace_idt_entry(int entry, const gate_desc *gate) + { + write_idt_entry(trace_idt_table, entry, gate); + } + +-static inline void _trace_set_gate(int gate, unsigned type, void *addr, ++static inline void _trace_set_gate(int gate, unsigned type, const void *addr, + unsigned dpl, unsigned ist, unsigned seg) + { + gate_desc s; +@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate) + #define _trace_set_gate(gate, type, addr, dpl, ist, seg) + #endif + +-static inline void _set_gate(int gate, unsigned type, void *addr, ++static inline void _set_gate(int gate, unsigned type, const void *addr, + unsigned dpl, unsigned ist, unsigned seg) + { + gate_desc s; +@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr, + #define set_intr_gate(n, addr) \ + do { \ + BUG_ON((unsigned)n > 0xFF); \ +- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \ ++ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \ + __KERNEL_CS); \ +- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\ ++ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\ + 0, 0, __KERNEL_CS); \ + } while (0) + +@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector) + /* + * This routine sets up an interrupt gate at directory privilege level 3. 
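
Every write the patch brackets with pax_open_kernel()/pax_close_kernel() above (IDT, LDT, GDT entries and the TSS load) targets data that KERNEXEC keeps read-only; the pair temporarily lifts that protection and restores it. A user-space analogue of the pattern, assuming Linux and a page-aligned buffer (the kernel actually toggles CR0.WP or switches to a writable pgd, not mprotect):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char table[4096] __attribute__((aligned(4096)));

static void open_kernel_sketch(void)  { mprotect(table, sizeof(table), PROT_READ | PROT_WRITE); }
static void close_kernel_sketch(void) { mprotect(table, sizeof(table), PROT_READ); }

int main(void)
{
	close_kernel_sketch();			/* "read-only" at rest  */
	open_kernel_sketch();			/* pax_open_kernel()    */
	memcpy(table, "gate descriptor", 16);	/* the protected write  */
	close_kernel_sketch();			/* pax_close_kernel()   */
	printf("%s\n", table);
	return 0;
}
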
+ */ +-static inline void set_system_intr_gate(unsigned int n, void *addr) ++static inline void set_system_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_system_trap_gate(unsigned int n, void *addr) ++static inline void set_system_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_trap_gate(unsigned int n, void *addr) ++static inline void set_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); +@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr) + static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) + { + BUG_ON((unsigned)n > 0xFF); +- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); ++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); + } + +-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); + } + +-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); +@@ -503,4 +516,17 @@ static inline void load_current_idt(void) + else + load_idt((const struct desc_ptr *)&idt_descr); + } ++ ++#ifdef CONFIG_X86_32 ++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) ++{ ++ struct desc_struct d; ++ ++ if (likely(limit)) ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ pack_descriptor(&d, base, limit, 0xFB, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); ++} ++#endif ++ + #endif /* _ASM_X86_DESC_H */ +diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h +index 278441f..b95a174 100644 +--- a/arch/x86/include/asm/desc_defs.h ++++ b/arch/x86/include/asm/desc_defs.h +@@ -31,6 +31,12 @@ struct desc_struct { + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; + }; ++ struct { ++ u16 offset_low; ++ u16 seg; ++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1; ++ unsigned offset_high: 16; ++ } gate; + }; + } __attribute__((packed)); + +diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h +index ced283a..ffe04cc 100644 +--- a/arch/x86/include/asm/div64.h ++++ b/arch/x86/include/asm/div64.h +@@ -39,7 +39,7 @@ + __mod; \ + }) + +-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) ++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) + { + union { + u64 v64; +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h +index 9c999c1..3860cb8 100644 +--- a/arch/x86/include/asm/elf.h ++++ b/arch/x86/include/asm/elf.h +@@ -243,7 +243,25 @@ extern int force_personality32; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? 
SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) ++#else + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++#ifdef CONFIG_X86_32 ++#define PAX_ELF_ET_DYN_BASE 0x10000000UL ++ ++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#else ++#define PAX_ELF_ET_DYN_BASE 0x400000UL ++ ++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#endif ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, +@@ -296,16 +314,12 @@ do { \ + + #define ARCH_DLINFO \ + do { \ +- if (vdso_enabled) \ +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +- (unsigned long)current->mm->context.vdso); \ ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ + } while (0) + + #define ARCH_DLINFO_X32 \ + do { \ +- if (vdso_enabled) \ +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +- (unsigned long)current->mm->context.vdso); \ ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \ + } while (0) + + #define AT_SYSINFO 32 +@@ -320,7 +334,7 @@ else \ + + #endif /* !CONFIG_X86_32 */ + +-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) ++#define VDSO_CURRENT_BASE (current->mm->context.vdso) + + #define VDSO_ENTRY \ + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) +@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm, + extern int syscall32_setup_pages(struct linux_binprm *, int exstack); + #define compat_arch_setup_additional_pages syscall32_setup_pages + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + /* + * True on X86_32 or when emulating IA32 on X86_64 + */ +diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h +index 77a99ac..39ff7f5 100644 +--- a/arch/x86/include/asm/emergency-restart.h ++++ b/arch/x86/include/asm/emergency-restart.h +@@ -1,6 +1,6 @@ + #ifndef _ASM_X86_EMERGENCY_RESTART_H + #define _ASM_X86_EMERGENCY_RESTART_H + +-extern void machine_emergency_restart(void); ++extern void machine_emergency_restart(void) __noreturn; + + #endif /* _ASM_X86_EMERGENCY_RESTART_H */ +diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h +index d3d7469..677ef72 100644 +--- a/arch/x86/include/asm/floppy.h ++++ b/arch/x86/include/asm/floppy.h +@@ -229,18 +229,18 @@ static struct fd_routine_l { + int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); + } fd_routine[] = { + { +- request_dma, +- free_dma, +- get_dma_residue, +- dma_mem_alloc, +- hard_dma_setup ++ ._request_dma = request_dma, ++ ._free_dma = free_dma, ++ ._get_dma_residue = get_dma_residue, ++ ._dma_mem_alloc = dma_mem_alloc, ++ ._dma_setup = hard_dma_setup + }, + { +- vdma_request_dma, +- vdma_nop, +- vdma_get_dma_residue, +- vdma_mem_alloc, +- vdma_dma_setup ++ ._request_dma = vdma_request_dma, ++ ._free_dma = vdma_nop, ++ ._get_dma_residue = vdma_get_dma_residue, ++ ._dma_mem_alloc = vdma_mem_alloc, ++ ._dma_setup = vdma_dma_setup + } + }; + +diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h +index cea1c76..6c0d79b 100644 +--- a/arch/x86/include/asm/fpu-internal.h ++++ b/arch/x86/include/asm/fpu-internal.h +@@ -124,8 +124,11 @@ static inline 
void sanitize_i387_state(struct task_struct *tsk) + #define user_insn(insn, output, input...) \ + ({ \ + int err; \ ++ pax_open_userland(); \ + asm volatile(ASM_STAC "\n" \ +- "1:" #insn "\n\t" \ ++ "1:" \ ++ __copyuser_seg \ ++ #insn "\n\t" \ + "2: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl $-1,%[err]\n" \ +@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk) + _ASM_EXTABLE(1b, 3b) \ + : [err] "=r" (err), output \ + : "0"(0), input); \ ++ pax_close_userland(); \ + err; \ + }) + +@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk) + "fnclex\n\t" + "emms\n\t" + "fildl %P[addr]" /* set F?P to defined value */ +- : : [addr] "m" (tsk->thread.fpu.has_fpu)); ++ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0)); + } + + return fpu_restore_checking(&tsk->thread.fpu); +diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h +index b4c1f54..e290c08 100644 +--- a/arch/x86/include/asm/futex.h ++++ b/arch/x86/include/asm/futex.h +@@ -12,6 +12,7 @@ + #include <asm/smap.h> + + #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 __user *, uaddr); \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\t" insn "\n" \ + "2:\t" ASM_CLAC "\n" \ +@@ -20,15 +21,16 @@ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ ++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \ + : "i" (-EFAULT), "0" (oparg), "1" (0)) + + #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 __user *, uaddr); \ + asm volatile("\t" ASM_STAC "\n" \ + "1:\tmovl %2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ +- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ ++ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \ + "\tjnz\t1b\n" \ + "3:\t" ASM_CLAC "\n" \ + "\t.section .fixup,\"ax\"\n" \ +@@ -38,7 +40,7 @@ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ +- "+m" (*uaddr), "=&r" (tem) \ ++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) + + static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) + + pagefault_disable(); + ++ pax_open_userland(); + switch (op) { + case FUTEX_OP_SET: +- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); ++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: +- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, ++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval, + uaddr, oparg); + break; + case FUTEX_OP_OR: +@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) + default: + ret = -ENOSYS; + } ++ pax_close_userland(); + + pagefault_enable(); + +diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h +index 67d69b8..50e4b77 100644 +--- a/arch/x86/include/asm/hw_irq.h ++++ b/arch/x86/include/asm/hw_irq.h +@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void); + extern void enable_IO_APIC(void); + + /* Statistics */ +-extern atomic_t irq_err_count; +-extern atomic_t irq_mis_count; ++extern atomic_unchecked_t irq_err_count; ++extern atomic_unchecked_t irq_mis_count; + + /* EISA */ + extern void eisa_set_level_irq(unsigned int irq); +diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h +index a203659..9889f1c 100644 +--- 
a/arch/x86/include/asm/i8259.h ++++ b/arch/x86/include/asm/i8259.h +@@ -62,7 +62,7 @@ struct legacy_pic { + void (*init)(int auto_eoi); + int (*irq_pending)(unsigned int irq); + void (*make_irq)(unsigned int irq); +-}; ++} __do_const; + + extern struct legacy_pic *legacy_pic; + extern struct legacy_pic null_legacy_pic; +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h +index 91d9c69..dfae7d0 100644 +--- a/arch/x86/include/asm/io.h ++++ b/arch/x86/include/asm/io.h +@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \ + "m" (*(volatile type __force *)addr) barrier); } + + build_mmio_read(readb, "b", unsigned char, "=q", :"memory") +-build_mmio_read(readw, "w", unsigned short, "=r", :"memory") +-build_mmio_read(readl, "l", unsigned int, "=r", :"memory") ++build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory") ++build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory") + + build_mmio_read(__readb, "b", unsigned char, "=q", ) +-build_mmio_read(__readw, "w", unsigned short, "=r", ) +-build_mmio_read(__readl, "l", unsigned int, "=r", ) ++build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", ) ++build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", ) + + build_mmio_write(writeb, "b", unsigned char, "q", :"memory") + build_mmio_write(writew, "w", unsigned short, "r", :"memory") +@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) + return ioremap_nocache(offset, size); + } + +-extern void iounmap(volatile void __iomem *addr); ++extern void iounmap(const volatile void __iomem *addr); + + extern void set_iounmap_nonlazy(void); + +@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void); + + #include <linux/vmalloc.h> + ++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE ++static inline int valid_phys_addr_range(unsigned long addr, size_t count) ++{ ++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ ++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) ++{ ++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ + /* + * Convert a virtual cached pointer to an uncached pointer + */ +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index 0a8b519..80e7d5b 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void) + sti; \ + sysexit + ++#define GET_CR0_INTO_RDI mov %cr0, %rdi ++#define SET_RDI_INTO_CR0 mov %rdi, %cr0 ++#define GET_CR3_INTO_RDI mov %cr3, %rdi ++#define SET_RDI_INTO_CR3 mov %rdi, %cr3 ++ + #else + #define INTERRUPT_RETURN iret + #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit +diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h +index 9454c16..e4100e3 100644 +--- a/arch/x86/include/asm/kprobes.h ++++ b/arch/x86/include/asm/kprobes.h +@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t; + #define RELATIVEJUMP_SIZE 5 + #define RELATIVECALL_OPCODE 0xe8 + #define RELATIVE_ADDR_SIZE 4 +-#define MAX_STACK_SIZE 64 +-#define MIN_STACK_SIZE(ADDR) \ +- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ +- THREAD_SIZE - (unsigned long)(ADDR))) \ +- ? 
(MAX_STACK_SIZE) \ +- : (((unsigned long)current_thread_info()) + \ +- THREAD_SIZE - (unsigned long)(ADDR))) ++#define MAX_STACK_SIZE 64UL ++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR)) + + #define flush_insn_slot(p) do { } while (0) + +diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h +index 4ad6560..75c7bdd 100644 +--- a/arch/x86/include/asm/local.h ++++ b/arch/x86/include/asm/local.h +@@ -10,33 +10,97 @@ typedef struct { + atomic_long_t a; + } local_t; + ++typedef struct { ++ atomic_long_unchecked_t a; ++} local_unchecked_t; ++ + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + + #define local_read(l) atomic_long_read(&(l)->a) ++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) + #define local_set(l, i) atomic_long_set(&(l)->a, (i)) ++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) + + static inline void local_inc(local_t *l) + { +- asm volatile(_ASM_INC "%0" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_DEC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter)); ++} ++ ++static inline void local_inc_unchecked(local_unchecked_t *l) ++{ ++ asm volatile(_ASM_INC "%0\n" + : "+m" (l->a.counter)); + } + + static inline void local_dec(local_t *l) + { +- asm volatile(_ASM_DEC "%0" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_INC "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter)); ++} ++ ++static inline void local_dec_unchecked(local_unchecked_t *l) ++{ ++ asm volatile(_ASM_DEC "%0\n" + : "+m" (l->a.counter)); + } + + static inline void local_add(long i, local_t *l) + { +- asm volatile(_ASM_ADD "%1,%0" ++ asm volatile(_ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_SUB "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter) ++ : "ir" (i)); ++} ++ ++static inline void local_add_unchecked(long i, local_unchecked_t *l) ++{ ++ asm volatile(_ASM_ADD "%1,%0\n" + : "+m" (l->a.counter) + : "ir" (i)); + } + + static inline void local_sub(long i, local_t *l) + { +- asm volatile(_ASM_SUB "%1,%0" ++ asm volatile(_ASM_SUB "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_ADD "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (l->a.counter) ++ : "ir" (i)); ++} ++ ++static inline void local_sub_unchecked(long i, local_unchecked_t *l) ++{ ++ asm volatile(_ASM_SUB "%1,%0\n" + : "+m" (l->a.counter) + : "ir" (i)); + } +@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l) + */ + static inline int local_sub_and_test(long i, local_t *l) + { +- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e"); ++ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e"); + } + + /** +@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l) + */ + static inline int local_dec_and_test(local_t *l) + { +- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e"); ++ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e"); + } + + /** +@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l) + */ + static inline int local_inc_and_test(local_t *l) + { +- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e"); ++ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e"); + } + + /** +@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l) + */ + static inline int local_add_negative(long i, local_t *l) + { +- 
GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s"); ++ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s"); + } + + /** +@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l) + static inline long local_add_return(long i, local_t *l) + { + long __i = i; ++ asm volatile(_ASM_XADD "%0, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_MOV "%0,%1\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+r" (i), "+m" (l->a.counter) ++ : : "memory"); ++ return i + __i; ++} ++ ++/** ++ * local_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @l: pointer to type local_unchecked_t ++ * ++ * Atomically adds @i to @l and returns @i + @l ++ */ ++static inline long local_add_return_unchecked(long i, local_unchecked_t *l) ++{ ++ long __i = i; + asm volatile(_ASM_XADD "%0, %1;" + : "+r" (i), "+m" (l->a.counter) + : : "memory"); +@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l) + + #define local_cmpxchg(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) ++#define local_cmpxchg_unchecked(l, o, n) \ ++ (cmpxchg_local(&((l)->a.counter), (o), (n))) + /* Always has a lock prefix */ + #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) + +diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h +new file mode 100644 +index 0000000..2bfd3ba +--- /dev/null ++++ b/arch/x86/include/asm/mman.h +@@ -0,0 +1,15 @@ ++#ifndef _X86_MMAN_H ++#define _X86_MMAN_H ++ ++#include <uapi/asm/mman.h> ++ ++#ifdef __KERNEL__ ++#ifndef __ASSEMBLY__ ++#ifdef CONFIG_X86_32 ++#define arch_mmap_check i386_mmap_check ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); ++#endif ++#endif ++#endif ++ ++#endif /* X86_MMAN_H */ +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h +index 5f55e69..e20bfb1 100644 +--- a/arch/x86/include/asm/mmu.h ++++ b/arch/x86/include/asm/mmu.h +@@ -9,7 +9,7 @@ + * we put the segment information here. 
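
The new asm/mman.h above gives 32-bit x86 an arch_mmap_check() hook, i386_mmap_check(), which PaX uses to vet mmap() requests against the SEGMEXEC address-space split before the generic code acts on them. The shape of such a hook, with placeholder policy (the bound and flag below are illustrative, not the patch's actual checks):

#include <errno.h>

#define SKETCH_MAP_FIXED	0x10		/* MAP_FIXED on x86  */
#define SKETCH_TASK_LIMIT	0xc0000000UL	/* placeholder bound */

static int mmap_check_sketch(unsigned long addr, unsigned long len,
			     unsigned long flags)
{
	if ((flags & SKETCH_MAP_FIXED) && addr + len > SKETCH_TASK_LIMIT)
		return -EINVAL;
	return 0;
}
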
+ */ + typedef struct { +- void *ldt; ++ struct desc_struct *ldt; + int size; + + #ifdef CONFIG_X86_64 +@@ -18,7 +18,19 @@ typedef struct { + #endif + + struct mutex lock; +- void *vdso; ++ unsigned long vdso; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ unsigned long user_cs_base; ++ unsigned long user_cs_limit; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpumask_t cpu_user_cs_mask; ++#endif ++ ++#endif ++#endif + } mm_context_t; + + #ifdef CONFIG_SMP +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h +index be12c53..4d24039 100644 +--- a/arch/x86/include/asm/mmu_context.h ++++ b/arch/x86/include/asm/mmu_context.h +@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm); + + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) + { ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (!(static_cpu_has(X86_FEATURE_PCID))) { ++ unsigned int i; ++ pgd_t *pgd; ++ ++ pax_open_kernel(); ++ pgd = get_cpu_pgd(smp_processor_id(), kernel); ++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) ++ set_pgd_batched(pgd+i, native_make_pgd(0)); ++ pax_close_kernel(); ++ } ++#endif ++ + #ifdef CONFIG_SMP + if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); +@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) + { + unsigned cpu = smp_processor_id(); ++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ int tlbstate = TLBSTATE_OK; ++#endif + + if (likely(prev != next)) { + #ifdef CONFIG_SMP ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ tlbstate = this_cpu_read(cpu_tlbstate.state); ++#endif + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); + this_cpu_write(cpu_tlbstate.active_mm, next); + #endif + cpumask_set_cpu(cpu, mm_cpumask(next)); + + /* Re-load page tables */ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (static_cpu_has(X86_FEATURE_PCID)) ++ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd); ++ else ++#endif ++ ++ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd); ++ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd); ++ pax_close_kernel(); ++ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK)); ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (static_cpu_has(X86_FEATURE_PCID)) { ++ if (static_cpu_has(X86_FEATURE_INVPCID)) { ++ u64 descriptor[2]; ++ descriptor[0] = PCID_USER; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory"); ++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) { ++ descriptor[0] = PCID_KERNEL; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory"); ++ } ++ } else { ++ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER); ++ if (static_cpu_has(X86_FEATURE_STRONGUDEREF)) ++ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); ++ else ++ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL); ++ } ++ } else ++#endif ++ ++ load_cr3(get_cpu_pgd(cpu, kernel)); ++#else + load_cr3(next->pgd); ++#endif + + /* Stop flush ipis for the previous mm */ 
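
In the PCID branches above, UDEREF keeps two per-CPU page-table roots and tags them with distinct address-space IDs in the low bits of %cr3; bit 63 of %cr3 is the architectural no-flush bit, so hopping between the kernel and user roots does not discard TLB entries. How those cr3 values are composed (a sketch; reading PCID_KERNEL and PCID_USER as ASIDs 0 and 1 is an assumption based on the patch's usage):

#define PCID_KERNEL_SKETCH	0x0UL
#define PCID_USER_SKETCH	0x1UL
#define PCID_NOFLUSH_SKETCH	(1UL << 63)	/* architectural cr3 bit */

static inline unsigned long mk_cr3_sketch(unsigned long pgd_phys,
					  unsigned long pcid, int noflush)
{
	return pgd_phys | pcid | (noflush ? PCID_NOFLUSH_SKETCH : 0);
}

/* enter user pgd:  write_cr3(mk_cr3_sketch(user_pgd_pa, PCID_USER_SKETCH, 0));
 * back to kernel:  write_cr3(mk_cr3_sketch(kern_pgd_pa, PCID_KERNEL_SKETCH, 1)); */
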
+ cpumask_clear_cpu(cpu, mm_cpumask(prev)); +@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + /* Load the LDT, if the LDT is different: */ + if (unlikely(prev->context.ldt != next->context.ldt)) + load_LDT_nolock(&next->context); ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ if (!(__supported_pte_mask & _PAGE_NX)) { ++ smp_mb__before_clear_bit(); ++ cpu_clear(cpu, prev->context.cpu_user_cs_mask); ++ smp_mb__after_clear_bit(); ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++ } ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || ++ prev->context.user_cs_limit != next->context.user_cs_limit)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#ifdef CONFIG_SMP ++ else if (unlikely(tlbstate != TLBSTATE_OK)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++#endif ++ + } ++ else { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (static_cpu_has(X86_FEATURE_PCID)) ++ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd); ++ else ++#endif ++ ++ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd); ++ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd); ++ pax_close_kernel(); ++ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK)); ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (static_cpu_has(X86_FEATURE_PCID)) { ++ if (static_cpu_has(X86_FEATURE_INVPCID)) { ++ u64 descriptor[2]; ++ descriptor[0] = PCID_USER; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory"); ++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) { ++ descriptor[0] = PCID_KERNEL; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory"); ++ } ++ } else { ++ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER); ++ if (static_cpu_has(X86_FEATURE_STRONGUDEREF)) ++ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); ++ else ++ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL); ++ } ++ } else ++#endif ++ ++ load_cr3(get_cpu_pgd(cpu, kernel)); ++#endif ++ + #ifdef CONFIG_SMP +- else { + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); + +@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + * tlb flush IPI delivery. We must reload CR3 + * to make sure to use no freed page tables. 
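
set_user_cs(), called from the PAGEEXEC/SEGMEXEC paths above, rebuilds the default user code segment so its limit tracks the task's executable region; pack_descriptor(&d, base, limit, 0xFB, 0xC) encodes a present, DPL-3, readable code segment with 4K granularity. The 8-byte descriptor layout it produces, modeled stand-alone:

/* Stand-alone model of pack_descriptor(&d, base, limit, 0xFB, 0xC). */
static unsigned long long pack_user_cs_sketch(unsigned int base,
					      unsigned int limit)
{
	return	 (limit & 0xffffULL)				    /* limit 15:0  */
	       | ((unsigned long long)(base & 0xffffffU) << 16)	    /* base 23:0   */
	       | (0xfbULL << 40)		  /* P=1, DPL=3, code, readable */
	       | ((unsigned long long)(limit & 0xf0000U) << 32)	    /* limit 19:16 */
	       | (0xcULL << 52)					    /* G=1, D/B=1  */
	       | ((unsigned long long)(base & 0xff000000U) << 32); /* base 31:24  */
}
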
+ */ ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(next->pgd); ++#endif ++ + load_LDT_nolock(&next->context); ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX)) ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) ++#endif ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++ + } ++#endif + } +-#endif + } + + #define activate_mm(prev, next) \ +diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h +index e3b7819..b257c64 100644 +--- a/arch/x86/include/asm/module.h ++++ b/arch/x86/include/asm/module.h +@@ -5,6 +5,7 @@ + + #ifdef CONFIG_X86_64 + /* X86_64 does not define MODULE_PROC_FAMILY */ ++#define MODULE_PROC_FAMILY "" + #elif defined CONFIG_M486 + #define MODULE_PROC_FAMILY "486 " + #elif defined CONFIG_M586 +@@ -57,8 +58,20 @@ + #error unknown processor family + #endif + +-#ifdef CONFIG_X86_32 +-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS ++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS " ++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) ++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR " ++#else ++#define MODULE_PAX_KERNEXEC "" + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define MODULE_PAX_UDEREF "UDEREF " ++#else ++#define MODULE_PAX_UDEREF "" ++#endif ++ ++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF ++ + #endif /* _ASM_X86_MODULE_H */ +diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h +index 86f9301..b365cda 100644 +--- a/arch/x86/include/asm/nmi.h ++++ b/arch/x86/include/asm/nmi.h +@@ -40,11 +40,11 @@ struct nmiaction { + nmi_handler_t handler; + unsigned long flags; + const char *name; +-}; ++} __do_const; + + #define register_nmi_handler(t, fn, fg, n, init...) \ + ({ \ +- static struct nmiaction init fn##_na = { \ ++ static const struct nmiaction init fn##_na = { \ + .handler = (fn), \ + .name = (n), \ + .flags = (fg), \ +@@ -52,7 +52,7 @@ struct nmiaction { + __register_nmi_handler((t), &fn##_na); \ + }) + +-int __register_nmi_handler(unsigned int, struct nmiaction *); ++int __register_nmi_handler(unsigned int, const struct nmiaction *); + + void unregister_nmi_handler(unsigned int, const char *); + +diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h +index 775873d..04cd306 100644 +--- a/arch/x86/include/asm/page.h ++++ b/arch/x86/include/asm/page.h +@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x))) + + #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) ++#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base)) + + #define __boot_va(x) __va(x) + #define __boot_pa(x) __pa(x) +@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, + * virt_to_page(kaddr) returns a valid pointer if and only if + * virt_addr_valid(kaddr) returns true. 
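
The module.h change above folds the KERNEXEC and UDEREF configuration into the module vermagic, so a module built without matching PaX options is refused at load time instead of running with mismatched address-space assumptions. The tags are plain string literals, so the final vermagic is assembled by compile-time concatenation (runnable sketch of the patch's macros under one sample configuration):

#include <stdio.h>

#define MODULE_PROC_FAMILY	""		/* x86_64 in the patch  */
#define MODULE_PAX_KERNEXEC	"KERNEXEC_OR "	/* sample configuration */
#define MODULE_PAX_UDEREF	"UDEREF "
#define MODULE_ARCH_VERMAGIC	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	puts(MODULE_ARCH_VERMAGIC);	/* prints: KERNEXEC_OR UDEREF */
	return 0;
}
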
+ */ +-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) + #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + extern bool __virt_addr_valid(unsigned long kaddr); + #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr)) + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++#define virt_to_page(kaddr) \ ++ ({ \ ++ const void *__kaddr = (const void *)(kaddr); \ ++ BUG_ON(!virt_addr_valid(__kaddr)); \ ++ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \ ++ }) ++#else ++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) ++#endif ++ + #endif /* __ASSEMBLY__ */ + + #include <asm-generic/memory_model.h> +diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h +index 0f1ddee..e2fc3d1 100644 +--- a/arch/x86/include/asm/page_64.h ++++ b/arch/x86/include/asm/page_64.h +@@ -7,9 +7,9 @@ + + /* duplicated to the one in bootmem.h */ + extern unsigned long max_pfn; +-extern unsigned long phys_base; ++extern const unsigned long phys_base; + +-static inline unsigned long __phys_addr_nodebug(unsigned long x) ++static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x) + { + unsigned long y = x - __START_KERNEL_map; + +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h +index 8de6d9c..6782051 100644 +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -1,7 +1,7 @@ + #ifndef _ASM_X86_PAGE_64_DEFS_H + #define _ASM_X86_PAGE_64_DEFS_H + +-#define THREAD_SIZE_ORDER 1 ++#define THREAD_SIZE_ORDER 2 + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + #define CURRENT_MASK (~(THREAD_SIZE - 1)) + +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h +index cd6e1610..70f4418 100644 +--- a/arch/x86/include/asm/paravirt.h ++++ b/arch/x86/include/asm/paravirt.h +@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val) + return (pmd_t) { ret }; + } + +-static inline pmdval_t pmd_val(pmd_t pmd) ++static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd) + { + pmdval_t ret; + +@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) + val); + } + ++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd) ++{ ++ pgdval_t val = native_pgd_val(pgd); ++ ++ if (sizeof(pgdval_t) > sizeof(long)) ++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp, ++ val, (u64)val >> 32); ++ else ++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp, ++ val); ++} ++ + static inline void pgd_clear(pgd_t *pgdp) + { + set_pgd(pgdp, __pgd(0)); +@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, + pv_mmu_ops.set_fixmap(idx, phys, flags); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long pax_open_kernel(void) ++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); ++} ++ ++static inline unsigned long pax_close_kernel(void) ++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); ++} ++#else ++static inline unsigned long pax_open_kernel(void) { return 0; } ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) + + static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock, +@@ -906,7 +933,7 @@ extern void default_banner(void); + + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) + #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) +-#define PARA_INDIRECT(addr) *%cs:addr ++#define PARA_INDIRECT(addr) 
*%ss:addr + #endif + + #define INTERRUPT_RETURN \ +@@ -981,6 +1008,21 @@ extern void default_banner(void); + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ + CLBR_NONE, \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) ++ ++#define GET_CR0_INTO_RDI \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR0 \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++ ++#define GET_CR3_INTO_RDI \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR3 \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) ++ + #endif /* CONFIG_X86_32 */ + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h +index 7549b8b..f0edfda 100644 +--- a/arch/x86/include/asm/paravirt_types.h ++++ b/arch/x86/include/asm/paravirt_types.h +@@ -84,7 +84,7 @@ struct pv_init_ops { + */ + unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, + unsigned long addr, unsigned len); +-}; ++} __no_const __no_randomize_layout; + + + struct pv_lazy_ops { +@@ -92,13 +92,13 @@ struct pv_lazy_ops { + void (*enter)(void); + void (*leave)(void); + void (*flush)(void); +-}; ++} __no_randomize_layout; + + struct pv_time_ops { + unsigned long long (*sched_clock)(void); + unsigned long long (*steal_clock)(int cpu); + unsigned long (*get_tsc_khz)(void); +-}; ++} __no_const __no_randomize_layout; + + struct pv_cpu_ops { + /* hooks for various privileged instructions */ +@@ -192,7 +192,7 @@ struct pv_cpu_ops { + + void (*start_context_switch)(struct task_struct *prev); + void (*end_context_switch)(struct task_struct *next); +-}; ++} __no_const __no_randomize_layout; + + struct pv_irq_ops { + /* +@@ -215,7 +215,7 @@ struct pv_irq_ops { + #ifdef CONFIG_X86_64 + void (*adjust_exception_frame)(void); + #endif +-}; ++} __no_randomize_layout; + + struct pv_apic_ops { + #ifdef CONFIG_X86_LOCAL_APIC +@@ -223,7 +223,7 @@ struct pv_apic_ops { + unsigned long start_eip, + unsigned long start_esp); + #endif +-}; ++} __no_const __no_randomize_layout; + + struct pv_mmu_ops { + unsigned long (*read_cr2)(void); +@@ -313,6 +313,7 @@ struct pv_mmu_ops { + struct paravirt_callee_save make_pud; + + void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); ++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval); + #endif /* PAGETABLE_LEVELS == 4 */ + #endif /* PAGETABLE_LEVELS >= 3 */ + +@@ -324,7 +325,13 @@ struct pv_mmu_ops { + an mfn. We can tell which is which from the index. */ + void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags); +-}; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ unsigned long (*pax_open_kernel)(void); ++ unsigned long (*pax_close_kernel)(void); ++#endif ++ ++} __no_randomize_layout; + + struct arch_spinlock; + #ifdef CONFIG_SMP +@@ -336,11 +343,14 @@ typedef u16 __ticket_t; + struct pv_lock_ops { + struct paravirt_callee_save lock_spinning; + void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket); +-}; ++} __no_randomize_layout; + + /* This contains all the paravirt structures: we get a convenient + * number for each function using the offset which we use to indicate +- * what to patch. */ ++ * what to patch. 
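Why the __no_randomize_layout annotations above matter: paravirt patching identifies each patchable operation by its word offset inside paravirt_patch_template, so member order is effectively ABI within the kernel image and must not be shuffled by the structure-randomization plugin. A toy model of that offset-to-index mapping, with invented struct and macro names:

#include <stddef.h>
#include <stdio.h>

struct toy_ops {
        void (*save_fl)(void);
        void (*restore_fl)(void);
        void (*irq_disable)(void);
};

/* patch-site index = member offset in pointer-sized units */
#define TOY_PATCH(member) \
        (offsetof(struct toy_ops, member) / sizeof(void *))

int main(void)
{
        /* stable only while the declaration order above holds */
        printf("%zu\n", TOY_PATCH(irq_disable)); /* prints 2 */
        return 0;
}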
++ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c ++ */ ++ + struct paravirt_patch_template { + struct pv_init_ops pv_init_ops; + struct pv_time_ops pv_time_ops; +@@ -349,7 +359,7 @@ struct paravirt_patch_template { + struct pv_apic_ops pv_apic_ops; + struct pv_mmu_ops pv_mmu_ops; + struct pv_lock_ops pv_lock_ops; +-}; ++} __no_randomize_layout; + + extern struct pv_info pv_info; + extern struct pv_init_ops pv_init_ops; +diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h +index c4412e9..90e88c5 100644 +--- a/arch/x86/include/asm/pgalloc.h ++++ b/arch/x86/include/asm/pgalloc.h +@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) + { + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); ++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); ++} ++ ++static inline void pmd_populate_user(struct mm_struct *mm, ++ pmd_t *pmd, pte_t *pte) ++{ ++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); + } + +@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, + + #ifdef CONFIG_X86_PAE + extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) ++{ ++ pud_populate(mm, pudp, pmd); ++} + #else /* !CONFIG_X86_PAE */ + static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) + { + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); + set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); + } ++ ++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) ++{ ++ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); ++ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd))); ++} + #endif /* CONFIG_X86_PAE */ + + #if PAGETABLE_LEVELS > 3 +@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) + set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))); + } + ++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) ++{ ++ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); ++ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud))); ++} ++ + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) + { + return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); +diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h +index 0d193e2..bf59aeb 100644 +--- a/arch/x86/include/asm/pgtable-2level.h ++++ b/arch/x86/include/asm/pgtable-2level.h +@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h +index 81bb91b..9392125 100644 +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); ++ pax_close_kernel(); + } + + static inline void native_set_pud(pud_t *pudp, pud_t pud) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); ++ pax_close_kernel(); + } + + /* +diff --git a/arch/x86/include/asm/pgtable.h 
b/arch/x86/include/asm/pgtable.h +index bbc8b12..f228861 100644 +--- a/arch/x86/include/asm/pgtable.h ++++ b/arch/x86/include/asm/pgtable.h +@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); + + #ifndef __PAGETABLE_PUD_FOLDED + #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) ++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd) + #define pgd_clear(pgd) native_pgd_clear(pgd) + #endif + +@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); + + #define arch_end_context_switch(prev) do {} while(0) + ++#define pax_open_kernel() native_pax_open_kernel() ++#define pax_close_kernel() native_pax_close_kernel() + #endif /* CONFIG_PARAVIRT */ + ++#define __HAVE_ARCH_PAX_OPEN_KERNEL ++#define __HAVE_ARCH_PAX_CLOSE_KERNEL ++ ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long native_pax_open_kernel(void) ++{ ++ unsigned long cr0; ++ ++ preempt_disable(); ++ barrier(); ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(cr0 & X86_CR0_WP); ++ write_cr0(cr0); ++ return cr0 ^ X86_CR0_WP; ++} ++ ++static inline unsigned long native_pax_close_kernel(void) ++{ ++ unsigned long cr0; ++ ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(!(cr0 & X86_CR0_WP)); ++ write_cr0(cr0); ++ barrier(); ++ preempt_enable_no_resched(); ++ return cr0 ^ X86_CR0_WP; ++} ++#else ++static inline unsigned long native_pax_open_kernel(void) { return 0; } ++static inline unsigned long native_pax_close_kernel(void) { return 0; } ++#endif ++ + /* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ ++static inline int pte_user(pte_t pte) ++{ ++ return pte_val(pte) & _PAGE_USER; ++} ++ + static inline int pte_dirty(pte_t pte) + { + return pte_flags(pte) & _PAGE_DIRTY; +@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud) + return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; + } + ++static inline unsigned long pgd_pfn(pgd_t pgd) ++{ ++ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT; ++} ++ + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) + + static inline int pmd_large(pmd_t pte) +@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte) + return pte_clear_flags(pte, _PAGE_RW); + } + ++static inline pte_t pte_mkread(pte_t pte) ++{ ++ return __pte(pte_val(pte) | _PAGE_USER); ++} ++ + static inline pte_t pte_mkexec(pte_t pte) + { +- return pte_clear_flags(pte, _PAGE_NX); ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_clear_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_set_flags(pte, _PAGE_USER); ++} ++ ++static inline pte_t pte_exprotect(pte_t pte) ++{ ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_set_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_clear_flags(pte, _PAGE_USER); + } + + static inline pte_t pte_mkdirty(pte_t pte) +@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr); + #endif + + #ifndef __ASSEMBLY__ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD]; ++enum cpu_pgd_type {kernel = 0, user = 1}; ++static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type) ++{ ++ return cpu_pgd[cpu][type]; ++} ++#endif ++ + #include <linux/mm_types.h> + #include <linux/mmdebug.h> + #include <linux/log2.h> +@@ -570,7 +645,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud) + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) 
++#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT) + + /* Find an entry in the second-level page table.. */ + static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +@@ -610,7 +685,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) + * Currently stuck as a macro due to indirect forward reference to + * linux/mmzone.h's __section_mem_map_addr() definition: + */ +-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) ++#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT) + + /* to find an entry in a page-table-directory. */ + static inline unsigned long pud_index(unsigned long address) +@@ -625,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) + + static inline int pgd_bad(pgd_t pgd) + { +- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; ++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; + } + + static inline int pgd_none(pgd_t pgd) +@@ -648,7 +723,12 @@ static inline int pgd_none(pgd_t pgd) + * pgd_offset() returns a (pgd_t *) + * pgd_index() is used get the offset into the pgd page's array of pgd_t's; + */ +-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address)) ++#endif ++ + /* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's +@@ -659,6 +739,23 @@ static inline int pgd_none(pgd_t pgd) + #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) + #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) + ++#ifdef CONFIG_X86_32 ++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY ++#else ++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT ++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#ifdef __ASSEMBLY__ ++#define pax_user_shadow_base pax_user_shadow_base(%rip) ++#else ++extern unsigned long pax_user_shadow_base; ++extern pgdval_t clone_pgd_mask; ++#endif ++#endif ++ ++#endif ++ + #ifndef __ASSEMBLY__ + + extern int direct_gbpages; +@@ -825,11 +922,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, + * dst and src can be on the same page, but the range must not overlap, + * and must not cross a page boundary. 
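On the pud_page()/pgd_page() masking fix a little above: once NX (bit 63) or other high attribute bits can appear in upper-level entries, shifting the raw value leaks those bits into the page frame number, so the value must be masked with PTE_PFN_MASK first. A userspace demonstration with invented values and an assumed 46-bit physical address width:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* made-up entry: NX (bit 63) | pfn 0x1234 | PRESENT/RW/... flags */
        uint64_t pud = (1ULL << 63) | (0x1234ULL << 12) | 0x63;
        /* PTE_PFN_MASK-style mask: clear the low flag bits and
           everything above the assumed physical address width */
        uint64_t pfn_mask = ((1ULL << 46) - 1) & ~0xfffULL;

        printf("unmasked pfn: %#llx\n",
               (unsigned long long)(pud >> 12));              /* 0x8000000001234 */
        printf("masked pfn:   %#llx\n",
               (unsigned long long)((pud & pfn_mask) >> 12)); /* 0x1234 */
        return 0;
}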
+ */ +-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) ++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) + { +- memcpy(dst, src, count * sizeof(pgd_t)); ++ pax_open_kernel(); ++ while (count--) ++ *dst++ = *src++; ++ pax_close_kernel(); + } + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src); ++#endif ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src); ++#else ++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {} ++#endif ++ + #define PTE_SHIFT ilog2(PTRS_PER_PTE) + static inline int page_level_shift(enum pg_level level) + { +diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h +index 9ee3221..b979c6b 100644 +--- a/arch/x86/include/asm/pgtable_32.h ++++ b/arch/x86/include/asm/pgtable_32.h +@@ -25,9 +25,6 @@ + struct mm_struct; + struct vm_area_struct; + +-extern pgd_t swapper_pg_dir[1024]; +-extern pgd_t initial_page_table[1024]; +- + static inline void pgtable_cache_init(void) { } + static inline void check_pgt_cache(void) { } + void paging_init(void); +@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); + # include <asm/pgtable-2level.h> + #endif + ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; ++extern pgd_t initial_page_table[PTRS_PER_PGD]; ++#ifdef CONFIG_X86_PAE ++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; ++#endif ++ + #if defined(CONFIG_HIGHPTE) + #define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ +@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); + /* Clear a kernel PTE and flush it from the TLB */ + #define kpte_clear_flush(ptep, vaddr) \ + do { \ ++ pax_open_kernel(); \ + pte_clear(&init_mm, (vaddr), (ptep)); \ ++ pax_close_kernel(); \ + __flush_tlb_one((vaddr)); \ + } while (0) + + #endif /* !__ASSEMBLY__ */ + ++#define HAVE_ARCH_UNMAPPED_AREA ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN ++ + /* + * kern_addr_valid() is (1) for FLATMEM and (0) for + * SPARSEMEM and DISCONTIGMEM +diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h +index ed5903b..c7fe163 100644 +--- a/arch/x86/include/asm/pgtable_32_types.h ++++ b/arch/x86/include/asm/pgtable_32_types.h +@@ -8,7 +8,7 @@ + */ + #ifdef CONFIG_X86_PAE + # include <asm/pgtable-3level_types.h> +-# define PMD_SIZE (1UL << PMD_SHIFT) ++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) + # define PMD_MASK (~(PMD_SIZE - 1)) + #else + # include <asm/pgtable-2level_types.h> +@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ + # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++#ifndef __ASSEMBLY__ ++extern unsigned char MODULES_EXEC_VADDR[]; ++extern unsigned char MODULES_EXEC_END[]; ++#endif ++#include <asm/boot.h> ++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) ++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) ++#else ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++#endif ++ + #define MODULES_VADDR VMALLOC_START + #define MODULES_END VMALLOC_END + #define MODULES_LEN (MODULES_VADDR - MODULES_END) +diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h +index e22c1db..23a625a 100644 +--- a/arch/x86/include/asm/pgtable_64.h ++++ b/arch/x86/include/asm/pgtable_64.h +@@ -16,10 +16,14 @@ + + extern pud_t 
level3_kernel_pgt[512]; + extern pud_t level3_ident_pgt[512]; ++extern pud_t level3_vmalloc_start_pgt[512]; ++extern pud_t level3_vmalloc_end_pgt[512]; ++extern pud_t level3_vmemmap_pgt[512]; ++extern pud_t level2_vmemmap_pgt[512]; + extern pmd_t level2_kernel_pgt[512]; + extern pmd_t level2_fixmap_pgt[512]; +-extern pmd_t level2_ident_pgt[512]; +-extern pgd_t init_level4_pgt[]; ++extern pmd_t level2_ident_pgt[512*2]; ++extern pgd_t init_level4_pgt[512]; + + #define swapper_pg_dir init_level4_pgt + +@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_pmd_clear(pmd_t *pmd) +@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) + + static inline void native_set_pud(pud_t *pudp, pud_t pud) + { ++ pax_open_kernel(); + *pudp = pud; ++ pax_close_kernel(); + } + + static inline void native_pud_clear(pud_t *pud) +@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud) + + static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) + { ++ pax_open_kernel(); ++ *pgdp = pgd; ++ pax_close_kernel(); ++} ++ ++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd) ++{ + *pgdp = pgd; + } + +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h +index 7166e25..baaa6fe 100644 +--- a/arch/x86/include/asm/pgtable_64_types.h ++++ b/arch/x86/include/asm/pgtable_64_types.h +@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t; + #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) + #define MODULES_END _AC(0xffffffffff000000, UL) + #define MODULES_LEN (MODULES_END - MODULES_VADDR) ++#define MODULES_EXEC_VADDR MODULES_VADDR ++#define MODULES_EXEC_END MODULES_END + #define ESPFIX_PGD_ENTRY _AC(-2, UL) + #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT) + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #define EARLY_DYNAMIC_PAGE_TABLES 64 + + #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h +index 94e40f1..ebd03e4 100644 +--- a/arch/x86/include/asm/pgtable_types.h ++++ b/arch/x86/include/asm/pgtable_types.h +@@ -16,13 +16,12 @@ + #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ + #define _PAGE_BIT_PAT 7 /* on 4KB pages */ + #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ +-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ ++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ + #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ + #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ ++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL ++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */ + #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + + /* If _PAGE_BIT_PRESENT is clear, we use these: */ +@@ -40,7 +39,6 @@ + #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) + #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) + #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) +-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) + #define _PAGE_IOMAP (_AT(pteval_t, 1) << 
_PAGE_BIT_IOMAP) + #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) +@@ -87,8 +85,10 @@ + + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +-#else ++#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY) + #define _PAGE_NX (_AT(pteval_t, 0)) ++#else ++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) + #endif + + #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) +@@ -147,6 +147,9 @@ + #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) + ++#define PAGE_READONLY_NOEXEC PAGE_READONLY ++#define PAGE_SHARED_NOEXEC PAGE_SHARED ++ + #define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) + #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) +@@ -157,7 +160,7 @@ + #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) + #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) +-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) ++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) + #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) + #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) +@@ -219,8 +222,8 @@ + * bits are combined, this will alow user to access the high address mapped + * VDSO in the presence of CONFIG_COMPAT_VDSO + */ +-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ +-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ ++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ ++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ + #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ + #endif + +@@ -258,7 +261,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd) + { + return native_pgd_val(pgd) & PTE_FLAGS_MASK; + } ++#endif + ++#if PAGETABLE_LEVELS == 3 ++#include <asm-generic/pgtable-nopud.h> ++#endif ++ ++#if PAGETABLE_LEVELS == 2 ++#include <asm-generic/pgtable-nopmd.h> ++#endif ++ ++#ifndef __ASSEMBLY__ + #if PAGETABLE_LEVELS > 3 + typedef struct { pudval_t pud; } pud_t; + +@@ -272,8 +285,6 @@ static inline pudval_t native_pud_val(pud_t pud) + return pud.pud; + } + #else +-#include <asm-generic/pgtable-nopud.h> +- + static inline pudval_t native_pud_val(pud_t pud) + { + return native_pgd_val(pud.pgd); +@@ -293,8 +304,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) + return pmd.pmd; + } + #else +-#include <asm-generic/pgtable-nopmd.h> +- + static inline pmdval_t native_pmd_val(pmd_t pmd) + { + return native_pgd_val(pmd.pud.pgd); +@@ -334,7 +343,6 @@ typedef struct page *pgtable_t; + + extern pteval_t __supported_pte_mask; + extern void set_nx(void); +-extern int nx_enabled; + + #define pgprot_writecombine pgprot_writecombine + extern pgprot_t pgprot_writecombine(pgprot_t prot); +diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h +index b39e194..9d44fd1 100644 +--- a/arch/x86/include/asm/preempt.h ++++ b/arch/x86/include/asm/preempt.h +@@ -99,7 +99,7 @@ static __always_inline void __preempt_count_sub(int val) + */ + static __always_inline bool __preempt_count_dec_and_test(void) + { +- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); ++ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e"); + } + + /* +diff --git a/arch/x86/include/asm/processor.h 
b/arch/x86/include/asm/processor.h +index fdedd38..95c02c2 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -128,7 +128,7 @@ struct cpuinfo_x86 { + /* Index into per_cpu list: */ + u16 cpu_index; + u32 microcode; +-} __attribute__((__aligned__(SMP_CACHE_BYTES))); ++} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout; + + #define X86_VENDOR_INTEL 0 + #define X86_VENDOR_CYRIX 1 +@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, + : "memory"); + } + ++/* invpcid (%rdx),%rax */ ++#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02" ++ ++#define INVPCID_SINGLE_ADDRESS 0UL ++#define INVPCID_SINGLE_CONTEXT 1UL ++#define INVPCID_ALL_GLOBAL 2UL ++#define INVPCID_ALL_MONGLOBAL 3UL ++ ++#define PCID_KERNEL 0UL ++#define PCID_USER 1UL ++#define PCID_NOFLUSH (1UL << 63) ++ + static inline void load_cr3(pgd_t *pgdir) + { +- write_cr3(__pa(pgdir)); ++ write_cr3(__pa(pgdir) | PCID_KERNEL); + } + + #ifdef CONFIG_X86_32 +@@ -283,7 +295,7 @@ struct tss_struct { + + } ____cacheline_aligned; + +-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); ++extern struct tss_struct init_tss[NR_CPUS]; + + /* + * Save the original ist values for checking stack pointers during debugging +@@ -470,6 +482,7 @@ struct thread_struct { + unsigned short ds; + unsigned short fsindex; + unsigned short gsindex; ++ unsigned short ss; + #endif + #ifdef CONFIG_X86_32 + unsigned long ip; +@@ -579,29 +592,8 @@ static inline void load_sp0(struct tss_struct *tss, + extern unsigned long mmu_cr4_features; + extern u32 *trampoline_cr4_features; + +-static inline void set_in_cr4(unsigned long mask) +-{ +- unsigned long cr4; +- +- mmu_cr4_features |= mask; +- if (trampoline_cr4_features) +- *trampoline_cr4_features = mmu_cr4_features; +- cr4 = read_cr4(); +- cr4 |= mask; +- write_cr4(cr4); +-} +- +-static inline void clear_in_cr4(unsigned long mask) +-{ +- unsigned long cr4; +- +- mmu_cr4_features &= ~mask; +- if (trampoline_cr4_features) +- *trampoline_cr4_features = mmu_cr4_features; +- cr4 = read_cr4(); +- cr4 &= ~mask; +- write_cr4(cr4); +-} ++extern void set_in_cr4(unsigned long mask); ++extern void clear_in_cr4(unsigned long mask); + + typedef struct { + unsigned long seg; +@@ -827,11 +819,18 @@ static inline void spin_lock_prefetch(const void *x) + */ + #define TASK_SIZE PAGE_OFFSET + #define TASK_SIZE_MAX TASK_SIZE ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) ++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) ++#else + #define STACK_TOP TASK_SIZE +-#define STACK_TOP_MAX STACK_TOP ++#endif ++ ++#define STACK_TOP_MAX TASK_SIZE + + #define INIT_THREAD { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .vm86_info = NULL, \ + .sysenter_cs = __KERNEL_CS, \ + .io_bitmap_ptr = NULL, \ +@@ -845,7 +844,7 @@ static inline void spin_lock_prefetch(const void *x) + */ + #define INIT_TSS { \ + .x86_tss = { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ +@@ -856,11 +855,7 @@ static inline void spin_lock_prefetch(const void *x) + extern unsigned long thread_saved_pc(struct task_struct *tsk); + + #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) +-#define KSTK_TOP(info) \ +-({ \ +- unsigned long *__ptr = (unsigned long *)(info); \ +- (unsigned 
long)(&__ptr[THREAD_SIZE_LONGS]); \ +-}) ++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0) + + /* + * The below -8 is to reserve 8 bytes on top of the ring0 stack. +@@ -875,7 +870,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + #define task_pt_regs(task) \ + ({ \ + struct pt_regs *__regs__; \ +- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ ++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \ + __regs__ - 1; \ + }) + +@@ -885,13 +880,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + /* + * User space process size. 47bits minus one guard page. + */ +-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) ++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) + + /* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ + #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ +- 0xc0000000 : 0xFFFFe000) ++ 0xc0000000 : 0xFFFFf000) + + #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE_MAX) +@@ -902,11 +897,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); + #define STACK_TOP_MAX TASK_SIZE_MAX + + #define INIT_THREAD { \ +- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ + } + + #define INIT_TSS { \ +- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ ++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ + } + + /* +@@ -934,6 +929,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, + */ + #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) ++#endif ++ + #define KSTK_EIP(task) (task_pt_regs(task)->ip) + + /* Get/set a process' ability to use the timestamp counter instruction */ +@@ -960,7 +959,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) + return 0; + } + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + + void default_idle(void); +@@ -970,6 +969,6 @@ bool xen_set_default_idle(void); + #define xen_set_default_idle 0 + #endif + +-void stop_this_cpu(void *dummy); ++void stop_this_cpu(void *dummy) __noreturn; + void df_debug(struct pt_regs *regs, long error_code); + #endif /* _ASM_X86_PROCESSOR_H */ +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h +index 6205f0c..b31a4a4 100644 +--- a/arch/x86/include/asm/ptrace.h ++++ b/arch/x86/include/asm/ptrace.h +@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) + } + + /* +- * user_mode_vm(regs) determines whether a register set came from user mode. ++ * user_mode(regs) determines whether a register set came from user mode. + * This is true if V8086 mode was enabled OR if the register set was from + * protected mode with RPL-3 CS value. This tricky test checks that with + * one comparison. Many places in the kernel can bypass this full check +- * if they have already ruled out V8086 mode, so user_mode(regs) can be used. ++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can ++ * be used. 
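The comment above compresses two conditions into a single compare. A standalone model of that test, with invented selector values and EFLAGS.VM at its real bit position: plain kernel mode yields 0, while an RPL-3 selector or a set VM flag pushes the OR-ed value to at least USER_RPL.

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1U << 17) /* EFLAGS.VM */

static int model_user_mode(unsigned int cs, unsigned int flags)
{
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("%d\n", model_user_mode(0x10, 0));        /* kernel CS, RPL 0 -> 0 */
        printf("%d\n", model_user_mode(0x73, 0));        /* user CS, RPL 3   -> 1 */
        printf("%d\n", model_user_mode(0x08, 1U << 17)); /* vm86, VM set     -> 1 */
        return 0;
}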
+ */ +-static inline int user_mode(struct pt_regs *regs) ++static inline int user_mode_novm(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; + #else +- return !!(regs->cs & 3); ++ return !!(regs->cs & SEGMENT_RPL_MASK); + #endif + } + +-static inline int user_mode_vm(struct pt_regs *regs) ++static inline int user_mode(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= + USER_RPL; + #else +- return user_mode(regs); ++ return user_mode_novm(regs); + #endif + } + +@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs) + #ifdef CONFIG_X86_64 + static inline bool user_64bit_mode(struct pt_regs *regs) + { ++ unsigned long cs = regs->cs & 0xffff; + #ifndef CONFIG_PARAVIRT + /* + * On non-paravirt systems, this is the only long mode CPL 3 + * selector. We do not allow long mode selectors in the LDT. + */ +- return regs->cs == __USER_CS; ++ return cs == __USER_CS; + #else + /* Headers are too twisted for this to go in paravirt.h. */ +- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; ++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs; + #endif + } + +@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, + * Traps from the kernel do not save sp and ss. + * Use the helper function to retrieve sp. + */ +- if (offset == offsetof(struct pt_regs, sp) && +- regs->cs == __KERNEL_CS) +- return kernel_stack_pointer(regs); ++ if (offset == offsetof(struct pt_regs, sp)) { ++ unsigned long cs = regs->cs & 0xffff; ++ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) ++ return kernel_stack_pointer(regs); ++ } + #endif + return *(unsigned long *)((unsigned long)regs + offset); + } +diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h +index 9c6b890..5305f53 100644 +--- a/arch/x86/include/asm/realmode.h ++++ b/arch/x86/include/asm/realmode.h +@@ -22,16 +22,14 @@ struct real_mode_header { + #endif + /* APM/BIOS reboot */ + u32 machine_real_restart_asm; +-#ifdef CONFIG_X86_64 + u32 machine_real_restart_seg; +-#endif + }; + + /* This must match data at trampoline_32/64.S */ + struct trampoline_header { + #ifdef CONFIG_X86_32 + u32 start; +- u16 gdt_pad; ++ u16 boot_cs; + u16 gdt_limit; + u32 gdt_base; + #else +diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h +index a82c4f1..ac45053 100644 +--- a/arch/x86/include/asm/reboot.h ++++ b/arch/x86/include/asm/reboot.h +@@ -6,13 +6,13 @@ + struct pt_regs; + + struct machine_ops { +- void (*restart)(char *cmd); +- void (*halt)(void); +- void (*power_off)(void); ++ void (* __noreturn restart)(char *cmd); ++ void (* __noreturn halt)(void); ++ void (* __noreturn power_off)(void); + void (*shutdown)(void); + void (*crash_shutdown)(struct pt_regs *); +- void (*emergency_restart)(void); +-}; ++ void (* __noreturn emergency_restart)(void); ++} __no_const; + + extern struct machine_ops machine_ops; + +diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h +index 8f7866a..e442f20 100644 +--- a/arch/x86/include/asm/rmwcc.h ++++ b/arch/x86/include/asm/rmwcc.h +@@ -3,7 +3,34 @@ + + #ifdef CC_HAVE_ASM_GOTO + +-#define __GEN_RMWcc(fullop, var, cc, ...) \ ++#ifdef CONFIG_PAX_REFCOUNT ++#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) 
\ ++do { \ ++ asm_volatile_goto (fullop \ ++ ";jno 0f\n" \ ++ fullantiop \ ++ ";int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ ";j" cc " %l[cc_label]" \ ++ : : "m" (var), ## __VA_ARGS__ \ ++ : "memory" : cc_label); \ ++ return 0; \ ++cc_label: \ ++ return 1; \ ++} while (0) ++#else ++#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \ ++do { \ ++ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \ ++ : : "m" (var), ## __VA_ARGS__ \ ++ : "memory" : cc_label); \ ++ return 0; \ ++cc_label: \ ++ return 1; \ ++} while (0) ++#endif ++ ++#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \ + do { \ + asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ + : : "m" (var), ## __VA_ARGS__ \ +@@ -13,15 +40,46 @@ cc_label: \ + return 1; \ + } while (0) + +-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ +- __GEN_RMWcc(op " " arg0, var, cc) ++#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \ ++ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc) + +-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ +- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) ++#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \ ++ __GEN_RMWcc_unchecked(op " " arg0, var, cc) ++ ++#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \ ++ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val)) ++ ++#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \ ++ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val)) + + #else /* !CC_HAVE_ASM_GOTO */ + +-#define __GEN_RMWcc(fullop, var, cc, ...) \ ++#ifdef CONFIG_PAX_REFCOUNT ++#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \ ++do { \ ++ char c; \ ++ asm volatile (fullop \ ++ ";jno 0f\n" \ ++ fullantiop \ ++ ";int $4\n0:\n" \ ++ _ASM_EXTABLE(0b, 0b) \ ++ "; set" cc " %1" \ ++ : "+m" (var), "=qm" (c) \ ++ : __VA_ARGS__ : "memory"); \ ++ return c != 0; \ ++} while (0) ++#else ++#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \ ++do { \ ++ char c; \ ++ asm volatile (fullop "; set" cc " %1" \ ++ : "+m" (var), "=qm" (c) \ ++ : __VA_ARGS__ : "memory"); \ ++ return c != 0; \ ++} while (0) ++#endif ++ ++#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) 
\ + do { \ + char c; \ + asm volatile (fullop "; set" cc " %1" \ +@@ -30,11 +88,17 @@ do { \ + return c != 0; \ + } while (0) + +-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ +- __GEN_RMWcc(op " " arg0, var, cc) ++#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \ ++ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc) ++ ++#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \ ++ __GEN_RMWcc_unchecked(op " " arg0, var, cc) ++ ++#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \ ++ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val)) + +-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ +- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) ++#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \ ++ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val)) + + #endif /* CC_HAVE_ASM_GOTO */ + +diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h +index cad82c9..2e5c5c1 100644 +--- a/arch/x86/include/asm/rwsem.h ++++ b/arch/x86/include/asm/rwsem.h +@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem) + { + asm volatile("# beginning down_read\n\t" + LOCK_PREFIX _ASM_INC "(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_DEC "(%1)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* adds 0x00000001 */ + " jns 1f\n" + " call call_rwsem_down_read_failed\n" +@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) + "1:\n\t" + " mov %1,%2\n\t" + " add %3,%2\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "sub %3,%2\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + " jle 2f\n\t" + LOCK_PREFIX " cmpxchg %2,%0\n\t" + " jnz 1b\n\t" +@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) + long tmp; + asm volatile("# beginning down_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* adds 0xffff0001, returns the old value */ + " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" + /* was the active mask 0 before? 
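Stepping back from this hunk: every PAX_REFCOUNT block in these rwsem and rmwcc changes follows one shape, isolated in the sketch below. Perform the atomic operation; if the signed result overflowed (the jno branch is not taken), undo it and raise the overflow trap with int $4. This is a simplification: the real patch also emits an _ASM_EXTABLE fixup entry so execution resumes past the trap, and the helper name here is invented. x86 only.

static inline void refcount_inc_checked(int *v)
{
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"
                     "lock decl %0\n\t"  /* undo the increment that overflowed */
                     "int $4\n"          /* raise the overflow exception (#OF) */
                     "0:\n"
                     : "+m" (*v) : : "memory", "cc");
}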
*/ +@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem) + long tmp; + asm volatile("# beginning __up_read\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* subtracts 1, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" /* expects old value in %edx */ +@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem) + long tmp; + asm volatile("# beginning __up_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "mov %1,(%2)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* subtracts 0xffff0001, returns the old value */ + " jns 1f\n\t" + " call call_rwsem_wake\n" /* expects old value in %edx */ +@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem) + { + asm volatile("# beginning __downgrade_write\n\t" + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + /* + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) +@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem) + */ + static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) + { +- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" ++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX _ASM_SUB "%1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (sem->count) + : "er" (delta)); + } +@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) + */ + static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) + { +- return delta + xadd(&sem->count, delta); ++ return delta + xadd_check_overflow(&sem->count, delta); + } + + #endif /* __KERNEL__ */ +diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h +index 6f1c3a8..7744f19 100644 +--- a/arch/x86/include/asm/segment.h ++++ b/arch/x86/include/asm/segment.h +@@ -64,10 +64,15 @@ + * 26 - ESPFIX small SS + * 27 - per-cpu [ offset to per-cpu data area ] + * 28 - stack_canary-20 [ for stack protector ] +- * 29 - unused +- * 30 - unused ++ * 29 - PCI BIOS CS ++ * 30 - PCI BIOS DS + * 31 - TSS for double fault handler + */ ++#define GDT_ENTRY_KERNEXEC_EFI_CS (1) ++#define GDT_ENTRY_KERNEXEC_EFI_DS (2) ++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8) ++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8) ++ + #define GDT_ENTRY_TLS_MIN 6 + #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) + +@@ -79,6 +84,8 @@ + + #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) ++ + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) + + #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) +@@ -104,6 +111,12 @@ + #define __KERNEL_STACK_CANARY 0 + #endif + ++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17) ++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) ++ ++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18) ++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) ++ + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 + + /* +@@ -141,7 +154,7 @@ + */ + + /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ +-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) ++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == 
PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) + + + #else +@@ -165,6 +178,8 @@ + #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) + #define __USER32_DS __USER_DS + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 ++ + #define GDT_ENTRY_TSS 8 /* needs two entries */ + #define GDT_ENTRY_LDT 10 /* needs two entries */ + #define GDT_ENTRY_TLS_MIN 12 +@@ -173,6 +188,8 @@ + #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ + #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) + ++#define GDT_ENTRY_UDEREF_KERNEL_DS 16 ++ + /* TLS indexes for 64bit - hardcoded in arch_prctl */ + #define FS_TLS 0 + #define GS_TLS 1 +@@ -180,12 +197,14 @@ + #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) + #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) + +-#define GDT_ENTRIES 16 ++#define GDT_ENTRIES 17 + + #endif + + #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) ++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) + #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) ++#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8) + #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) + #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) + #ifndef CONFIG_PARAVIRT +@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment) + { + unsigned long __limit; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); +- return __limit + 1; ++ return __limit; + } + + #endif /* !__ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h +index 8d3120f..352b440 100644 +--- a/arch/x86/include/asm/smap.h ++++ b/arch/x86/include/asm/smap.h +@@ -25,11 +25,40 @@ + + #include <asm/alternative-asm.h> + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define ASM_PAX_OPEN_USERLAND \ ++ 661: jmp 663f; \ ++ .pushsection .altinstr_replacement, "a" ; \ ++ 662: pushq %rax; nop; \ ++ .popsection ; \ ++ .pushsection .altinstructions, "a" ; \ ++ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\ ++ .popsection ; \ ++ call __pax_open_userland; \ ++ popq %rax; \ ++ 663: ++ ++#define ASM_PAX_CLOSE_USERLAND \ ++ 661: jmp 663f; \ ++ .pushsection .altinstr_replacement, "a" ; \ ++ 662: pushq %rax; nop; \ ++ .popsection; \ ++ .pushsection .altinstructions, "a" ; \ ++ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\ ++ .popsection; \ ++ call __pax_close_userland; \ ++ popq %rax; \ ++ 663: ++#else ++#define ASM_PAX_OPEN_USERLAND ++#define ASM_PAX_CLOSE_USERLAND ++#endif ++ + #ifdef CONFIG_X86_SMAP + + #define ASM_CLAC \ + 661: ASM_NOP3 ; \ +- .pushsection .altinstr_replacement, "ax" ; \ ++ .pushsection .altinstr_replacement, "a" ; \ + 662: __ASM_CLAC ; \ + .popsection ; \ + .pushsection .altinstructions, "a" ; \ +@@ -38,7 +67,7 @@ + + #define ASM_STAC \ + 661: ASM_NOP3 ; \ +- .pushsection .altinstr_replacement, "ax" ; \ ++ .pushsection .altinstr_replacement, "a" ; \ + 662: __ASM_STAC ; \ + .popsection ; \ + .pushsection .altinstructions, "a" ; \ +@@ -56,6 +85,37 @@ + + #include <asm/alternative.h> + ++#define __HAVE_ARCH_PAX_OPEN_USERLAND ++#define __HAVE_ARCH_PAX_CLOSE_USERLAND ++ ++extern void __pax_open_userland(void); ++static __always_inline unsigned long pax_open_userland(void) ++{ ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF) ++ : ++ : [open] "i" (__pax_open_userland) ++ : "memory", "rax"); ++#endif ++ ++ return 0; ++} ++ ++extern void __pax_close_userland(void); ++static __always_inline unsigned long pax_close_userland(void) ++{ 
++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF) ++ : ++ : [close] "i" (__pax_close_userland) ++ : "memory", "rax"); ++#endif ++ ++ return 0; ++} ++ + #ifdef CONFIG_X86_SMAP + + static __always_inline void clac(void) +diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h +index 8cd27e0..7f05ec8 100644 +--- a/arch/x86/include/asm/smp.h ++++ b/arch/x86/include/asm/smp.h +@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); + /* cpus sharing the last level cache: */ + DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); + DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); +-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); ++DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number); + + static inline struct cpumask *cpu_sibling_mask(int cpu) + { +@@ -78,7 +78,7 @@ struct smp_ops { + + void (*send_call_func_ipi)(const struct cpumask *mask); + void (*send_call_func_single_ipi)(int cpu); +-}; ++} __no_const; + + /* Globals due to paravirt */ + extern void set_cpu_sibling_map(int cpu); +@@ -190,14 +190,8 @@ extern unsigned disabled_cpus; + extern int safe_smp_processor_id(void); + + #elif defined(CONFIG_X86_64_SMP) +-#define raw_smp_processor_id() (this_cpu_read(cpu_number)) +- +-#define stack_smp_processor_id() \ +-({ \ +- struct thread_info *ti; \ +- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ +- ti->cpu; \ +-}) ++#define raw_smp_processor_id() (this_cpu_read(cpu_number)) ++#define stack_smp_processor_id() raw_smp_processor_id() + #define safe_smp_processor_id() smp_processor_id() + + #endif +diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h +index 0f62f54..cb5d0dd 100644 +--- a/arch/x86/include/asm/spinlock.h ++++ b/arch/x86/include/asm/spinlock.h +@@ -222,6 +222,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock) + static inline void arch_read_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + "jns 1f\n" + "call __read_lock_failed\n\t" + "1:\n" +@@ -231,6 +239,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) + static inline void arch_write_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + "jz 1f\n" + "call __write_lock_failed\n\t" + "1:\n" +@@ -260,13 +276,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) + + static inline void arch_read_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0" ++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + :"+m" (rw->lock) : : "memory"); + } + + static inline void arch_write_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" ++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); + } + +diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h +index 
6a99859..03cb807 100644 +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h +@@ -47,7 +47,7 @@ + * head_32 for boot CPU and setup_per_cpu_areas() for others. + */ + #define GDT_STACK_CANARY_INIT \ +- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18), ++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17), + + /* + * Initialize the stackprotector canary value. +@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu) + + static inline void load_stack_canary_segment(void) + { +-#ifdef CONFIG_X86_32 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF) + asm volatile ("mov %0, %%gs" : : "r" (0)); + #endif + } +diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h +index 70bbe39..4ae2bd4 100644 +--- a/arch/x86/include/asm/stacktrace.h ++++ b/arch/x86/include/asm/stacktrace.h +@@ -11,28 +11,20 @@ + + extern int kstack_depth_to_print; + +-struct thread_info; ++struct task_struct; + struct stacktrace_ops; + +-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, +- unsigned long *stack, +- unsigned long bp, +- const struct stacktrace_ops *ops, +- void *data, +- unsigned long *end, +- int *graph); ++typedef unsigned long walk_stack_t(struct task_struct *task, ++ void *stack_start, ++ unsigned long *stack, ++ unsigned long bp, ++ const struct stacktrace_ops *ops, ++ void *data, ++ unsigned long *end, ++ int *graph); + +-extern unsigned long +-print_context_stack(struct thread_info *tinfo, +- unsigned long *stack, unsigned long bp, +- const struct stacktrace_ops *ops, void *data, +- unsigned long *end, int *graph); +- +-extern unsigned long +-print_context_stack_bp(struct thread_info *tinfo, +- unsigned long *stack, unsigned long bp, +- const struct stacktrace_ops *ops, void *data, +- unsigned long *end, int *graph); ++extern walk_stack_t print_context_stack; ++extern walk_stack_t print_context_stack_bp; + + /* Generic stack tracer with callbacks */ + +@@ -40,7 +32,7 @@ struct stacktrace_ops { + void (*address)(void *data, unsigned long address, int reliable); + /* On negative return stop dumping */ + int (*stack)(void *data, char *name); +- walk_stack_t walk_stack; ++ walk_stack_t *walk_stack; + }; + + void dump_trace(struct task_struct *tsk, struct pt_regs *regs, +diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h +index d7f3b3b..3cc39f1 100644 +--- a/arch/x86/include/asm/switch_to.h ++++ b/arch/x86/include/asm/switch_to.h +@@ -108,7 +108,7 @@ do { \ + "call __switch_to\n\t" \ + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ + __switch_canary \ +- "movq %P[thread_info](%%rsi),%%r8\n\t" \ ++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \ + "movq %%rax,%%rdi\n\t" \ + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ + "jnz ret_from_fork\n\t" \ +@@ -119,7 +119,7 @@ do { \ + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ + [ti_flags] "i" (offsetof(struct thread_info, flags)), \ + [_tif_fork] "i" (_TIF_FORK), \ +- [thread_info] "i" (offsetof(struct task_struct, stack)), \ ++ [thread_info] "m" (current_tinfo), \ + [current_task] "m" (current_task) \ + __switch_canary_iparam \ + : "memory", "cc" __EXTRA_CLOBBER) +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index e1940c0..ac50dd8 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <asm/page.h> + #include <asm/types.h> ++#include <asm/percpu.h> + + /* 
+ * low level task data that entry.S needs immediate access to +@@ -23,7 +24,6 @@ struct exec_domain; + #include <linux/atomic.h> + + struct thread_info { +- struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + __u32 flags; /* low level flags */ + __u32 status; /* thread synchronous flags */ +@@ -32,19 +32,13 @@ struct thread_info { + mm_segment_t addr_limit; + struct restart_block restart_block; + void __user *sysenter_return; +-#ifdef CONFIG_X86_32 +- unsigned long previous_esp; /* ESP of the previous stack in +- case of nested (IRQ) stacks +- */ +- __u8 supervisor_stack[0]; +-#endif ++ unsigned long lowest_stack; + unsigned int sig_on_uaccess_error:1; + unsigned int uaccess_err:1; /* uaccess failed */ + }; + +-#define INIT_THREAD_INFO(tsk) \ ++#define INIT_THREAD_INFO \ + { \ +- .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ +@@ -55,7 +49,7 @@ struct thread_info { + }, \ + } + +-#define init_thread_info (init_thread_union.thread_info) ++#define init_thread_info (init_thread_union.stack) + #define init_stack (init_thread_union.stack) + + #else /* !__ASSEMBLY__ */ +@@ -95,6 +89,7 @@ struct thread_info { + #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ + #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ + #define TIF_X32 30 /* 32-bit native x86-64 binary */ ++#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */ + + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +@@ -118,17 +113,18 @@ struct thread_info { + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) + #define _TIF_ADDR32 (1 << TIF_ADDR32) + #define _TIF_X32 (1 << TIF_X32) ++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_enter() */ + #define _TIF_WORK_SYSCALL_ENTRY \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ +- _TIF_NOHZ) ++ _TIF_NOHZ | _TIF_GRSEC_SETXID) + + /* work to do in syscall_trace_leave() */ + #define _TIF_WORK_SYSCALL_EXIT \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ +- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) ++ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID) + + /* work to do on interrupt/exception return */ + #define _TIF_WORK_MASK \ +@@ -139,7 +135,7 @@ struct thread_info { + /* work to do on any return to user space */ + #define _TIF_ALLWORK_MASK \ + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ +- _TIF_NOHZ) ++ _TIF_NOHZ | _TIF_GRSEC_SETXID) + + /* Only used for 64 bit */ + #define _TIF_DO_NOTIFY_MASK \ +@@ -153,6 +149,23 @@ struct thread_info { + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) + ++#ifdef __ASSEMBLY__ ++/* how to get the thread information struct from ASM */ ++#define GET_THREAD_INFO(reg) \ ++ mov PER_CPU_VAR(current_tinfo), reg ++ ++/* use this one if reg already contains %esp */ ++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg) ++#else ++/* how to get the thread information struct from C */ ++DECLARE_PER_CPU(struct thread_info *, current_tinfo); ++ ++static __always_inline struct thread_info *current_thread_info(void) ++{ ++ return this_cpu_read_stable(current_tinfo); ++} ++#endif ++ + #ifdef CONFIG_X86_32 + + #define STACK_WARN (THREAD_SIZE/8) +@@ -169,31 +182,10 @@ struct thread_info { + sp; \ + }) + +-/* how to get the thread 
information struct from C */ +-static inline struct thread_info *current_thread_info(void) +-{ +- return (struct thread_info *) +- (current_stack_pointer & ~(THREAD_SIZE - 1)); +-} +- +-#else /* !__ASSEMBLY__ */ +- +-/* how to get the thread information struct from ASM */ +-#define GET_THREAD_INFO(reg) \ +- movl $-THREAD_SIZE, reg; \ +- andl %esp, reg +- +-/* use this one if reg already contains %esp */ +-#define GET_THREAD_INFO_WITH_ESP(reg) \ +- andl $-THREAD_SIZE, reg +- + #endif + + #else /* X86_32 */ + +-#include <asm/percpu.h> +-#define KERNEL_STACK_OFFSET (5*8) +- + /* + * macros/functions for gaining access to the thread information structure + * preempt_count needs to be 1 initially, until the scheduler is functional. +@@ -201,27 +193,8 @@ static inline struct thread_info *current_thread_info(void) + #ifndef __ASSEMBLY__ + DECLARE_PER_CPU(unsigned long, kernel_stack); + +-static inline struct thread_info *current_thread_info(void) +-{ +- struct thread_info *ti; +- ti = (void *)(this_cpu_read_stable(kernel_stack) + +- KERNEL_STACK_OFFSET - THREAD_SIZE); +- return ti; +-} +- +-#else /* !__ASSEMBLY__ */ +- +-/* how to get the thread information struct from ASM */ +-#define GET_THREAD_INFO(reg) \ +- movq PER_CPU_VAR(kernel_stack),reg ; \ +- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg +- +-/* +- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in +- * a certain register (to be used in assembler memory operands). +- */ +-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg) +- ++/* how to get the current stack pointer from C */ ++register unsigned long current_stack_pointer asm("rsp") __used; + #endif + + #endif /* !X86_32 */ +@@ -280,5 +253,12 @@ static inline bool is_ia32_task(void) + extern void arch_task_cache_init(void); + extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); + extern void arch_release_task_struct(struct task_struct *tsk); ++ ++#define __HAVE_THREAD_FUNCTIONS ++#define task_thread_info(task) (&(task)->tinfo) ++#define task_stack_page(task) ((task)->stack) ++#define setup_thread_stack(p, org) do {} while (0) ++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1) ++ + #endif + #endif /* _ASM_X86_THREAD_INFO_H */ +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index 04905bf..49203ca 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -17,18 +17,44 @@ + + static inline void __native_flush_tlb(void) + { ++ if (static_cpu_has(X86_FEATURE_INVPCID)) { ++ u64 descriptor[2]; ++ ++ descriptor[0] = PCID_KERNEL; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory"); ++ return; ++ } ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (static_cpu_has(X86_FEATURE_PCID)) { ++ unsigned int cpu = raw_get_cpu(); ++ ++ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER); ++ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL); ++ raw_put_cpu_no_resched(); ++ return; ++ } ++#endif ++ + native_write_cr3(native_read_cr3()); + } + + static inline void __native_flush_tlb_global_irq_disabled(void) + { +- unsigned long cr4; ++ if (static_cpu_has(X86_FEATURE_INVPCID)) { ++ u64 descriptor[2]; + +- cr4 = native_read_cr4(); +- /* clear PGE */ +- native_write_cr4(cr4 & ~X86_CR4_PGE); +- /* write old PGE again and flush TLBs */ +- native_write_cr4(cr4); ++ descriptor[0] = PCID_KERNEL; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : 
"memory"); ++ } else { ++ unsigned long cr4; ++ ++ cr4 = native_read_cr4(); ++ /* clear PGE */ ++ native_write_cr4(cr4 & ~X86_CR4_PGE); ++ /* write old PGE again and flush TLBs */ ++ native_write_cr4(cr4); ++ } + } + + static inline void __native_flush_tlb_global(void) +@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void) + + static inline void __native_flush_tlb_single(unsigned long addr) + { ++ if (static_cpu_has(X86_FEATURE_INVPCID)) { ++ u64 descriptor[2]; ++ ++ descriptor[0] = PCID_KERNEL; ++ descriptor[1] = addr; ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) { ++ if (addr < TASK_SIZE_MAX) ++ descriptor[1] += pax_user_shadow_base; ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory"); ++ } ++ ++ descriptor[0] = PCID_USER; ++ descriptor[1] = addr; ++#endif ++ ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory"); ++ return; ++ } ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (static_cpu_has(X86_FEATURE_PCID)) { ++ unsigned int cpu = raw_get_cpu(); ++ ++ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH); ++ asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); ++ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); ++ raw_put_cpu_no_resched(); ++ ++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX) ++ addr += pax_user_shadow_base; ++ } ++#endif ++ + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + } + +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 0d592e0..526f797 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -7,6 +7,7 @@ + #include <linux/compiler.h> + #include <linux/thread_info.h> + #include <linux/string.h> ++#include <linux/spinlock.h> + #include <asm/asm.h> + #include <asm/page.h> + #include <asm/smap.h> +@@ -29,7 +30,12 @@ + + #define get_ds() (KERNEL_DS) + #define get_fs() (current_thread_info()->addr_limit) ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++void __set_fs(mm_segment_t x); ++void set_fs(mm_segment_t x); ++#else + #define set_fs(x) (current_thread_info()->addr_limit = (x)) ++#endif + + #define segment_eq(a, b) ((a).seg == (b).seg) + +@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un + * checks that the pointer is in the user space range - after calling + * this function, memory access functions may still return -EFAULT. 
+ */ +-#define access_ok(type, addr, size) \ +- likely(!__range_not_ok(addr, size, user_addr_max())) ++extern int _cond_resched(void); ++#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max()))) ++#define access_ok(type, addr, size) \ ++({ \ ++ unsigned long __size = size; \ ++ unsigned long __addr = (unsigned long)addr; \ ++ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\ ++ if (__ret_ao && __size) { \ ++ unsigned long __addr_ao = __addr & PAGE_MASK; \ ++ unsigned long __end_ao = __addr + __size - 1; \ ++ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ ++ while (__addr_ao <= __end_ao) { \ ++ char __c_ao; \ ++ __addr_ao += PAGE_SIZE; \ ++ if (__size > PAGE_SIZE) \ ++ _cond_resched(); \ ++ if (__get_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ if (type != VERIFY_WRITE) { \ ++ __addr = __addr_ao; \ ++ continue; \ ++ } \ ++ if (__put_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ __addr = __addr_ao; \ ++ } \ ++ } \ ++ } \ ++ __ret_ao; \ ++}) + + /* + * The exception table consists of pairs of addresses relative to the +@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ + __chk_user_ptr(ptr); \ + might_fault(); \ ++ pax_open_userland(); \ + asm volatile("call __get_user_%P3" \ + : "=a" (__ret_gu), "=r" (__val_gu) \ + : "0" (ptr), "i" (sizeof(*(ptr)))); \ + (x) = (__typeof__(*(ptr))) __val_gu; \ ++ pax_close_userland(); \ + __ret_gu; \ + }) + +@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") + +- ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg "gs;" ++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n" ++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n" ++#else ++#define __copyuser_seg ++#define __COPYUSER_SET_ES ++#define __COPYUSER_RESTORE_ES ++#endif + + #ifdef CONFIG_X86_32 + #define __put_user_asm_u64(x, addr, err, errret) \ + asm volatile(ASM_STAC "\n" \ +- "1: movl %%eax,0(%2)\n" \ +- "2: movl %%edx,4(%2)\n" \ ++ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \ ++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \ + "3: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "4: movl %3,%0\n" \ +@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) + + #define __put_user_asm_ex_u64(x, addr) \ + asm volatile(ASM_STAC "\n" \ +- "1: movl %%eax,0(%1)\n" \ +- "2: movl %%edx,4(%1)\n" \ ++ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \ ++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \ + "3: " ASM_CLAC "\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ + _ASM_EXTABLE_EX(2b, 3b) \ +@@ -257,7 +301,8 @@ extern void __put_user_8(void); + __typeof__(*(ptr)) __pu_val; \ + __chk_user_ptr(ptr); \ + might_fault(); \ +- __pu_val = x; \ ++ __pu_val = (x); \ ++ pax_open_userland(); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_x(1, __pu_val, ptr, __ret_pu); \ +@@ -275,6 +320,7 @@ extern void __put_user_8(void); + __put_user_x(X, __pu_val, ptr, __ret_pu); \ + break; \ + } \ ++ pax_close_userland(); \ + __ret_pu; \ + }) + +@@ -355,8 +401,10 @@ do { \ + } while (0) + + #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ ++do { \ ++ pax_open_userland(); \ + asm volatile(ASM_STAC "\n" \ +- "1: mov"itype" %2,%"rtype"1\n" \ ++ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\ + "2: " ASM_CLAC "\n" \ + ".section 
.fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ +@@ -364,8 +412,10 @@ do { \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (err), ltype(x) \ +- : "m" (__m(addr)), "i" (errret), "0" (err)) ++ : "=r" (err), ltype (x) \ ++ : "m" (__m(addr)), "i" (errret), "0" (err)); \ ++ pax_close_userland(); \ ++} while (0) + + #define __get_user_size_ex(x, ptr, size) \ + do { \ +@@ -389,7 +439,7 @@ do { \ + } while (0) + + #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %1,%"rtype"0\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\ + "2:\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ + : ltype(x) : "m" (__m(addr))) +@@ -406,13 +456,24 @@ do { \ + int __gu_err; \ + unsigned long __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ +- (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) + + /* FIXME: this hack is definitely wrong -AK */ + struct __large_struct { unsigned long buf[100]; }; +-#define __m(x) (*(struct __large_struct __user *)(x)) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define ____m(x) \ ++({ \ ++ unsigned long ____x = (unsigned long)(x); \ ++ if (____x < pax_user_shadow_base) \ ++ ____x += pax_user_shadow_base; \ ++ (typeof(x))____x; \ ++}) ++#else ++#define ____m(x) (x) ++#endif ++#define __m(x) (*(struct __large_struct __user *)____m(x)) + + /* + * Tell gcc we read from memory instead of writing: this is because +@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; }; + * aliasing issues. + */ + #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ ++do { \ ++ pax_open_userland(); \ + asm volatile(ASM_STAC "\n" \ +- "1: mov"itype" %"rtype"1,%2\n" \ ++ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\ + "2: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ +@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; }; + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ +- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) ++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\ ++ pax_close_userland(); \ ++} while (0) + + #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %"rtype"0,%1\n" \ ++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\ + "2:\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ + : : ltype(x), "m" (__m(addr))) +@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; }; + */ + #define uaccess_try do { \ + current_thread_info()->uaccess_err = 0; \ ++ pax_open_userland(); \ + stac(); \ + barrier(); + + #define uaccess_catch(err) \ + clac(); \ ++ pax_close_userland(); \ + (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \ + } while (0) + +@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; }; + * On error, the variable @x is set to zero. + */ + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __get_user(x, ptr) get_user((x), (ptr)) ++#else + #define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) ++#endif + + /** + * __put_user: - Write a simple value into user space, with less checking. +@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; }; + * Returns zero on success, or -EFAULT on error. 
+ */ + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __put_user(x, ptr) put_user((x), (ptr)) ++#else + #define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) ++#endif + + #define __get_user_unaligned __get_user + #define __put_user_unaligned __put_user +@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; }; + #define get_user_ex(x, ptr) do { \ + unsigned long __gue_val; \ + __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ +- (x) = (__force __typeof__(*(ptr)))__gue_val; \ ++ (x) = (__typeof__(*(ptr)))__gue_val; \ + } while (0) + + #define put_user_try uaccess_try +@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void) + __typeof__(ptr) __uval = (uval); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ ++ pax_open_userland(); \ + switch (size) { \ + case 1: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ +- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ ++ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ ++ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ + : "i" (-EFAULT), "q" (__new), "1" (__old) \ + : "memory" \ + ); \ +@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void) + case 2: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ +- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ ++ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ ++ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ +@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void) + case 4: \ + { \ + asm volatile("\t" ASM_STAC "\n" \ +- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ ++ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ ++ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ +@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void) + __cmpxchg_wrong_size(); \ + \ + asm volatile("\t" ASM_STAC "\n" \ +- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ ++ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\ + "2:\t" ASM_CLAC "\n" \ + "\t.section .fixup, \"ax\"\n" \ + "3:\tmov %3, %0\n" \ + "\tjmp 2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ ++ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\ + : "i" (-EFAULT), "r" (__new), "1" (__old) \ + : "memory" \ + ); \ +@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void) + default: \ + __cmpxchg_wrong_size(); \ + } \ ++ pax_close_userland(); \ + *__uval = __old; \ + __ret; \ + }) +@@ -636,17 +713,6 @@ extern struct movsl_mask { + + #define ARCH_HAS_NOCACHE_UACCESS 1 + +-#ifdef CONFIG_X86_32 +-# include <asm/uaccess_32.h> +-#else +-# include <asm/uaccess_64.h> +-#endif +- +-unsigned long __must_check _copy_from_user(void *to, const void __user *from, +- unsigned n); +-unsigned long __must_check _copy_to_user(void __user *to, const void *from, +- unsigned n); +- + #ifdef 
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + # define copy_user_diag __compiletime_error + #else +@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from, + extern void copy_user_diag("copy_from_user() buffer size is too small") + copy_from_user_overflow(void); + extern void copy_user_diag("copy_to_user() buffer size is too small") +-copy_to_user_overflow(void) __asm__("copy_from_user_overflow"); ++copy_to_user_overflow(void); + + #undef copy_user_diag + +@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow"); + + extern void + __compiletime_warning("copy_to_user() buffer size is not provably correct") +-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow"); ++__copy_to_user_overflow(void) __asm__("copy_to_user_overflow"); + #define __copy_to_user_overflow(size, count) __copy_to_user_overflow() + + #else +@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count) + + #endif + ++#ifdef CONFIG_X86_32 ++# include <asm/uaccess_32.h> ++#else ++# include <asm/uaccess_64.h> ++#endif ++ + static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long n) + { +- int sz = __compiletime_object_size(to); ++ size_t sz = __compiletime_object_size(to); + + might_fault(); + +@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n) + * case, and do only runtime checking for non-constant sizes. + */ + +- if (likely(sz < 0 || sz >= n)) +- n = _copy_from_user(to, from, n); +- else if(__builtin_constant_p(n)) +- copy_from_user_overflow(); +- else +- __copy_from_user_overflow(sz, n); ++ if (likely(sz != (size_t)-1 && sz < n)) { ++ if(__builtin_constant_p(n)) ++ copy_from_user_overflow(); ++ else ++ __copy_from_user_overflow(sz, n); ++ } else if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if ((long)n > 0) ++ memset(to, 0, n); + + return n; + } +@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n) + static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long n) + { +- int sz = __compiletime_object_size(from); ++ size_t sz = __compiletime_object_size(from); + + might_fault(); + + /* See the comment in copy_from_user() above. 
*/ +- if (likely(sz < 0 || sz >= n)) +- n = _copy_to_user(to, from, n); +- else if(__builtin_constant_p(n)) +- copy_to_user_overflow(); +- else +- __copy_to_user_overflow(sz, n); ++ if (likely(sz != (size_t)-1 && sz < n)) { ++ if(__builtin_constant_p(n)) ++ copy_to_user_overflow(); ++ else ++ __copy_to_user_overflow(sz, n); ++ } else if (access_ok(VERIFY_WRITE, to, n)) ++ n = __copy_to_user(to, from, n); + + return n; + } +diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h +index 3c03a5d..1071638 100644 +--- a/arch/x86/include/asm/uaccess_32.h ++++ b/arch/x86/include/asm/uaccess_32.h +@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero + static __always_inline unsigned long __must_check + __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ ++ check_object_size(from, n, true); ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check + __copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ + return __copy_to_user_inatomic(to, from, n); + } + + static __always_inline unsigned long + __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + /* Avoid zeroing the tail if the copy fails.. + * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, + * but as the zeroing behaviour is only significant when n is not +@@ -137,6 +146,12 @@ static __always_inline unsigned long + __copy_from_user(void *to, const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ ++ check_object_size(to, n, false); ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, + const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -181,7 +200,10 @@ static __always_inline unsigned long + __copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) + { +- return __copy_from_user_ll_nocache_nozero(to, from, n); ++ if ((long)n < 0) ++ return n; ++ ++ return __copy_from_user_ll_nocache_nozero(to, from, n); + } + + #endif /* _ASM_X86_UACCESS_32_H */ +diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h +index 12a26b9..206c200 100644 +--- a/arch/x86/include/asm/uaccess_64.h ++++ b/arch/x86/include/asm/uaccess_64.h +@@ -10,6 +10,9 @@ + #include <asm/alternative.h> + #include <asm/cpufeature.h> + #include <asm/page.h> ++#include <asm/pgtable.h> ++ ++#define set_fs(x) (current_thread_info()->addr_limit = (x)) + + /* + * Copy To/From Userspace +@@ -17,14 +20,14 @@ + + /* Handles exceptions in both to and from, but doesn't do access_ok */ + __must_check unsigned long +-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); ++copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3); + __must_check unsigned long +-copy_user_generic_string(void *to, const void *from, unsigned len); ++copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3); + __must_check unsigned long +-copy_user_generic_unrolled(void *to, const void *from, unsigned len); ++copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3); + + static __always_inline 
__must_check unsigned long +-copy_user_generic(void *to, const void *from, unsigned len) ++copy_user_generic(void *to, const void *from, unsigned long len) + { + unsigned ret; + +@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len) + } + + __must_check unsigned long +-copy_in_user(void __user *to, const void __user *from, unsigned len); ++copy_in_user(void __user *to, const void __user *from, unsigned long len); + + static __always_inline __must_check +-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) ++unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size) + { +- int ret = 0; ++ size_t sz = __compiletime_object_size(dst); ++ unsigned ret = 0; ++ ++ if (size > INT_MAX) ++ return size; ++ ++ check_object_size(dst, size, false); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!access_ok_noprefault(VERIFY_READ, src, size)) ++ return size; ++#endif ++ ++ if (unlikely(sz != (size_t)-1 && sz < size)) { ++ if(__builtin_constant_p(size)) ++ copy_from_user_overflow(); ++ else ++ __copy_from_user_overflow(sz, size); ++ return size; ++ } + + if (!__builtin_constant_p(size)) +- return copy_user_generic(dst, (__force void *)src, size); ++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); + switch (size) { +- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, ++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src, + ret, "b", "b", "=q", 1); + return ret; +- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, ++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src, + ret, "w", "w", "=r", 2); + return ret; +- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, ++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src, + ret, "l", "k", "=r", 4); + return ret; +- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 8); + return ret; + case 10: +- __get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 10); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u16 *)(8 + (char *)dst), +- (u16 __user *)(8 + (char __user *)src), ++ (const u16 __user *)(8 + (const char __user *)src), + ret, "w", "w", "=r", 2); + return ret; + case 16: +- __get_user_asm(*(u64 *)dst, (u64 __user *)src, ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src, + ret, "q", "", "=r", 16); + if (unlikely(ret)) + return ret; + __get_user_asm(*(u64 *)(8 + (char *)dst), +- (u64 __user *)(8 + (char __user *)src), ++ (const u64 __user *)(8 + (const char __user *)src), + ret, "q", "", "=r", 8); + return ret; + default: +- return copy_user_generic(dst, (__force void *)src, size); ++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size); + } + } + + static __always_inline __must_check +-int __copy_from_user(void *dst, const void __user *src, unsigned size) ++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) + { + might_fault(); + return __copy_from_user_nocheck(dst, src, size); + } + + static __always_inline __must_check +-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) ++unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size) + { +- int ret = 0; ++ size_t sz = __compiletime_object_size(src); ++ unsigned ret = 0; ++ ++ if (size > INT_MAX) ++ return size; ++ ++ check_object_size(src, size, true); ++ ++#ifdef 
CONFIG_PAX_MEMORY_UDEREF ++ if (!access_ok_noprefault(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ ++ if (unlikely(sz != (size_t)-1 && sz < size)) { ++ if(__builtin_constant_p(size)) ++ copy_to_user_overflow(); ++ else ++ __copy_to_user_overflow(sz, size); ++ return size; ++ } + + if (!__builtin_constant_p(size)) +- return copy_user_generic((__force void *)dst, src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), src, size); + switch (size) { +- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, ++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst, + ret, "b", "b", "iq", 1); + return ret; +- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, ++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; +- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, ++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst, + ret, "l", "k", "ir", 4); + return ret; +- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 8); + return ret; + case 10: +- __put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 10); + if (unlikely(ret)) + return ret; + asm("":::"memory"); +- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, ++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst, + ret, "w", "w", "ir", 2); + return ret; + case 16: +- __put_user_asm(*(u64 *)src, (u64 __user *)dst, ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst, + ret, "q", "", "er", 16); + if (unlikely(ret)) + return ret; + asm("":::"memory"); +- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, ++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst, + ret, "q", "", "er", 8); + return ret; + default: +- return copy_user_generic((__force void *)dst, src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), src, size); + } + } + + static __always_inline __must_check +-int __copy_to_user(void __user *dst, const void *src, unsigned size) ++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) + { + might_fault(); + return __copy_to_user_nocheck(dst, src, size); + } + + static __always_inline __must_check +-int __copy_in_user(void __user *dst, const void __user *src, unsigned size) ++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size) + { +- int ret = 0; ++ unsigned ret = 0; + + might_fault(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!access_ok_noprefault(VERIFY_READ, src, size)) ++ return size; ++ if (!access_ok_noprefault(VERIFY_WRITE, dst, size)) ++ return size; ++#endif ++ + if (!__builtin_constant_p(size)) +- return copy_user_generic((__force void *)dst, +- (__force void *)src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), ++ (__force_kernel const void *)____m(src), size); + switch (size) { + case 1: { + u8 tmp; +- __get_user_asm(tmp, (u8 __user *)src, ++ __get_user_asm(tmp, (const u8 __user *)src, + ret, "b", "b", "=q", 1); + if (likely(!ret)) + __put_user_asm(tmp, (u8 __user *)dst, +@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + } + case 2: { + u16 tmp; +- __get_user_asm(tmp, (u16 __user *)src, ++ __get_user_asm(tmp, (const u16 __user *)src, + ret, "w", "w", "=r", 2); + if (likely(!ret)) + __put_user_asm(tmp, (u16 __user *)dst, +@@ -179,7 +231,7 @@ int __copy_in_user(void 
__user *dst, const void __user *src, unsigned size) + + case 4: { + u32 tmp; +- __get_user_asm(tmp, (u32 __user *)src, ++ __get_user_asm(tmp, (const u32 __user *)src, + ret, "l", "k", "=r", 4); + if (likely(!ret)) + __put_user_asm(tmp, (u32 __user *)dst, +@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + } + case 8: { + u64 tmp; +- __get_user_asm(tmp, (u64 __user *)src, ++ __get_user_asm(tmp, (const u64 __user *)src, + ret, "q", "", "=r", 8); + if (likely(!ret)) + __put_user_asm(tmp, (u64 __user *)dst, +@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) + return ret; + } + default: +- return copy_user_generic((__force void *)dst, +- (__force void *)src, size); ++ return copy_user_generic((__force_kernel void *)____m(dst), ++ (__force_kernel const void *)____m(src), size); + } + } + +-static __must_check __always_inline int +-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) ++static __must_check __always_inline unsigned long ++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) + { + return __copy_from_user_nocheck(dst, src, size); + } + +-static __must_check __always_inline int +-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) ++static __must_check __always_inline unsigned long ++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) + { + return __copy_to_user_nocheck(dst, src, size); + } + +-extern long __copy_user_nocache(void *dst, const void __user *src, +- unsigned size, int zerorest); ++extern unsigned long __copy_user_nocache(void *dst, const void __user *src, ++ unsigned long size, int zerorest); + +-static inline int +-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) ++static inline unsigned long ++__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) + { + might_fault(); ++ ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!access_ok_noprefault(VERIFY_READ, src, size)) ++ return size; ++#endif ++ + return __copy_user_nocache(dst, src, size, 1); + } + +-static inline int ++static inline unsigned long + __copy_from_user_inatomic_nocache(void *dst, const void __user *src, +- unsigned size) ++ unsigned long size) + { ++ if (size > INT_MAX) ++ return size; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!access_ok_noprefault(VERIFY_READ, src, size)) ++ return size; ++#endif ++ + return __copy_user_nocache(dst, src, size, 0); + } + + unsigned long +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); ++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3); + + #endif /* _ASM_X86_UACCESS_64_H */ +diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h +index 5b238981..77fdd78 100644 +--- a/arch/x86/include/asm/word-at-a-time.h ++++ b/arch/x86/include/asm/word-at-a-time.h +@@ -11,7 +11,7 @@ + * and shift, for example. 
+ */ + struct word_at_a_time { +- const unsigned long one_bits, high_bits; ++ unsigned long one_bits, high_bits; + }; + + #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } +diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h +index e45e4da..44e8572 100644 +--- a/arch/x86/include/asm/x86_init.h ++++ b/arch/x86/include/asm/x86_init.h +@@ -129,7 +129,7 @@ struct x86_init_ops { + struct x86_init_timers timers; + struct x86_init_iommu iommu; + struct x86_init_pci pci; +-}; ++} __no_const; + + /** + * struct x86_cpuinit_ops - platform specific cpu hotplug setups +@@ -140,7 +140,7 @@ struct x86_cpuinit_ops { + void (*setup_percpu_clockev)(void); + void (*early_percpu_clock_init)(void); + void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); +-}; ++} __no_const; + + struct timespec; + +@@ -168,7 +168,7 @@ struct x86_platform_ops { + void (*save_sched_clock_state)(void); + void (*restore_sched_clock_state)(void); + void (*apic_post_init)(void); +-}; ++} __no_const; + + struct pci_dev; + struct msi_msg; +@@ -185,7 +185,7 @@ struct x86_msi_ops { + int (*setup_hpet_msi)(unsigned int irq, unsigned int id); + u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); + u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); +-}; ++} __no_const; + + struct IO_APIC_route_entry; + struct io_apic_irq_attr; +@@ -206,7 +206,7 @@ struct x86_io_apic_ops { + unsigned int destination, int vector, + struct io_apic_irq_attr *attr); + void (*eoi_ioapic_pin)(int apic, int pin, int vector); +-}; ++} __no_const; + + extern struct x86_init_ops x86_init; + extern struct x86_cpuinit_ops x86_cpuinit; +diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h +index 3e276eb..2eb3c30 100644 +--- a/arch/x86/include/asm/xen/page.h ++++ b/arch/x86/include/asm/xen/page.h +@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page, + extern struct page *m2p_find_override(unsigned long mfn); + extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); + +-static inline unsigned long pfn_to_mfn(unsigned long pfn) ++static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn) + { + unsigned long mfn; + +diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h +index 6c1d741..39e6ecf 100644 +--- a/arch/x86/include/asm/xsave.h ++++ b/arch/x86/include/asm/xsave.h +@@ -80,8 +80,11 @@ static inline int xsave_user(struct xsave_struct __user *buf) + if (unlikely(err)) + return -EFAULT; + ++ pax_open_userland(); + __asm__ __volatile__(ASM_STAC "\n" +- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" ++ "1:" ++ __copyuser_seg ++ ".byte " REX_PREFIX "0x0f,0xae,0x27\n" + "2: " ASM_CLAC "\n" + ".section .fixup,\"ax\"\n" + "3: movl $-1,%[err]\n" +@@ -91,18 +94,22 @@ static inline int xsave_user(struct xsave_struct __user *buf) + : [err] "=r" (err) + : "D" (buf), "a" (-1), "d" (-1), "0" (0) + : "memory"); ++ pax_close_userland(); + return err; + } + + static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) + { + int err; +- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); ++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf); + u32 lmask = mask; + u32 hmask = mask >> 32; + ++ pax_open_userland(); + __asm__ __volatile__(ASM_STAC "\n" +- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" ++ "1:" ++ __copyuser_seg ++ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n" + "2: " ASM_CLAC "\n" + ".section .fixup,\"ax\"\n" + "3: movl $-1,%[err]\n" +@@ -112,6 +119,7 @@ static 
inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) + : [err] "=r" (err) + : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) + : "memory"); /* memory required? */ ++ pax_close_userland(); + return err; + } + +diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h +index bbae024..e1528f9 100644 +--- a/arch/x86/include/uapi/asm/e820.h ++++ b/arch/x86/include/uapi/asm/e820.h +@@ -63,7 +63,7 @@ struct e820map { + #define ISA_START_ADDRESS 0xa0000 + #define ISA_END_ADDRESS 0x100000 + +-#define BIOS_BEGIN 0x000a0000 ++#define BIOS_BEGIN 0x000c0000 + #define BIOS_END 0x00100000 + + #define BIOS_ROM_BASE 0xffe00000 +diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h +index 7b0a55a..ad115bf 100644 +--- a/arch/x86/include/uapi/asm/ptrace-abi.h ++++ b/arch/x86/include/uapi/asm/ptrace-abi.h +@@ -49,7 +49,6 @@ + #define EFLAGS 144 + #define RSP 152 + #define SS 160 +-#define ARGOFFSET R11 + #endif /* __ASSEMBLY__ */ + + /* top of stack page */ +diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile +index 56bac86..9d8df82 100644 +--- a/arch/x86/kernel/Makefile ++++ b/arch/x86/kernel/Makefile +@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o + obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o + obj-$(CONFIG_IRQ_WORK) += irq_work.o + obj-y += probe_roms.o +-obj-$(CONFIG_X86_32) += i386_ksyms_32.o ++obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o + obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o + obj-y += syscall_$(BITS).o + obj-$(CONFIG_X86_64) += vsyscall_64.o +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c +index 1dac942..19c8b0c 100644 +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -1312,7 +1312,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) + * If your system is blacklisted here, but you find that acpi=force + * works for you, please contact linux-acpi@vger.kernel.org + */ +-static struct dmi_system_id __initdata acpi_dmi_table[] = { ++static const struct dmi_system_id __initconst acpi_dmi_table[] = { + /* + * Boxes that need ACPI disabled + */ +@@ -1387,7 +1387,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { + }; + + /* second table for DMI checks that should run after early-quirks */ +-static struct dmi_system_id __initdata acpi_dmi_table_late[] = { ++static const struct dmi_system_id __initconst acpi_dmi_table_late[] = { + /* + * HP laptops which use a DSDT reporting as HP/SB400/10000, + * which includes some code which overrides all temperature +diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c +index 3a2ae4c..9db31d6 100644 +--- a/arch/x86/kernel/acpi/sleep.c ++++ b/arch/x86/kernel/acpi/sleep.c +@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void) + #else /* CONFIG_64BIT */ + #ifdef CONFIG_SMP + stack_start = (unsigned long)temp_stack + sizeof(temp_stack); ++ ++ pax_open_kernel(); + early_gdt_descr.address = + (unsigned long)get_cpu_gdt_table(smp_processor_id()); ++ pax_close_kernel(); ++ + initial_gs = per_cpu_offset(smp_processor_id()); + #endif + initial_code = (unsigned long)wakeup_long64; +diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S +index 665c6b7..eae4d56 100644 +--- a/arch/x86/kernel/acpi/wakeup_32.S ++++ b/arch/x86/kernel/acpi/wakeup_32.S +@@ -29,13 +29,11 @@ wakeup_pmode_return: + # and restore the stack ... 
but you need gdt for this to work + movl saved_context_esp, %esp + +- movl %cs:saved_magic, %eax +- cmpl $0x12345678, %eax ++ cmpl $0x12345678, saved_magic + jne bogus_magic + + # jump to place where we left off +- movl saved_eip, %eax +- jmp *%eax ++ jmp *(saved_eip) + + bogus_magic: + jmp bogus_magic +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index df94598..f3b29bf 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start, + */ + for (a = start; a < end; a++) { + instr = (u8 *)&a->instr_offset + a->instr_offset; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr) ++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + replacement = (u8 *)&a->repl_offset + a->repl_offset; + BUG_ON(a->replacementlen > a->instrlen); + BUG_ON(a->instrlen > sizeof(insnbuf)); +@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn DS segment override prefix into lock prefix */ +- if (*ptr == 0x3e) ++ if (*ktla_ktva(ptr) == 0x3e) + text_poke(ptr, ((unsigned char []){0xf0}), 1); + } + mutex_unlock(&text_mutex); +@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, + for (poff = start; poff < end; poff++) { + u8 *ptr = (u8 *)poff + *poff; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr) ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn lock prefix into DS segment override prefix */ +- if (*ptr == 0xf0) ++ if (*ktla_ktva(ptr) == 0xf0) + text_poke(ptr, ((unsigned char []){0x3E}), 1); + } + mutex_unlock(&text_mutex); +@@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, + + BUG_ON(p->len > MAX_PATCH_LEN); + /* prep the buffer with the original instructions */ +- memcpy(insnbuf, p->instr, p->len); ++ memcpy(insnbuf, ktla_ktva(p->instr), p->len); + used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, + (unsigned long)p->instr, p->len); + +@@ -505,7 +524,7 @@ void __init alternative_instructions(void) + if (!uniproc_patched || num_possible_cpus() == 1) + free_init_pages("SMP alternatives", + (unsigned long)__smp_locks, +- (unsigned long)__smp_locks_end); ++ PAGE_ALIGN((unsigned long)__smp_locks_end)); + #endif + + apply_paravirt(__parainstructions, __parainstructions_end); +@@ -525,13 +544,17 @@ void __init alternative_instructions(void) + * instructions. And on the local CPU you need to be protected again NMI or MCE + * handlers seeing an inconsistent instruction while you patch. 
+ */ +-void *__init_or_module text_poke_early(void *addr, const void *opcode, ++void *__kprobes text_poke_early(void *addr, const void *opcode, + size_t len) + { + unsigned long flags; + local_irq_save(flags); +- memcpy(addr, opcode, len); ++ ++ pax_open_kernel(); ++ memcpy(ktla_ktva(addr), opcode, len); + sync_core(); ++ pax_close_kernel(); ++ + local_irq_restore(flags); + /* Could also do a CLFLUSH here to speed up CPU recovery; but + that causes hangs on some VIA CPUs. */ +@@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, + */ + void *__kprobes text_poke(void *addr, const void *opcode, size_t len) + { +- unsigned long flags; +- char *vaddr; ++ unsigned char *vaddr = ktla_ktva(addr); + struct page *pages[2]; +- int i; ++ size_t i; + + if (!core_kernel_text((unsigned long)addr)) { +- pages[0] = vmalloc_to_page(addr); +- pages[1] = vmalloc_to_page(addr + PAGE_SIZE); ++ pages[0] = vmalloc_to_page(vaddr); ++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); + } else { +- pages[0] = virt_to_page(addr); ++ pages[0] = virt_to_page(vaddr); + WARN_ON(!PageReserved(pages[0])); +- pages[1] = virt_to_page(addr + PAGE_SIZE); ++ pages[1] = virt_to_page(vaddr + PAGE_SIZE); + } + BUG_ON(!pages[0]); +- local_irq_save(flags); +- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); +- if (pages[1]) +- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); +- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); +- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); +- clear_fixmap(FIX_TEXT_POKE0); +- if (pages[1]) +- clear_fixmap(FIX_TEXT_POKE1); +- local_flush_tlb(); +- sync_core(); +- /* Could also do a CLFLUSH here to speed up CPU recovery; but +- that causes hangs on some VIA CPUs. */ ++ text_poke_early(addr, opcode, len); + for (i = 0; i < len; i++) +- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); +- local_irq_restore(flags); ++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]); + return addr; + } + +@@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *regs) + if (likely(!bp_patching_in_progress)) + return 0; + +- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr) ++ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr) + return 0; + + /* set up the specified breakpoint handler */ +@@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *regs) + */ + void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) + { +- unsigned char int3 = 0xcc; ++ const unsigned char int3 = 0xcc; + + bp_int3_handler = handler; + bp_int3_addr = (u8 *)addr + sizeof(int3); +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 7f26c9a..694544e 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -198,7 +198,7 @@ int first_system_vector = 0xfe; + /* + * Debug level, exported for io_apic.c + */ +-unsigned int apic_verbosity; ++int apic_verbosity; + + int pic_mode; + +@@ -1992,7 +1992,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs) + apic_write(APIC_ESR, 0); + v = apic_read(APIC_ESR); + ack_APIC_irq(); +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", + smp_processor_id(), v); +diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c +index 2c621a6..fa2b1ae 100644 +--- a/arch/x86/kernel/apic/apic_flat_64.c ++++ b/arch/x86/kernel/apic/apic_flat_64.c +@@ -154,7 +154,7 @@ static int flat_probe(void) + return 1; + } + +-static struct apic 
apic_flat = { ++static struct apic apic_flat __read_only = { + .name = "flat", + .probe = flat_probe, + .acpi_madt_oem_check = flat_acpi_madt_oem_check, +@@ -268,7 +268,7 @@ static int physflat_probe(void) + return 0; + } + +-static struct apic apic_physflat = { ++static struct apic apic_physflat __read_only = { + + .name = "physical flat", + .probe = physflat_probe, +diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c +index 191ce75..2db6d63 100644 +--- a/arch/x86/kernel/apic/apic_noop.c ++++ b/arch/x86/kernel/apic/apic_noop.c +@@ -118,7 +118,7 @@ static void noop_apic_write(u32 reg, u32 v) + WARN_ON_ONCE(cpu_has_apic && !disable_apic); + } + +-struct apic apic_noop = { ++struct apic apic_noop __read_only = { + .name = "noop", + .probe = noop_probe, + .acpi_madt_oem_check = NULL, +diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c +index d50e364..543bee3 100644 +--- a/arch/x86/kernel/apic/bigsmp_32.c ++++ b/arch/x86/kernel/apic/bigsmp_32.c +@@ -152,7 +152,7 @@ static int probe_bigsmp(void) + return dmi_bigsmp; + } + +-static struct apic apic_bigsmp = { ++static struct apic apic_bigsmp __read_only = { + + .name = "bigsmp", + .probe = probe_bigsmp, +diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c +index c552247..587a316 100644 +--- a/arch/x86/kernel/apic/es7000_32.c ++++ b/arch/x86/kernel/apic/es7000_32.c +@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem, + return ret && es7000_apic_is_cluster(); + } + +-/* We've been warned by a false positive warning.Use __refdata to keep calm. */ +-static struct apic __refdata apic_es7000_cluster = { ++static struct apic apic_es7000_cluster __read_only = { + + .name = "es7000", + .probe = probe_es7000, +@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = { + .x86_32_early_logical_apicid = es7000_early_logical_apicid, + }; + +-static struct apic __refdata apic_es7000 = { ++static struct apic apic_es7000 __read_only = { + + .name = "es7000", + .probe = probe_es7000, +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 6ad4658..38a7b5c 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1057,7 +1057,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, + } + EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + +-void lock_vector_lock(void) ++void lock_vector_lock(void) __acquires(vector_lock) + { + /* Used to the online set of cpus does not change + * during assign_irq_vector. +@@ -1065,7 +1065,7 @@ void lock_vector_lock(void) + raw_spin_lock(&vector_lock); + } + +-void unlock_vector_lock(void) ++void unlock_vector_lock(void) __releases(vector_lock) + { + raw_spin_unlock(&vector_lock); + } +@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_data *data) + ack_APIC_irq(); + } + +-atomic_t irq_mis_count; ++atomic_unchecked_t irq_mis_count; + + #ifdef CONFIG_GENERIC_PENDING_IRQ + static bool io_apic_level_ack_pending(struct irq_cfg *cfg) +@@ -2505,7 +2505,7 @@ static void ack_apic_level(struct irq_data *data) + * at the cpu. 
+ */ + if (!(v & (1 << (i & 0x1f)))) { +- atomic_inc(&irq_mis_count); ++ atomic_inc_unchecked(&irq_mis_count); + + eoi_ioapic_irq(irq, cfg); + } +diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c +index 1e42e8f..daacf44 100644 +--- a/arch/x86/kernel/apic/numaq_32.c ++++ b/arch/x86/kernel/apic/numaq_32.c +@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void) + (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); + } + +-/* Use __refdata to keep false positive warning calm. */ +-static struct apic __refdata apic_numaq = { ++static struct apic apic_numaq __read_only = { + + .name = "NUMAQ", + .probe = probe_numaq, +diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c +index eb35ef9..f184a21 100644 +--- a/arch/x86/kernel/apic/probe_32.c ++++ b/arch/x86/kernel/apic/probe_32.c +@@ -72,7 +72,7 @@ static int probe_default(void) + return 1; + } + +-static struct apic apic_default = { ++static struct apic apic_default __read_only = { + + .name = "default", + .probe = probe_default, +diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c +index 00146f9..5e299b8 100644 +--- a/arch/x86/kernel/apic/summit_32.c ++++ b/arch/x86/kernel/apic/summit_32.c +@@ -485,7 +485,7 @@ void setup_summit(void) + } + #endif + +-static struct apic apic_summit = { ++static struct apic apic_summit __read_only = { + + .name = "summit", + .probe = probe_summit, +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c +index cac85ee..01fa741 100644 +--- a/arch/x86/kernel/apic/x2apic_cluster.c ++++ b/arch/x86/kernel/apic/x2apic_cluster.c +@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) + return notifier_from_errno(err); + } + +-static struct notifier_block __refdata x2apic_cpu_notifier = { ++static struct notifier_block x2apic_cpu_notifier = { + .notifier_call = update_clusterinfo, + }; + +@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask, + cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu)); + } + +-static struct apic apic_x2apic_cluster = { ++static struct apic apic_x2apic_cluster __read_only = { + + .name = "cluster x2apic", + .probe = x2apic_cluster_probe, +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index de231e3..1d1b2ee 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void) + return apic == &apic_x2apic_phys; + } + +-static struct apic apic_x2apic_phys = { ++static struct apic apic_x2apic_phys __read_only = { + + .name = "physical x2apic", + .probe = x2apic_phys_probe, +diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c +index d263b13..963258b 100644 +--- a/arch/x86/kernel/apic/x2apic_uv_x.c ++++ b/arch/x86/kernel/apic/x2apic_uv_x.c +@@ -350,7 +350,7 @@ static int uv_probe(void) + return apic == &apic_x2apic_uv_x; + } + +-static struct apic __refdata apic_x2apic_uv_x = { ++static struct apic apic_x2apic_uv_x __read_only = { + + .name = "UV large system", + .probe = uv_probe, +diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c +index 3ab0343..814c4787 100644 +--- a/arch/x86/kernel/apm_32.c ++++ b/arch/x86/kernel/apm_32.c +@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex); + * This is for buggy BIOS's that refer to (real mode) segment 0x40 + * even though they are called in protected mode. 
+ */ +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + static const char driver_version[] = "1.16ac"; /* no spaces */ +@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call) + &call->esi); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + return call->eax & 0xff; +@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call) + &call->eax); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + return error; + } +@@ -2362,12 +2376,15 @@ static int __init apm_init(void) + * code to that CPU. + */ + gdt = get_cpu_gdt_table(0); ++ ++ pax_open_kernel(); + set_desc_base(&gdt[APM_CS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); + set_desc_base(&gdt[APM_CS_16 >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); + set_desc_base(&gdt[APM_DS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); ++ pax_close_kernel(); + + proc_create("apm", 0, NULL, &apm_file_ops); + +diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c +index 9f6b934..cf5ffb3 100644 +--- a/arch/x86/kernel/asm-offsets.c ++++ b/arch/x86/kernel/asm-offsets.c +@@ -32,6 +32,8 @@ void common(void) { + OFFSET(TI_flags, thread_info, flags); + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); ++ OFFSET(TI_lowest_stack, thread_info, lowest_stack); ++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo)); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +@@ -52,8 +54,26 @@ void common(void) { + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); + OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); ++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); ++#ifdef CONFIG_X86_64 ++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched); ++#endif ++#endif ++ ++#endif ++ ++ BLANK(); ++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE); ++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); ++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE); ++ + #ifdef CONFIG_XEN + BLANK(); + OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); +diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c +index e7c798b..2b2019b 100644 +--- a/arch/x86/kernel/asm-offsets_64.c ++++ b/arch/x86/kernel/asm-offsets_64.c +@@ -77,6 +77,7 @@ int main(void) + BLANK(); + #undef ENTRY + ++ DEFINE(TSS_size, sizeof(struct tss_struct)); + OFFSET(TSS_ist, tss_struct, x86_tss.ist); 
+ BLANK(); + +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile +index 7fd54f0..0691410 100644 +--- a/arch/x86/kernel/cpu/Makefile ++++ b/arch/x86/kernel/cpu/Makefile +@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg + CFLAGS_REMOVE_perf_event.o = -pg + endif + +-# Make sure load_percpu_segment has no stackprotector +-nostackp := $(call cc-option, -fno-stack-protector) +-CFLAGS_common.o := $(nostackp) +- + obj-y := intel_cacheinfo.o scattered.o topology.o + obj-y += proc.o capflags.o powerflags.o common.o + obj-y += rdrand.o +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index c67ffa6..f41fbbf 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -752,7 +752,7 @@ static void init_amd(struct cpuinfo_x86 *c) + static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) + { + /* AMD errata T13 (order #21922) */ +- if ((c->x86 == 6)) { ++ if (c->x86 == 6) { + /* Duron Rev A0 */ + if (c->x86_model == 3 && c->x86_mask == 0) + size = 64; +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 8e28bf2..bf5c0d2 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = { + + static const struct cpu_dev *this_cpu = &default_cpu; + +-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { +-#ifdef CONFIG_X86_64 +- /* +- * We need valid kernel segments for data and code in long mode too +- * IRET will check the segment types kkeil 2000/10/28 +- * Also sysret mandates a special GDT layout +- * +- * TLS descriptors are currently at a different place compared to i386. +- * Hopefully nobody expects them at a fixed place (Wine?) +- */ +- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), +-#else +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), +- /* +- * Segments used for calling PnP BIOS have byte granularity. +- * They code segments and data segments have fixed 64k limits, +- * the transfer segment sizes are set at run time. +- */ +- /* 32-bit code */ +- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* +- * The APM segments have byte granularity and their bases +- * are set at run time. All have 64k limits. 
+- */ +- /* 32-bit code */ +- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* data */ +- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), +- +- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- GDT_STACK_CANARY_INIT +-#endif +-} }; +-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); +- + static int __init x86_xsave_setup(char *s) + { + setup_clear_cpu_cap(X86_FEATURE_XSAVE); +@@ -293,6 +239,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) + } + } + ++#ifdef CONFIG_X86_64 ++static __init int setup_disable_pcid(char *arg) ++{ ++ setup_clear_cpu_cap(X86_FEATURE_PCID); ++ setup_clear_cpu_cap(X86_FEATURE_INVPCID); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (clone_pgd_mask != ~(pgdval_t)0UL) ++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT; ++#endif ++ ++ return 1; ++} ++__setup("nopcid", setup_disable_pcid); ++ ++static void setup_pcid(struct cpuinfo_x86 *c) ++{ ++ if (!cpu_has(c, X86_FEATURE_PCID)) { ++ clear_cpu_cap(c, X86_FEATURE_INVPCID); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (clone_pgd_mask != ~(pgdval_t)0UL) { ++ pax_open_kernel(); ++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT; ++ pax_close_kernel(); ++ printk("PAX: slow and weak UDEREF enabled\n"); ++ } else ++ printk("PAX: UDEREF disabled\n"); ++#endif ++ ++ return; ++ } ++ ++ printk("PAX: PCID detected\n"); ++ set_in_cr4(X86_CR4_PCIDE); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_open_kernel(); ++ clone_pgd_mask = ~(pgdval_t)0UL; ++ pax_close_kernel(); ++ if (pax_user_shadow_base) ++ printk("PAX: weak UDEREF enabled\n"); ++ else { ++ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF); ++ printk("PAX: strong UDEREF enabled\n"); ++ } ++#endif ++ ++ if (cpu_has(c, X86_FEATURE_INVPCID)) ++ printk("PAX: INVPCID detected\n"); ++} ++#endif ++ + /* + * Some CPU features depend on higher CPUID levels, which may not always + * be available due to CPUID level capping or broken virtualization +@@ -393,7 +392,7 @@ void switch_to_new_gdt(int cpu) + { + struct desc_ptr gdt_descr; + +- gdt_descr.address = (long)get_cpu_gdt_table(cpu); ++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + /* Reload the per-cpu base */ +@@ -883,6 +882,10 @@ static void identify_cpu(struct cpuinfo_x86 *c) + setup_smep(c); + setup_smap(c); + ++#ifdef CONFIG_X86_64 ++ setup_pcid(c); ++#endif ++ + /* + * The vendor-specific functions might have changed features. + * Now we do "generic changes." +@@ -891,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c) + /* Filter out anything that depends on CPUID levels we don't have */ + filter_cpuid_features(c, true); + ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) ++ setup_clear_cpu_cap(X86_FEATURE_SEP); ++#endif ++ + /* If the model name is still unset, do table lookup. 
*/ + if (!c->x86_model_id[0]) { + const char *p; +@@ -1078,10 +1085,12 @@ static __init int setup_disablecpuid(char *arg) + } + __setup("clearcpuid=", setup_disablecpuid); + ++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo; ++EXPORT_PER_CPU_SYMBOL(current_tinfo); ++ + #ifdef CONFIG_X86_64 +-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; +-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, +- (unsigned long) debug_idt_table }; ++struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; ++const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table }; + + DEFINE_PER_CPU_FIRST(union irq_stack_union, + irq_stack_union) __aligned(PAGE_SIZE) __visible; +@@ -1095,7 +1104,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = + EXPORT_PER_CPU_SYMBOL(current_task); + + DEFINE_PER_CPU(unsigned long, kernel_stack) = +- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; ++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE; + EXPORT_PER_CPU_SYMBOL(kernel_stack); + + DEFINE_PER_CPU(char *, irq_stack_ptr) = +@@ -1245,7 +1254,7 @@ void cpu_init(void) + load_ucode_ap(); + + cpu = stack_smp_processor_id(); +- t = &per_cpu(init_tss, cpu); ++ t = init_tss + cpu; + oist = &per_cpu(orig_ist, cpu); + + #ifdef CONFIG_NUMA +@@ -1280,7 +1289,6 @@ void cpu_init(void) + wrmsrl(MSR_KERNEL_GS_BASE, 0); + barrier(); + +- x86_configure_nx(); + enable_x2apic(); + + /* +@@ -1332,7 +1340,7 @@ void cpu_init(void) + { + int cpu = smp_processor_id(); + struct task_struct *curr = current; +- struct tss_struct *t = &per_cpu(init_tss, cpu); ++ struct tss_struct *t = init_tss + cpu; + struct thread_struct *thread = &curr->thread; + + show_ucode_info_early(); +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c +index 0641113..06f5ba4 100644 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c +@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = { + }; + + #ifdef CONFIG_AMD_NB ++static struct attribute *default_attrs_amd_nb[] = { ++ &type.attr, ++ &level.attr, ++ &coherency_line_size.attr, ++ &physical_line_partition.attr, ++ &ways_of_associativity.attr, ++ &number_of_sets.attr, ++ &size.attr, ++ &shared_cpu_map.attr, ++ &shared_cpu_list.attr, ++ NULL, ++ NULL, ++ NULL, ++ NULL ++}; ++ + static struct attribute **amd_l3_attrs(void) + { + static struct attribute **attrs; +@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void) + + n = ARRAY_SIZE(default_attrs); + +- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) +- n += 2; +- +- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) +- n += 1; +- +- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); +- if (attrs == NULL) +- return attrs = default_attrs; +- +- for (n = 0; default_attrs[n]; n++) +- attrs[n] = default_attrs[n]; ++ attrs = default_attrs_amd_nb; + + if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { + attrs[n++] = &cache_disable_0.attr; +@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = { + .default_attrs = default_attrs, + }; + ++#ifdef CONFIG_AMD_NB ++static struct kobj_type ktype_cache_amd_nb = { ++ .sysfs_ops = &sysfs_ops, ++ .default_attrs = default_attrs_amd_nb, ++}; ++#endif ++ + static struct kobj_type ktype_percpu_entry = { + .sysfs_ops = &sysfs_ops, + }; +@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev) + return retval; + } + ++#ifdef CONFIG_AMD_NB 
++ amd_l3_attrs(); ++#endif ++ + for (i = 0; i < num_cache_leaves; i++) { ++ struct kobj_type *ktype; ++ + this_object = INDEX_KOBJECT_PTR(cpu, i); + this_object->cpu = cpu; + this_object->index = i; + + this_leaf = CPUID4_INFO_IDX(cpu, i); + +- ktype_cache.default_attrs = default_attrs; ++ ktype = &ktype_cache; + #ifdef CONFIG_AMD_NB + if (this_leaf->base.nb) +- ktype_cache.default_attrs = amd_l3_attrs(); ++ ktype = &ktype_cache_amd_nb; + #endif + retval = kobject_init_and_add(&(this_object->kobj), +- &ktype_cache, ++ ktype, + per_cpu(ici_cache_kobject, cpu), + "index%1lu", i); + if (unlikely(retval)) { +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index 4d5419b..95f11bb 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -45,6 +45,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/local.h> + + #include "mce-internal.h" + +@@ -258,7 +259,7 @@ static void print_mce(struct mce *m) + !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", + m->cs, m->ip); + +- if (m->cs == __KERNEL_CS) ++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) + print_symbol("{%s}", m->ip); + pr_cont("\n"); + } +@@ -291,10 +292,10 @@ static void print_mce(struct mce *m) + + #define PANIC_TIMEOUT 5 /* 5 seconds */ + +-static atomic_t mce_paniced; ++static atomic_unchecked_t mce_paniced; + + static int fake_panic; +-static atomic_t mce_fake_paniced; ++static atomic_unchecked_t mce_fake_paniced; + + /* Panic in progress. Enable interrupts and wait for final IPI */ + static void wait_for_panic(void) +@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + /* + * Make sure only one CPU runs in machine check panic + */ +- if (atomic_inc_return(&mce_paniced) > 1) ++ if (atomic_inc_return_unchecked(&mce_paniced) > 1) + wait_for_panic(); + barrier(); + +@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + console_verbose(); + } else { + /* Don't log too much for fake panic */ +- if (atomic_inc_return(&mce_fake_paniced) > 1) ++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1) + return; + } + /* First print corrected ones that are still unlogged */ +@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) + if (!fake_panic) { + if (panic_timeout == 0) + panic_timeout = mca_cfg.panic_timeout; +- panic(msg); ++ panic("%s", msg); + } else + pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); + } +@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t) + * might have been modified by someone else. + */ + rmb(); +- if (atomic_read(&mce_paniced)) ++ if (atomic_read_unchecked(&mce_paniced)) + wait_for_panic(); + if (!mca_cfg.monarch_timeout) + goto out; +@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) + } + + /* Call the installed machine check handler for this CPU setup. 
*/ +-void (*machine_check_vector)(struct pt_regs *, long error_code) = ++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only = + unexpected_machine_check; + + /* +@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) + return; + } + ++ pax_open_kernel(); + machine_check_vector = do_machine_check; ++ pax_close_kernel(); + + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_vendor(c); +@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) + */ + + static DEFINE_SPINLOCK(mce_chrdev_state_lock); +-static int mce_chrdev_open_count; /* #times opened */ ++static local_t mce_chrdev_open_count; /* #times opened */ + static int mce_chrdev_open_exclu; /* already open exclusive? */ + + static int mce_chrdev_open(struct inode *inode, struct file *file) +@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) + spin_lock(&mce_chrdev_state_lock); + + if (mce_chrdev_open_exclu || +- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { ++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) { + spin_unlock(&mce_chrdev_state_lock); + + return -EBUSY; +@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file) + + if (file->f_flags & O_EXCL) + mce_chrdev_open_exclu = 1; +- mce_chrdev_open_count++; ++ local_inc(&mce_chrdev_open_count); + + spin_unlock(&mce_chrdev_state_lock); + +@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file) + { + spin_lock(&mce_chrdev_state_lock); + +- mce_chrdev_open_count--; ++ local_dec(&mce_chrdev_open_count); + mce_chrdev_open_exclu = 0; + + spin_unlock(&mce_chrdev_state_lock); +@@ -2406,7 +2409,7 @@ static __init void mce_init_banks(void) + + for (i = 0; i < mca_cfg.banks; i++) { + struct mce_bank *b = &mce_banks[i]; +- struct device_attribute *a = &b->attr; ++ device_attribute_no_const *a = &b->attr; + + sysfs_attr_init(&a->attr); + a->attr.name = b->attrname; +@@ -2474,7 +2477,7 @@ struct dentry *mce_get_debugfs_dir(void) + static void mce_reset(void) + { + cpu_missing = 0; +- atomic_set(&mce_fake_paniced, 0); ++ atomic_set_unchecked(&mce_fake_paniced, 0); + atomic_set(&mce_executing, 0); + atomic_set(&mce_callin, 0); + atomic_set(&global_nwo, 0); +diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c +index a304298..49b6d06 100644 +--- a/arch/x86/kernel/cpu/mcheck/p5.c ++++ b/arch/x86/kernel/cpu/mcheck/p5.c +@@ -10,6 +10,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/pgtable.h> + + /* By default disabled */ + int mce_p5_enabled __read_mostly; +@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) + if (!cpu_has(c, X86_FEATURE_MCE)) + return; + ++ pax_open_kernel(); + machine_check_vector = pentium_machine_check; ++ pax_close_kernel(); + /* Make sure the vector pointer is visible before we enable MCEs: */ + wmb(); + +diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c +index 7dc5564..1273569 100644 +--- a/arch/x86/kernel/cpu/mcheck/winchip.c ++++ b/arch/x86/kernel/cpu/mcheck/winchip.c +@@ -9,6 +9,7 @@ + #include <asm/processor.h> + #include <asm/mce.h> + #include <asm/msr.h> ++#include <asm/pgtable.h> + + /* Machine check handler for WinChip C6: */ + static void winchip_machine_check(struct pt_regs *regs, long error_code) +@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) + { + u32 lo, hi; + ++ pax_open_kernel(); + machine_check_vector = winchip_machine_check; ++ 
pax_close_kernel(); + /* Make sure the vector pointer is visible before we enable MCEs: */ + wmb(); + +diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c +index 15c9876..0a43909 100644 +--- a/arch/x86/kernel/cpu/microcode/core.c ++++ b/arch/x86/kernel/cpu/microcode/core.c +@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) + return NOTIFY_OK; + } + +-static struct notifier_block __refdata mc_cpu_notifier = { ++static struct notifier_block mc_cpu_notifier = { + .notifier_call = mc_cpu_callback, + }; + +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c +index a276fa7..e66810f 100644 +--- a/arch/x86/kernel/cpu/microcode/intel.c ++++ b/arch/x86/kernel/cpu/microcode/intel.c +@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, + + static int get_ucode_user(void *to, const void *from, size_t n) + { +- return copy_from_user(to, from, n); ++ return copy_from_user(to, (const void __force_user *)from, n); + } + + static enum ucode_state + request_microcode_user(int cpu, const void __user *buf, size_t size) + { +- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); ++ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user); + } + + static void microcode_fini_cpu(int cpu) +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c +index f961de9..8a9d332 100644 +--- a/arch/x86/kernel/cpu/mtrr/main.c ++++ b/arch/x86/kernel/cpu/mtrr/main.c +@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex); + u64 size_or_mask, size_and_mask; + static bool mtrr_aps_delayed_init; + +-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; ++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; + + const struct mtrr_ops *mtrr_if; + +diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h +index df5e41f..816c719 100644 +--- a/arch/x86/kernel/cpu/mtrr/mtrr.h ++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h +@@ -25,7 +25,7 @@ struct mtrr_ops { + int (*validate_add_page)(unsigned long base, unsigned long size, + unsigned int type); + int (*have_wrcomb)(void); +-}; ++} __do_const; + + extern int generic_get_free_region(unsigned long base, unsigned long size, + int replace_reg); +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index 79f9f84..38ace52 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void) + pr_info("no hardware sampling interrupt available.\n"); + } + +-static struct attribute_group x86_pmu_format_group = { ++static attribute_group_no_const x86_pmu_format_group = { + .name = "format", + .attrs = NULL, + }; +@@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] = { + NULL, + }; + +-static struct attribute_group x86_pmu_events_group = { ++static attribute_group_no_const x86_pmu_events_group = { + .name = "events", + .attrs = events_attr, + }; +@@ -1971,7 +1971,7 @@ static unsigned long get_segment_base(unsigned int segment) + if (idx > GDT_ENTRIES) + return 0; + +- desc = __this_cpu_ptr(&gdt_page.gdt[0]); ++ desc = get_cpu_gdt_table(smp_processor_id()); + } + + return get_desc_base(desc + idx); +@@ -2061,7 +2061,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) + break; + + perf_callchain_store(entry, frame.return_address); +- fp = frame.next_frame; ++ fp = (const void __force_user 
*)frame.next_frame; + } + } + +diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c +index 639d128..e92d7e5 100644 +--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c ++++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c +@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags) + static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu) + { + struct attribute **attrs; +- struct attribute_group *attr_group; ++ attribute_group_no_const *attr_group; + int i = 0, j; + + while (amd_iommu_v2_event_descs[i].attr.attr.name) +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c +index 1340ebf..fc6d5c9 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel.c ++++ b/arch/x86/kernel/cpu/perf_event_intel.c +@@ -2318,10 +2318,10 @@ __init int intel_pmu_init(void) + x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); + + if (boot_cpu_has(X86_FEATURE_PDCM)) { +- u64 capabilities; ++ u64 capabilities = x86_pmu.intel_cap.capabilities; + +- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); +- x86_pmu.intel_cap.capabilities = capabilities; ++ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities)) ++ x86_pmu.intel_cap.capabilities = capabilities; + } + + intel_ds_init(); +diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c +index 5ad35ad..e0a3960 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c +@@ -425,7 +425,7 @@ static struct attribute *rapl_events_cln_attr[] = { + NULL, + }; + +-static struct attribute_group rapl_pmu_events_group = { ++static attribute_group_no_const rapl_pmu_events_group __read_only = { + .name = "events", + .attrs = NULL, /* patched at runtime */ + }; +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +index 047f540..afdeba0 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +@@ -3326,7 +3326,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types) + static int __init uncore_type_init(struct intel_uncore_type *type) + { + struct intel_uncore_pmu *pmus; +- struct attribute_group *attr_group; ++ attribute_group_no_const *attr_group; + struct attribute **attrs; + int i, j; + +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h +index a80ab71..4089da5 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h +@@ -498,7 +498,7 @@ struct intel_uncore_box { + struct uncore_event_desc { + struct kobj_attribute attr; + const char *config; +-}; ++} __do_const; + + #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ + { \ +diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c +index 7d9481c..99c7e4b 100644 +--- a/arch/x86/kernel/cpuid.c ++++ b/arch/x86/kernel/cpuid.c +@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, + return notifier_from_errno(err); + } + +-static struct notifier_block __refdata cpuid_class_cpu_notifier = ++static struct notifier_block cpuid_class_cpu_notifier = + { + .notifier_call = cpuid_class_cpu_callback, + }; +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c +index a57902e..ebaae2a 100644 +--- a/arch/x86/kernel/crash.c ++++ b/arch/x86/kernel/crash.c +@@ -57,10 +57,8 @@ static void kdump_nmi_callback(int cpu, 
struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + struct pt_regs fixed_regs; +-#endif + +-#ifdef CONFIG_X86_32 +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + crash_fixup_ss_esp(&fixed_regs, regs); + regs = &fixed_regs; + } +diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c +index afa64ad..dce67dd 100644 +--- a/arch/x86/kernel/crash_dump_64.c ++++ b/arch/x86/kernel/crash_dump_64.c +@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + return -ENOMEM; + + if (userbuf) { +- if (copy_to_user(buf, vaddr + offset, csize)) { ++ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } +diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c +index f6dfd93..892ade4 100644 +--- a/arch/x86/kernel/doublefault.c ++++ b/arch/x86/kernel/doublefault.c +@@ -12,7 +12,7 @@ + + #define DOUBLEFAULT_STACKSIZE (1024) + static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; +-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) ++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) + + #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) + +@@ -22,7 +22,7 @@ static void doublefault_fn(void) + unsigned long gdt, tss; + + native_store_gdt(&gdt_desc); +- gdt = gdt_desc.address; ++ gdt = (unsigned long)gdt_desc.address; + + printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); + +@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = { + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, +- .es = __USER_DS, ++ .es = __KERNEL_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, +- .ds = __USER_DS, ++ .ds = __KERNEL_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c +index d9c12d3..7858b62 100644 +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ -2,6 +2,9 @@ + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/kprobes.h> + #include <linux/uaccess.h> +@@ -40,16 +43,14 @@ void printk_address(unsigned long address) + static void + print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, +- struct thread_info *tinfo, int *graph) ++ struct task_struct *task, int *graph) + { +- struct task_struct *task; + unsigned long ret_addr; + int index; + + if (addr != (unsigned long)return_to_handler) + return; + +- task = tinfo->task; + index = task->curr_ret_stack; + + if (!task->ret_stack || index < *graph) +@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data, + static inline void + print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, +- struct thread_info *tinfo, int *graph) ++ struct task_struct *task, int *graph) + { } + #endif + +@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data, + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack + */ + +-static inline int valid_stack_ptr(struct thread_info *tinfo, +- void *p, unsigned int size, void *end) ++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end) + { +- void *t = tinfo; + if (end) { + if (p < end && p >= (end-THREAD_SIZE)) + return 1; 
+@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, + } + + unsigned long +-print_context_stack(struct thread_info *tinfo, ++print_context_stack(struct task_struct *task, void *stack_start, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) + { + struct stack_frame *frame = (struct stack_frame *)bp; + +- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { ++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) { + unsigned long addr; + + addr = *stack; +@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo, + } else { + ops->address(data, addr, 0); + } +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); ++ print_ftrace_graph_addr(addr, data, ops, task, graph); + } + stack++; + } +@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo, + EXPORT_SYMBOL_GPL(print_context_stack); + + unsigned long +-print_context_stack_bp(struct thread_info *tinfo, ++print_context_stack_bp(struct task_struct *task, void *stack_start, + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data, + unsigned long *end, int *graph) +@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo, + struct stack_frame *frame = (struct stack_frame *)bp; + unsigned long *ret_addr = &frame->return_address; + +- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { ++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) { + unsigned long addr = *ret_addr; + + if (!__kernel_text_address(addr)) +@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo, + ops->address(data, addr, 1); + frame = frame->next_frame; + ret_addr = &frame->return_address; +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph); ++ print_ftrace_graph_addr(addr, data, ops, task, graph); + } + + return (unsigned long)frame; +@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name) + static void print_trace_address(void *data, unsigned long addr, int reliable) + { + touch_nmi_watchdog(); +- printk(data); ++ printk("%s", (char *)data); + printk_stack_address(addr, reliable); + } + +@@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void) + } + EXPORT_SYMBOL_GPL(oops_begin); + ++extern void gr_handle_kernel_exploit(void); ++ + void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) + { + if (regs && kexec_should_crash(current)) +@@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); +- do_exit(signr); ++ ++ gr_handle_kernel_exploit(); ++ ++ do_group_exit(signr); + } + + int __kprobes __die(const char *str, struct pt_regs *regs, long err) +@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) + print_modules(); + show_regs(regs); + #ifdef CONFIG_X86_32 +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; + } else { +@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err) + unsigned long flags = oops_begin(); + int sig = SIGSEGV; + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + report_bug(regs->ip, regs); + + if (__die(str, regs, err)) +diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c +index f2a1770..10fa52d 100644 +--- a/arch/x86/kernel/dumpstack_32.c ++++ b/arch/x86/kernel/dumpstack_32.c +@@ -38,15 +38,13 @@ void 
dump_trace(struct task_struct *task, struct pt_regs *regs,
+ bp = stack_frame(task, regs);
+
+ for (;;) {
+- struct thread_info *context;
++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+
+- context = (struct thread_info *)
+- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+
+- stack = (unsigned long *)context->previous_esp;
+- if (!stack)
++ if (stack_start == task_stack_page(task))
+ break;
++ stack = *(unsigned long **)stack_start;
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+ touch_nmi_watchdog();
+@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
+ int i;
+
+ show_regs_print_info(KERN_EMERG);
+- __show_regs(regs, !user_mode_vm(regs));
++ __show_regs(regs, !user_mode(regs));
+
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
+
+ pr_emerg("Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+
+ pr_emerg("Code:");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
+ pr_cont(" Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ pr_cont(" <%02x>", c);
+ else
+ pr_cont(" %02x", c);
+@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_left;
++
++ /* all kernel stacks are of the same size */
++ stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index addb207..921706b 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned used = 0;
+- struct thread_info *tinfo;
+ int graph = 0;
+ unsigned long dummy;
++ void *stack_start;
+
+ if (!task)
+ task = current;
+@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * current stack address.
If the stacks consist of nested + * exceptions + */ +- tinfo = task_thread_info(task); + for (;;) { + char *id; + unsigned long *estack_end; ++ + estack_end = in_exception_stack(cpu, (unsigned long)stack, + &used, &id); + +@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + if (ops->stack(data, id) < 0) + break; + +- bp = ops->walk_stack(tinfo, stack, bp, ops, ++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops, + data, estack_end, &graph); + ops->stack(data, "<EOE>"); + /* +@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + * second-to-last pointer (index -2 to end) in the + * exception stack: + */ ++ if ((u16)estack_end[-1] != __KERNEL_DS) ++ goto out; + stack = (unsigned long *) estack_end[-2]; + continue; + } +@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + if (in_irq_stack(stack, irq_stack, irq_stack_end)) { + if (ops->stack(data, "IRQ") < 0) + break; +- bp = ops->walk_stack(tinfo, stack, bp, ++ bp = ops->walk_stack(task, irq_stack, stack, bp, + ops, data, irq_stack_end, &graph); + /* + * We link to the next stack (which would be +@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, + /* + * This handles the process stack: + */ +- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); ++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); ++out: + put_cpu(); + } + EXPORT_SYMBOL(dump_trace); +@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip) + + return ud2 == 0x0b0f; + } ++ ++#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY) ++void pax_check_alloca(unsigned long size) ++{ ++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end; ++ unsigned cpu, used; ++ char *id; ++ ++ /* check the process stack first */ ++ stack_start = (unsigned long)task_stack_page(current); ++ stack_end = stack_start + THREAD_SIZE; ++ if (likely(stack_start <= sp && sp < stack_end)) { ++ unsigned long stack_left = sp & (THREAD_SIZE - 1); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ cpu = get_cpu(); ++ ++ /* check the irq stacks */ ++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu); ++ stack_start = stack_end - IRQ_STACK_SIZE; ++ if (stack_start <= sp && sp < stack_end) { ++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1); ++ put_cpu(); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ /* check the exception stacks */ ++ used = 0; ++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id); ++ stack_start = stack_end - EXCEPTION_STKSZ; ++ if (stack_end && stack_start <= sp && sp < stack_end) { ++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1); ++ put_cpu(); ++ BUG_ON(stack_left < 256 || size >= stack_left - 256); ++ return; ++ } ++ ++ put_cpu(); ++ ++ /* unknown stack */ ++ BUG(); ++} ++EXPORT_SYMBOL(pax_check_alloca); ++#endif +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c +index 988c00a..4f673b6 100644 +--- a/arch/x86/kernel/e820.c ++++ b/arch/x86/kernel/e820.c +@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void) + + static void early_panic(char *msg) + { +- early_printk(msg); +- panic(msg); ++ early_printk("%s", msg); ++ panic("%s", msg); + } + + static int userdef __initdata; +diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c +index 01d1c18..8073693 100644 +--- 
a/arch/x86/kernel/early_printk.c ++++ b/arch/x86/kernel/early_printk.c +@@ -7,6 +7,7 @@ + #include <linux/pci_regs.h> + #include <linux/pci_ids.h> + #include <linux/errno.h> ++#include <linux/sched.h> + #include <asm/io.h> + #include <asm/processor.h> + #include <asm/fcntl.h> +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S +index c5a9cb9..228d280 100644 +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -177,13 +177,153 @@ + /*CFI_REL_OFFSET gs, PT_GS*/ + .endm + .macro SET_KERNEL_GS reg ++ ++#ifdef CONFIG_CC_STACKPROTECTOR + movl $(__KERNEL_STACK_CANARY), \reg ++#elif defined(CONFIG_PAX_MEMORY_UDEREF) ++ movl $(__USER_DS), \reg ++#else ++ xorl \reg, \reg ++#endif ++ + movl \reg, %gs + .endm + + #endif /* CONFIG_X86_32_LAZY_GS */ + +-.macro SAVE_ALL ++.macro pax_enter_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_enter_kernel ++#endif ++.endm ++ ++.macro pax_exit_kernel ++#ifdef CONFIG_PAX_KERNEXEC ++ call pax_exit_kernel ++#endif ++.endm ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ENTRY(pax_enter_kernel) ++#ifdef CONFIG_PARAVIRT ++ pushl %eax ++ pushl %ecx ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ bts $16, %esi ++ jnc 1f ++ mov %cs, %esi ++ cmp $__KERNEL_CS, %esi ++ jz 3f ++ ljmp $__KERNEL_CS, $3f ++1: ljmp $__KERNEXEC_KERNEL_CS, $2f ++2: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++#else ++ mov %esi, %cr0 ++#endif ++3: ++#ifdef CONFIG_PARAVIRT ++ popl %ecx ++ popl %eax ++#endif ++ ret ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++#ifdef CONFIG_PARAVIRT ++ pushl %eax ++ pushl %ecx ++#endif ++ mov %cs, %esi ++ cmp $__KERNEXEC_KERNEL_CS, %esi ++ jnz 2f ++#ifdef CONFIG_PARAVIRT ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ btr $16, %esi ++ ljmp $__KERNEL_CS, $1f ++1: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); ++#else ++ mov %esi, %cr0 ++#endif ++2: ++#ifdef CONFIG_PARAVIRT ++ popl %ecx ++ popl %eax ++#endif ++ ret ++ENDPROC(pax_exit_kernel) ++#endif ++ ++ .macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++/* ++ * ebp: thread_info ++ */ ++ENTRY(pax_erase_kstack) ++ pushl %edi ++ pushl %ecx ++ pushl %eax ++ ++ mov TI_lowest_stack(%ebp), %edi ++ mov $-0xBEEF, %eax ++ std ++ ++1: mov %edi, %ecx ++ and $THREAD_SIZE_asm - 1, %ecx ++ shr $2, %ecx ++ repne scasl ++ jecxz 2f ++ ++ cmp $2*16, %ecx ++ jc 2f ++ ++ mov $2*16, %ecx ++ repe scasl ++ jecxz 2f ++ jne 1b ++ ++2: cld ++ mov %esp, %ecx ++ sub %edi, %ecx ++ ++ cmp $THREAD_SIZE_asm, %ecx ++ jb 3f ++ ud2 ++3: ++ ++ shr $2, %ecx ++ rep stosl ++ ++ mov TI_task_thread_sp0(%ebp), %edi ++ sub $128, %edi ++ mov %edi, TI_lowest_stack(%ebp) ++ ++ popl %eax ++ popl %ecx ++ popl %edi ++ ret ++ENDPROC(pax_erase_kstack) ++#endif ++ ++.macro __SAVE_ALL _DS + cld + PUSH_GS + pushl_cfi %fs +@@ -206,7 +346,7 @@ + CFI_REL_OFFSET ecx, 0 + pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 +- movl $(__USER_DS), %edx ++ movl $\_DS, %edx + movl %edx, %ds + movl %edx, %es + movl $(__KERNEL_PERCPU), %edx +@@ -214,6 +354,15 @@ + SET_KERNEL_GS %edx + .endm + ++.macro SAVE_ALL ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ __SAVE_ALL __KERNEL_DS ++ pax_enter_kernel ++#else ++ __SAVE_ALL __USER_DS ++#endif ++.endm ++ + .macro 
RESTORE_INT_REGS + popl_cfi %ebx + CFI_RESTORE ebx +@@ -297,7 +446,7 @@ ENTRY(ret_from_fork) + popfl_cfi + jmp syscall_exit + CFI_ENDPROC +-END(ret_from_fork) ++ENDPROC(ret_from_fork) + + ENTRY(ret_from_kernel_thread) + CFI_STARTPROC +@@ -344,7 +493,15 @@ ret_from_intr: + andl $SEGMENT_RPL_MASK, %eax + #endif + cmpl $USER_RPL, %eax ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jae resume_userspace ++ ++ pax_exit_kernel ++ jmp resume_kernel ++#else + jb resume_kernel # not returning to v8086 or userspace ++#endif + + ENTRY(resume_userspace) + LOCKDEP_SYS_EXIT +@@ -356,8 +513,8 @@ ENTRY(resume_userspace) + andl $_TIF_WORK_MASK, %ecx # is there any work to be done on + # int/exception return? + jne work_pending +- jmp restore_all +-END(ret_from_exception) ++ jmp restore_all_pax ++ENDPROC(ret_from_exception) + + #ifdef CONFIG_PREEMPT + ENTRY(resume_kernel) +@@ -369,7 +526,7 @@ need_resched: + jz restore_all + call preempt_schedule_irq + jmp need_resched +-END(resume_kernel) ++ENDPROC(resume_kernel) + #endif + CFI_ENDPROC + /* +@@ -403,30 +560,45 @@ sysenter_past_esp: + /*CFI_REL_OFFSET cs, 0*/ + /* + * Push current_thread_info()->sysenter_return to the stack. +- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words +- * pushed above; +8 corresponds to copy_thread's esp0 setting. + */ +- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) ++ pushl_cfi $0 + CFI_REL_OFFSET eip, 0 + + pushl_cfi %eax + SAVE_ALL ++ GET_THREAD_INFO(%ebp) ++ movl TI_sysenter_return(%ebp),%ebp ++ movl %ebp,PT_EIP(%esp) + ENABLE_INTERRUPTS(CLBR_NONE) + + /* + * Load the potential sixth argument from user stack. + * Careful about security. + */ ++ movl PT_OLDESP(%esp),%ebp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov PT_OLDSS(%esp),%ds ++1: movl %ds:(%ebp),%ebp ++ push %ss ++ pop %ds ++#else + cmpl $__PAGE_OFFSET-3,%ebp + jae syscall_fault + ASM_STAC + 1: movl (%ebp),%ebp + ASM_CLAC ++#endif ++ + movl %ebp,PT_EBP(%esp) + _ASM_EXTABLE(1b,syscall_fault) + + GET_THREAD_INFO(%ebp) + ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) + jnz sysenter_audit + sysenter_do_call: +@@ -442,12 +614,24 @@ sysenter_after_call: + testl $_TIF_ALLWORK_MASK, %ecx + jne sysexit_audit + sysenter_exit: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushl_cfi %eax ++ movl %esp, %eax ++ call pax_randomize_kstack ++ popl_cfi %eax ++#endif ++ ++ pax_erase_kstack ++ + /* if something modifies registers it must also disable sysexit */ + movl PT_EIP(%esp), %edx + movl PT_OLDESP(%esp), %ecx + xorl %ebp,%ebp + TRACE_IRQS_ON + 1: mov PT_FS(%esp), %fs ++2: mov PT_DS(%esp), %ds ++3: mov PT_ES(%esp), %es + PTGS_TO_GS + ENABLE_INTERRUPTS_SYSEXIT + +@@ -464,6 +648,9 @@ sysenter_audit: + movl %eax,%edx /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ + call __audit_syscall_entry ++ ++ pax_erase_kstack ++ + pushl_cfi %ebx + movl PT_EAX(%esp),%eax /* reload syscall number */ + jmp sysenter_do_call +@@ -489,10 +676,16 @@ sysexit_audit: + + CFI_ENDPROC + .pushsection .fixup,"ax" +-2: movl $0,PT_FS(%esp) ++4: movl $0,PT_FS(%esp) ++ jmp 1b ++5: movl $0,PT_DS(%esp) ++ jmp 1b ++6: movl $0,PT_ES(%esp) + jmp 1b + .popsection +- _ASM_EXTABLE(1b,2b) ++ _ASM_EXTABLE(1b,4b) ++ _ASM_EXTABLE(2b,5b) ++ _ASM_EXTABLE(3b,6b) + PTGS_TO_GS_EX + ENDPROC(ia32_sysenter_target) + +@@ -507,6 +700,11 @@ ENTRY(system_call) + pushl_cfi %eax # save orig_eax + SAVE_ALL + GET_THREAD_INFO(%ebp) ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + # system call tracing in operation / 
emulation + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) + jnz syscall_trace_entry +@@ -526,6 +724,15 @@ syscall_exit: + testl $_TIF_ALLWORK_MASK, %ecx # current->work + jne syscall_exit_work + ++restore_all_pax: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ movl %esp, %eax ++ call pax_randomize_kstack ++#endif ++ ++ pax_erase_kstack ++ + restore_all: + TRACE_IRQS_IRET + restore_all_notrace: +@@ -580,14 +787,34 @@ ldt_ss: + * compensating for the offset by changing to the ESPFIX segment with + * a base address that matches for the difference. + */ +-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) ++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx) + mov %esp, %edx /* load kernel esp */ + mov PT_OLDESP(%esp), %eax /* load userspace esp */ + mov %dx, %ax /* eax: new kernel esp */ + sub %eax, %edx /* offset (low word is 0) */ ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif + shr $16, %edx +- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ +- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %esi ++ btr $16, %esi ++ mov %esi, %cr0 ++#endif ++ ++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */ ++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ bts $16, %esi ++ mov %esi, %cr0 ++#endif ++ + pushl_cfi $__ESPFIX_SS + pushl_cfi %eax /* new kernel esp */ + /* Disable interrupts, but do not irqtrace this section: we +@@ -617,20 +844,18 @@ work_resched: + movl TI_flags(%ebp), %ecx + andl $_TIF_WORK_MASK, %ecx # is there any work to be done other + # than syscall tracing? +- jz restore_all ++ jz restore_all_pax + testb $_TIF_NEED_RESCHED, %cl + jnz work_resched + + work_notifysig: # deal with pending signals and + # notify-resume requests ++ movl %esp, %eax + #ifdef CONFIG_VM86 + testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) +- movl %esp, %eax + jne work_notifysig_v86 # returning to kernel-space or + # vm86-space + 1: +-#else +- movl %esp, %eax + #endif + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) +@@ -651,7 +876,7 @@ work_notifysig_v86: + movl %eax, %esp + jmp 1b + #endif +-END(work_pending) ++ENDPROC(work_pending) + + # perform syscall exit tracing + ALIGN +@@ -659,11 +884,14 @@ syscall_trace_entry: + movl $-ENOSYS,PT_EAX(%esp) + movl %esp, %eax + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + /* What it returned is what we'll actually use. 
*/ + cmpl $(NR_syscalls), %eax + jnae syscall_call + jmp syscall_exit +-END(syscall_trace_entry) ++ENDPROC(syscall_trace_entry) + + # perform syscall exit tracing + ALIGN +@@ -676,26 +904,30 @@ syscall_exit_work: + movl %esp, %eax + call syscall_trace_leave + jmp resume_userspace +-END(syscall_exit_work) ++ENDPROC(syscall_exit_work) + CFI_ENDPROC + + RING0_INT_FRAME # can't unwind into user space anyway + syscall_fault: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ push %ss ++ pop %ds ++#endif + ASM_CLAC + GET_THREAD_INFO(%ebp) + movl $-EFAULT,PT_EAX(%esp) + jmp resume_userspace +-END(syscall_fault) ++ENDPROC(syscall_fault) + + syscall_badsys: + movl $-ENOSYS,%eax + jmp syscall_after_call +-END(syscall_badsys) ++ENDPROC(syscall_badsys) + + sysenter_badsys: + movl $-ENOSYS,%eax + jmp sysenter_after_call +-END(syscall_badsys) ++ENDPROC(sysenter_badsys) + CFI_ENDPROC + /* + * End of kprobes section +@@ -712,8 +944,15 @@ END(syscall_badsys) + */ + #ifdef CONFIG_X86_ESPFIX32 + /* fixup the stack */ +- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ +- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif ++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */ ++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */ + shl $16, %eax + addl %esp, %eax /* the adjusted stack pointer */ + pushl_cfi $__KERNEL_DS +@@ -769,7 +1008,7 @@ vector=vector+1 + .endr + 2: jmp common_interrupt + .endr +-END(irq_entries_start) ++ENDPROC(irq_entries_start) + + .previous + END(interrupt) +@@ -830,7 +1069,7 @@ ENTRY(coprocessor_error) + pushl_cfi $do_coprocessor_error + jmp error_code + CFI_ENDPROC +-END(coprocessor_error) ++ENDPROC(coprocessor_error) + + ENTRY(simd_coprocessor_error) + RING0_INT_FRAME +@@ -843,7 +1082,7 @@ ENTRY(simd_coprocessor_error) + .section .altinstructions,"a" + altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f + .previous +-.section .altinstr_replacement,"ax" ++.section .altinstr_replacement,"a" + 663: pushl $do_simd_coprocessor_error + 664: + .previous +@@ -852,7 +1091,7 @@ ENTRY(simd_coprocessor_error) + #endif + jmp error_code + CFI_ENDPROC +-END(simd_coprocessor_error) ++ENDPROC(simd_coprocessor_error) + + ENTRY(device_not_available) + RING0_INT_FRAME +@@ -861,18 +1100,18 @@ ENTRY(device_not_available) + pushl_cfi $do_device_not_available + jmp error_code + CFI_ENDPROC +-END(device_not_available) ++ENDPROC(device_not_available) + + #ifdef CONFIG_PARAVIRT + ENTRY(native_iret) + iret + _ASM_EXTABLE(native_iret, iret_exc) +-END(native_iret) ++ENDPROC(native_iret) + + ENTRY(native_irq_enable_sysexit) + sti + sysexit +-END(native_irq_enable_sysexit) ++ENDPROC(native_irq_enable_sysexit) + #endif + + ENTRY(overflow) +@@ -882,7 +1121,7 @@ ENTRY(overflow) + pushl_cfi $do_overflow + jmp error_code + CFI_ENDPROC +-END(overflow) ++ENDPROC(overflow) + + ENTRY(bounds) + RING0_INT_FRAME +@@ -891,7 +1130,7 @@ ENTRY(bounds) + pushl_cfi $do_bounds + jmp error_code + CFI_ENDPROC +-END(bounds) ++ENDPROC(bounds) + + ENTRY(invalid_op) + RING0_INT_FRAME +@@ -900,7 +1139,7 @@ ENTRY(invalid_op) + pushl_cfi $do_invalid_op + jmp error_code + CFI_ENDPROC +-END(invalid_op) ++ENDPROC(invalid_op) + + ENTRY(coprocessor_segment_overrun) + RING0_INT_FRAME +@@ -909,7 +1148,7 @@ ENTRY(coprocessor_segment_overrun) + pushl_cfi $do_coprocessor_segment_overrun + jmp error_code + CFI_ENDPROC +-END(coprocessor_segment_overrun) ++ENDPROC(coprocessor_segment_overrun) + + 
ENTRY(invalid_TSS) + RING0_EC_FRAME +@@ -917,7 +1156,7 @@ ENTRY(invalid_TSS) + pushl_cfi $do_invalid_TSS + jmp error_code + CFI_ENDPROC +-END(invalid_TSS) ++ENDPROC(invalid_TSS) + + ENTRY(segment_not_present) + RING0_EC_FRAME +@@ -925,7 +1164,7 @@ ENTRY(segment_not_present) + pushl_cfi $do_segment_not_present + jmp error_code + CFI_ENDPROC +-END(segment_not_present) ++ENDPROC(segment_not_present) + + ENTRY(stack_segment) + RING0_EC_FRAME +@@ -933,7 +1172,7 @@ ENTRY(stack_segment) + pushl_cfi $do_stack_segment + jmp error_code + CFI_ENDPROC +-END(stack_segment) ++ENDPROC(stack_segment) + + ENTRY(alignment_check) + RING0_EC_FRAME +@@ -941,7 +1180,7 @@ ENTRY(alignment_check) + pushl_cfi $do_alignment_check + jmp error_code + CFI_ENDPROC +-END(alignment_check) ++ENDPROC(alignment_check) + + ENTRY(divide_error) + RING0_INT_FRAME +@@ -950,7 +1189,7 @@ ENTRY(divide_error) + pushl_cfi $do_divide_error + jmp error_code + CFI_ENDPROC +-END(divide_error) ++ENDPROC(divide_error) + + #ifdef CONFIG_X86_MCE + ENTRY(machine_check) +@@ -960,7 +1199,7 @@ ENTRY(machine_check) + pushl_cfi machine_check_vector + jmp error_code + CFI_ENDPROC +-END(machine_check) ++ENDPROC(machine_check) + #endif + + ENTRY(spurious_interrupt_bug) +@@ -970,7 +1209,7 @@ ENTRY(spurious_interrupt_bug) + pushl_cfi $do_spurious_interrupt_bug + jmp error_code + CFI_ENDPROC +-END(spurious_interrupt_bug) ++ENDPROC(spurious_interrupt_bug) + /* + * End of kprobes section + */ +@@ -1080,7 +1319,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, + + ENTRY(mcount) + ret +-END(mcount) ++ENDPROC(mcount) + + ENTRY(ftrace_caller) + cmpl $0, function_trace_stop +@@ -1113,7 +1352,7 @@ ftrace_graph_call: + .globl ftrace_stub + ftrace_stub: + ret +-END(ftrace_caller) ++ENDPROC(ftrace_caller) + + ENTRY(ftrace_regs_caller) + pushf /* push flags before compare (in cs location) */ +@@ -1217,7 +1456,7 @@ trace: + popl %ecx + popl %eax + jmp ftrace_stub +-END(mcount) ++ENDPROC(mcount) + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ + +@@ -1235,7 +1474,7 @@ ENTRY(ftrace_graph_caller) + popl %ecx + popl %eax + ret +-END(ftrace_graph_caller) ++ENDPROC(ftrace_graph_caller) + + .globl return_to_handler + return_to_handler: +@@ -1301,15 +1540,18 @@ error_code: + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart + REG_TO_PTGS %ecx + SET_KERNEL_GS %ecx +- movl $(__USER_DS), %ecx ++ movl $(__KERNEL_DS), %ecx + movl %ecx, %ds + movl %ecx, %es ++ ++ pax_enter_kernel ++ + TRACE_IRQS_OFF + movl %esp,%eax # pt_regs pointer + call *%edi + jmp ret_from_exception + CFI_ENDPROC +-END(page_fault) ++ENDPROC(page_fault) + + /* + * Debug traps and NMI can happen at the one SYSENTER instruction +@@ -1352,7 +1594,7 @@ debug_stack_correct: + call do_debug + jmp ret_from_exception + CFI_ENDPROC +-END(debug) ++ENDPROC(debug) + + /* + * NMI is doubly nasty. 
It can happen _while_ we're handling +@@ -1392,6 +1634,9 @@ nmi_stack_correct: + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer + call do_nmi ++ ++ pax_exit_kernel ++ + jmp restore_all_notrace + CFI_ENDPROC + +@@ -1429,13 +1674,16 @@ nmi_espfix_stack: + FIXUP_ESPFIX_STACK # %eax == %esp + xorl %edx,%edx # zero error code + call do_nmi ++ ++ pax_exit_kernel ++ + RESTORE_REGS + lss 12+4(%esp), %esp # back to espfix stack + CFI_ADJUST_CFA_OFFSET -24 + jmp irq_return + #endif + CFI_ENDPROC +-END(nmi) ++ENDPROC(nmi) + + ENTRY(int3) + RING0_INT_FRAME +@@ -1448,14 +1696,14 @@ ENTRY(int3) + call do_int3 + jmp ret_from_exception + CFI_ENDPROC +-END(int3) ++ENDPROC(int3) + + ENTRY(general_protection) + RING0_EC_FRAME + pushl_cfi $do_general_protection + jmp error_code + CFI_ENDPROC +-END(general_protection) ++ENDPROC(general_protection) + + #ifdef CONFIG_KVM_GUEST + ENTRY(async_page_fault) +@@ -1464,7 +1712,7 @@ ENTRY(async_page_fault) + pushl_cfi $do_async_page_fault + jmp error_code + CFI_ENDPROC +-END(async_page_fault) ++ENDPROC(async_page_fault) + #endif + + /* +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index 03cd2a8..d236ccb 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -60,6 +60,8 @@ + #include <asm/smap.h> + #include <asm/pgtable_types.h> + #include <linux/err.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ + #include <linux/elf-em.h> +@@ -81,8 +83,9 @@ + #ifdef CONFIG_DYNAMIC_FTRACE + + ENTRY(function_hook) ++ pax_force_retaddr + retq +-END(function_hook) ++ENDPROC(function_hook) + + /* skip is set if stack has been adjusted */ + .macro ftrace_caller_setup skip=0 +@@ -123,8 +126,9 @@ GLOBAL(ftrace_graph_call) + #endif + + GLOBAL(ftrace_stub) ++ pax_force_retaddr + retq +-END(ftrace_caller) ++ENDPROC(ftrace_caller) + + ENTRY(ftrace_regs_caller) + /* Save the current flags before compare (in SS location)*/ +@@ -192,7 +196,7 @@ ftrace_restore_flags: + popfq + jmp ftrace_stub + +-END(ftrace_regs_caller) ++ENDPROC(ftrace_regs_caller) + + + #else /* ! 
CONFIG_DYNAMIC_FTRACE */ +@@ -213,6 +217,7 @@ ENTRY(function_hook) + #endif + + GLOBAL(ftrace_stub) ++ pax_force_retaddr + retq + + trace: +@@ -226,12 +231,13 @@ trace: + #endif + subq $MCOUNT_INSN_SIZE, %rdi + ++ pax_force_fptr ftrace_trace_function + call *ftrace_trace_function + + MCOUNT_RESTORE_FRAME + + jmp ftrace_stub +-END(function_hook) ++ENDPROC(function_hook) + #endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ + +@@ -253,8 +259,9 @@ ENTRY(ftrace_graph_caller) + + MCOUNT_RESTORE_FRAME + ++ pax_force_retaddr + retq +-END(ftrace_graph_caller) ++ENDPROC(ftrace_graph_caller) + + GLOBAL(return_to_handler) + subq $24, %rsp +@@ -270,7 +277,9 @@ GLOBAL(return_to_handler) + movq 8(%rsp), %rdx + movq (%rsp), %rax + addq $24, %rsp ++ pax_force_fptr %rdi + jmp *%rdi ++ENDPROC(return_to_handler) + #endif + + +@@ -285,6 +294,430 @@ ENTRY(native_usergs_sysret64) + ENDPROC(native_usergs_sysret64) + #endif /* CONFIG_PARAVIRT */ + ++ .macro ljmpq sel, off ++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) ++ .byte 0x48; ljmp *1234f(%rip) ++ .pushsection .rodata ++ .align 16 ++ 1234: .quad \off; .word \sel ++ .popsection ++#else ++ pushq $\sel ++ pushq $\off ++ lretq ++#endif ++ .endm ++ ++ .macro pax_enter_kernel ++ pax_set_fptr_mask ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ call pax_enter_kernel ++#endif ++ .endm ++ ++ .macro pax_exit_kernel ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ call pax_exit_kernel ++#endif ++ ++ .endm ++ ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ENTRY(pax_enter_kernel) ++ pushq %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jnc 3f ++ mov %cs,%edi ++ cmp $__KERNEL_CS,%edi ++ jnz 2f ++1: ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ 661: jmp 111f ++ .pushsection .altinstr_replacement, "a" ++ 662: ASM_NOP2 ++ .popsection ++ .pushsection .altinstructions, "a" ++ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2 ++ .popsection ++ GET_CR3_INTO_RDI ++ cmp $0,%dil ++ jnz 112f ++ mov $__KERNEL_DS,%edi ++ mov %edi,%ss ++ jmp 111f ++112: cmp $1,%dil ++ jz 113f ++ ud2 ++113: sub $4097,%rdi ++ bts $63,%rdi ++ SET_RDI_INTO_CR3 ++ mov $__UDEREF_KERNEL_DS,%edi ++ mov %edi,%ss ++111: ++#endif ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ ++#ifdef CONFIG_PAX_KERNEXEC ++2: ljmpq __KERNEL_CS,1b ++3: ljmpq __KERNEXEC_KERNEL_CS,4f ++4: SET_RDI_INTO_CR0 ++ jmp 1b ++#endif ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++ pushq %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cs,%rdi ++ cmp $__KERNEXEC_KERNEL_CS,%edi ++ jz 2f ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jnc 4f ++1: ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ 661: jmp 111f ++ .pushsection .altinstr_replacement, "a" ++ 662: ASM_NOP2 ++ .popsection ++ .pushsection .altinstructions, "a" ++ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2 ++ .popsection ++ mov %ss,%edi ++ cmp $__UDEREF_KERNEL_DS,%edi ++ jnz 111f ++ GET_CR3_INTO_RDI ++ cmp $0,%dil ++ jz 112f ++ ud2 ++112: add $4097,%rdi ++ bts $63,%rdi ++ SET_RDI_INTO_CR3 ++ mov $__KERNEL_DS,%edi ++ mov %edi,%ss ++111: ++#endif ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI); ++#endif ++ ++ popq %rdi ++ pax_force_retaddr ++ retq ++ ++#ifdef CONFIG_PAX_KERNEXEC ++2: GET_CR0_INTO_RDI ++ btr 
$16,%rdi ++ jnc 4f ++ ljmpq __KERNEL_CS,3f ++3: SET_RDI_INTO_CR0 ++ jmp 1b ++4: ud2 ++ jmp 4b ++#endif ++ENDPROC(pax_exit_kernel) ++#endif ++ ++ .macro pax_enter_kernel_user ++ pax_set_fptr_mask ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ .endm ++ ++ .macro pax_exit_kernel_user ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushq %rax ++ pushq %r11 ++ call pax_randomize_kstack ++ popq %r11 ++ popq %rax ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ENTRY(pax_enter_kernel_user) ++ pushq %rdi ++ pushq %rbx ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ 661: jmp 111f ++ .pushsection .altinstr_replacement, "a" ++ 662: ASM_NOP2 ++ .popsection ++ .pushsection .altinstructions, "a" ++ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2 ++ .popsection ++ GET_CR3_INTO_RDI ++ cmp $1,%dil ++ jnz 4f ++ sub $4097,%rdi ++ bts $63,%rdi ++ SET_RDI_INTO_CR3 ++ jmp 3f ++111: ++ ++ GET_CR3_INTO_RDI ++ mov %rdi,%rbx ++ add $__START_KERNEL_map,%rbx ++ sub phys_base(%rip),%rbx ++ ++#ifdef CONFIG_PARAVIRT ++ cmpl $0, pv_info+PARAVIRT_enabled ++ jz 1f ++ pushq %rdi ++ i = 0 ++ .rept USER_PGD_PTRS ++ mov i*8(%rbx),%rsi ++ mov $0,%sil ++ lea i*8(%rbx),%rdi ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched) ++ i = i + 1 ++ .endr ++ popq %rdi ++ jmp 2f ++1: ++#endif ++ ++ i = 0 ++ .rept USER_PGD_PTRS ++ movb $0,i*8(%rbx) ++ i = i + 1 ++ .endr ++ ++2: SET_RDI_INTO_CR3 ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ SET_RDI_INTO_CR0 ++#endif ++ ++3: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ popq %rbx ++ popq %rdi ++ pax_force_retaddr ++ retq ++4: ud2 ++ENDPROC(pax_enter_kernel_user) ++ ++ENTRY(pax_exit_kernel_user) ++ pushq %rdi ++ pushq %rbx ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ GET_CR3_INTO_RDI ++ 661: jmp 1f ++ .pushsection .altinstr_replacement, "a" ++ 662: ASM_NOP2 ++ .popsection ++ .pushsection .altinstructions, "a" ++ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2 ++ .popsection ++ cmp $0,%dil ++ jnz 3f ++ add $4097,%rdi ++ bts $63,%rdi ++ SET_RDI_INTO_CR3 ++ jmp 2f ++1: ++ ++ mov %rdi,%rbx ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ btr $16,%rdi ++ jnc 3f ++ SET_RDI_INTO_CR0 ++#endif ++ ++ add $__START_KERNEL_map,%rbx ++ sub phys_base(%rip),%rbx ++ ++#ifdef CONFIG_PARAVIRT ++ cmpl $0, pv_info+PARAVIRT_enabled ++ jz 1f ++ i = 0 ++ .rept USER_PGD_PTRS ++ mov i*8(%rbx),%rsi ++ mov $0x67,%sil ++ lea i*8(%rbx),%rdi ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched) ++ i = i + 1 ++ .endr ++ jmp 2f ++1: ++#endif ++ ++ i = 0 ++ .rept USER_PGD_PTRS ++ movb $0x67,i*8(%rbx) ++ i = i + 1 ++ .endr ++2: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ popq %rbx ++ popq %rdi ++ pax_force_retaddr ++ retq ++3: ud2 ++ENDPROC(pax_exit_kernel_user) ++#endif ++ ++ .macro pax_enter_kernel_nmi ++ pax_set_fptr_mask ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jc 110f ++ SET_RDI_INTO_CR0 ++ or $2,%ebx ++110: ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ 661: jmp 111f ++ .pushsection .altinstr_replacement, "a" ++ 662: ASM_NOP2 ++ .popsection ++ .pushsection .altinstructions, "a" ++ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2 ++ .popsection ++ GET_CR3_INTO_RDI ++ cmp $0,%dil ++ jz 111f ++ sub $4097,%rdi ++ or $4,%ebx ++ bts $63,%rdi ++ SET_RDI_INTO_CR3 ++ mov $__UDEREF_KERNEL_DS,%edi ++ mov %edi,%ss ++111: ++#endif ++ .endm ++ ++ .macro 
pax_exit_kernel_nmi ++#ifdef CONFIG_PAX_KERNEXEC ++ btr $1,%ebx ++ jnc 110f ++ GET_CR0_INTO_RDI ++ btr $16,%rdi ++ SET_RDI_INTO_CR0 ++110: ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ btr $2,%ebx ++ jnc 111f ++ GET_CR3_INTO_RDI ++ add $4097,%rdi ++ bts $63,%rdi ++ SET_RDI_INTO_CR3 ++ mov $__KERNEL_DS,%edi ++ mov %edi,%ss ++111: ++#endif ++ .endm ++ ++ .macro pax_erase_kstack ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ call pax_erase_kstack ++#endif ++ .endm ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++ENTRY(pax_erase_kstack) ++ pushq %rdi ++ pushq %rcx ++ pushq %rax ++ pushq %r11 ++ ++ GET_THREAD_INFO(%r11) ++ mov TI_lowest_stack(%r11), %rdi ++ mov $-0xBEEF, %rax ++ std ++ ++1: mov %edi, %ecx ++ and $THREAD_SIZE_asm - 1, %ecx ++ shr $3, %ecx ++ repne scasq ++ jecxz 2f ++ ++ cmp $2*8, %ecx ++ jc 2f ++ ++ mov $2*8, %ecx ++ repe scasq ++ jecxz 2f ++ jne 1b ++ ++2: cld ++ mov %esp, %ecx ++ sub %edi, %ecx ++ ++ cmp $THREAD_SIZE_asm, %rcx ++ jb 3f ++ ud2 ++3: ++ ++ shr $3, %ecx ++ rep stosq ++ ++ mov TI_task_thread_sp0(%r11), %rdi ++ sub $256, %rdi ++ mov %rdi, TI_lowest_stack(%r11) ++ ++ popq %r11 ++ popq %rax ++ popq %rcx ++ popq %rdi ++ pax_force_retaddr ++ ret ++ENDPROC(pax_erase_kstack) ++#endif + + .macro TRACE_IRQS_IRETQ offset=ARGOFFSET + #ifdef CONFIG_TRACE_IRQFLAGS +@@ -321,7 +754,7 @@ ENDPROC(native_usergs_sysret64) + .endm + + .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET +- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ ++ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */ + jnc 1f + TRACE_IRQS_ON_DEBUG + 1: +@@ -359,27 +792,6 @@ ENDPROC(native_usergs_sysret64) + movq \tmp,R11+\offset(%rsp) + .endm + +- .macro FAKE_STACK_FRAME child_rip +- /* push in order ss, rsp, eflags, cs, rip */ +- xorl %eax, %eax +- pushq_cfi $__KERNEL_DS /* ss */ +- /*CFI_REL_OFFSET ss,0*/ +- pushq_cfi %rax /* rsp */ +- CFI_REL_OFFSET rsp,0 +- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */ +- /*CFI_REL_OFFSET rflags,0*/ +- pushq_cfi $__KERNEL_CS /* cs */ +- /*CFI_REL_OFFSET cs,0*/ +- pushq_cfi \child_rip /* rip */ +- CFI_REL_OFFSET rip,0 +- pushq_cfi %rax /* orig rax */ +- .endm +- +- .macro UNFAKE_STACK_FRAME +- addq $8*6, %rsp +- CFI_ADJUST_CFA_OFFSET -(6*8) +- .endm +- + /* + * initial frame state for interrupts (and exceptions without error code) + */ +@@ -446,25 +858,26 @@ ENDPROC(native_usergs_sysret64) + /* save partial stack frame */ + .macro SAVE_ARGS_IRQ + cld +- /* start from rbp in pt_regs and jump over */ +- movq_cfi rdi, (RDI-RBP) +- movq_cfi rsi, (RSI-RBP) +- movq_cfi rdx, (RDX-RBP) +- movq_cfi rcx, (RCX-RBP) +- movq_cfi rax, (RAX-RBP) +- movq_cfi r8, (R8-RBP) +- movq_cfi r9, (R9-RBP) +- movq_cfi r10, (R10-RBP) +- movq_cfi r11, (R11-RBP) ++ /* start from r15 in pt_regs and jump over */ ++ movq_cfi rdi, RDI ++ movq_cfi rsi, RSI ++ movq_cfi rdx, RDX ++ movq_cfi rcx, RCX ++ movq_cfi rax, RAX ++ movq_cfi r8, R8 ++ movq_cfi r9, R9 ++ movq_cfi r10, R10 ++ movq_cfi r11, R11 ++ movq_cfi r12, R12 + + /* Save rbp so that we can unwind from get_irq_regs() */ +- movq_cfi rbp, 0 ++ movq_cfi rbp, RBP + + /* Save previous stack value */ + movq %rsp, %rsi + +- leaq -RBP(%rsp),%rdi /* arg1 for handler */ +- testl $3, CS-RBP(%rsi) ++ movq %rsp,%rdi /* arg1 for handler */ ++ testb $3, CS(%rsi) + je 1f + SWAPGS + /* +@@ -484,6 +897,18 @@ ENDPROC(native_usergs_sysret64) + 0x06 /* DW_OP_deref */, \ + 0x08 /* DW_OP_const1u */, SS+8-RBP, \ + 0x22 /* DW_OP_plus */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rdi) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: 
pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif ++ + /* We entered an interrupt context - irqs are off: */ + TRACE_IRQS_OFF + .endm +@@ -515,9 +940,52 @@ ENTRY(save_paranoid) + js 1f /* negative -> in kernel */ + SWAPGS + xorl %ebx,%ebx +-1: ret ++1: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS+8(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif ++ pax_force_retaddr ++ ret + CFI_ENDPROC +-END(save_paranoid) ++ENDPROC(save_paranoid) ++ ++ENTRY(save_paranoid_nmi) ++ XCPT_FRAME 1 RDI+8 ++ cld ++ movq_cfi rdi, RDI+8 ++ movq_cfi rsi, RSI+8 ++ movq_cfi rdx, RDX+8 ++ movq_cfi rcx, RCX+8 ++ movq_cfi rax, RAX+8 ++ movq_cfi r8, R8+8 ++ movq_cfi r9, R9+8 ++ movq_cfi r10, R10+8 ++ movq_cfi r11, R11+8 ++ movq_cfi rbx, RBX+8 ++ movq_cfi rbp, RBP+8 ++ movq_cfi r12, R12+8 ++ movq_cfi r13, R13+8 ++ movq_cfi r14, R14+8 ++ movq_cfi r15, R15+8 ++ movl $1,%ebx ++ movl $MSR_GS_BASE,%ecx ++ rdmsr ++ testl %edx,%edx ++ js 1f /* negative -> in kernel */ ++ SWAPGS ++ xorl %ebx,%ebx ++1: pax_enter_kernel_nmi ++ pax_force_retaddr ++ ret ++ CFI_ENDPROC ++ENDPROC(save_paranoid_nmi) + .popsection + + /* +@@ -539,7 +1007,7 @@ ENTRY(ret_from_fork) + + RESTORE_REST + +- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? ++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + jz 1f + + testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET +@@ -549,15 +1017,13 @@ ENTRY(ret_from_fork) + jmp ret_from_sys_call # go to the SYSRET fastpath + + 1: +- subq $REST_SKIP, %rsp # leave space for volatiles +- CFI_ADJUST_CFA_OFFSET REST_SKIP + movq %rbp, %rdi + call *%rbx + movl $0, RAX(%rsp) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(ret_from_fork) ++ENDPROC(ret_from_fork) + + /* + * System call entry. Up to 6 arguments in registers are supported. 
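The pax_enter_kernel_user/pax_exit_kernel_user pair above is the core of PAX_MEMORY_UDEREF on amd64: every kernel entry knocks the user half of the per-CPU PGD non-present, and every return to user mode re-arms it with the 0x67 flag byte (PRESENT|RW|USER|ACCESSED|DIRTY), so a stray kernel-mode dereference of a userland pointer faults instead of silently succeeding. On PCID-capable CPUs the alternatives machinery replaces the loops with a cheap CR3 switch between the adjacent kernel/user PGD pages (the sub/add $4097 with bit 63 set for a no-flush switch). A minimal C sketch of the native, non-PCID path follows; the function names are invented for illustration, and USER_PGD_PTRS is assumed to be the 256 user slots of the 512-entry PGD:

    /*
     * Illustrative sketch only -- not part of the patch.  Only the low
     * byte of each PGD entry is toggled, exactly as the movb loops in
     * the assembly above do, so the stored pfn survives the round trip.
     */
    #define USER_PGD_PTRS 256        /* user half of the 512-entry PGD (assumed) */
    #define PGD_USER_BYTE 0x67       /* PRESENT|RW|USER|ACCESSED|DIRTY */

    static void uderef_enter(unsigned char *pgd)  /* kernel va of the CR3 page */
    {
            unsigned int i;

            for (i = 0; i < USER_PGD_PTRS; i++)
                    pgd[i * 8] = 0;  /* drop _PAGE_PRESENT: user derefs now fault */
    }

    static void uderef_exit(unsigned char *pgd)
    {
            unsigned int i;

            for (i = 0; i < USER_PGD_PTRS; i++)
                    pgd[i * 8] = PGD_USER_BYTE;  /* re-arm user mappings */
    }
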
+@@ -594,7 +1060,7 @@ END(ret_from_fork) + ENTRY(system_call) + CFI_STARTPROC simple + CFI_SIGNAL_FRAME +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET ++ CFI_DEF_CFA rsp,0 + CFI_REGISTER rip,rcx + /*CFI_REGISTER rflags,r11*/ + SWAPGS_UNSAFE_STACK +@@ -607,16 +1073,23 @@ GLOBAL(system_call_after_swapgs) + + movq %rsp,PER_CPU_VAR(old_rsp) + movq PER_CPU_VAR(kernel_stack),%rsp ++ SAVE_ARGS 8*6,0 ++ pax_enter_kernel_user ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pax_erase_kstack ++#endif ++ + /* + * No need to follow this irqs off/on section - it's straight + * and short: + */ + ENABLE_INTERRUPTS(CLBR_NONE) +- SAVE_ARGS 8,0 + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rcx,RIP-ARGOFFSET(%rsp) + CFI_REL_OFFSET rip,RIP-ARGOFFSET +- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ GET_THREAD_INFO(%rcx) ++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx) + jnz tracesys + system_call_fastpath: + #if __SYSCALL_MASK == ~0 +@@ -640,10 +1113,13 @@ sysret_check: + LOCKDEP_SYS_EXIT + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx ++ GET_THREAD_INFO(%rcx) ++ movl TI_flags(%rcx),%edx + andl %edi,%edx + jnz sysret_careful + CFI_REMEMBER_STATE ++ pax_exit_kernel_user ++ pax_erase_kstack + /* + * sysretq will re-enable interrupts: + */ +@@ -702,6 +1178,9 @@ auditsys: + movq %rax,%rsi /* 2nd arg: syscall number */ + movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ + call __audit_syscall_entry ++ ++ pax_erase_kstack ++ + LOAD_ARGS 0 /* reload call-clobbered registers */ + jmp system_call_fastpath + +@@ -723,7 +1202,7 @@ sysret_audit: + /* Do syscall tracing */ + tracesys: + #ifdef CONFIG_AUDITSYSCALL +- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) ++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) + jz auditsys + #endif + SAVE_REST +@@ -731,12 +1210,15 @@ tracesys: + FIXUP_TOP_OF_STACK %rdi + movq %rsp,%rdi + call syscall_trace_enter ++ ++ pax_erase_kstack ++ + /* + * Reload arg registers from stack in case ptrace changed them. + * We don't reload %rax because syscall_trace_enter() returned + * the value it wants us to use in the table lookup. + */ +- LOAD_ARGS ARGOFFSET, 1 ++ LOAD_ARGS 1 + RESTORE_REST + #if __SYSCALL_MASK == ~0 + cmpq $__NR_syscall_max,%rax +@@ -766,7 +1248,9 @@ GLOBAL(int_with_check) + andl %edi,%edx + jnz int_careful + andl $~TS_COMPAT,TI_status(%rcx) +- jmp retint_swapgs ++ pax_exit_kernel_user ++ pax_erase_kstack ++ jmp retint_swapgs_pax + + /* Either reschedule or signal or syscall exit tracking needed. */ + /* First do a reschedule test. 
*/ +@@ -812,7 +1296,7 @@ int_restore_rest: + TRACE_IRQS_OFF + jmp int_with_check + CFI_ENDPROC +-END(system_call) ++ENDPROC(system_call) + + .macro FORK_LIKE func + ENTRY(stub_\func) +@@ -825,9 +1309,10 @@ ENTRY(stub_\func) + DEFAULT_FRAME 0 8 /* offset 8: return address */ + call sys_\func + RESTORE_TOP_OF_STACK %r11, 8 +- ret $REST_SKIP /* pop extended registers */ ++ pax_force_retaddr ++ ret + CFI_ENDPROC +-END(stub_\func) ++ENDPROC(stub_\func) + .endm + + .macro FIXED_FRAME label,func +@@ -837,9 +1322,10 @@ ENTRY(\label) + FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET + call \func + RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(\label) ++ENDPROC(\label) + .endm + + FORK_LIKE clone +@@ -847,19 +1333,6 @@ END(\label) + FORK_LIKE vfork + FIXED_FRAME stub_iopl, sys_iopl + +-ENTRY(ptregscall_common) +- DEFAULT_FRAME 1 8 /* offset 8: return address */ +- RESTORE_TOP_OF_STACK %r11, 8 +- movq_cfi_restore R15+8, r15 +- movq_cfi_restore R14+8, r14 +- movq_cfi_restore R13+8, r13 +- movq_cfi_restore R12+8, r12 +- movq_cfi_restore RBP+8, rbp +- movq_cfi_restore RBX+8, rbx +- ret $REST_SKIP /* pop extended registers */ +- CFI_ENDPROC +-END(ptregscall_common) +- + ENTRY(stub_execve) + CFI_STARTPROC + addq $8, %rsp +@@ -871,7 +1344,7 @@ ENTRY(stub_execve) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(stub_execve) ++ENDPROC(stub_execve) + + /* + * sigreturn is special because it needs to restore all registers on return. +@@ -888,7 +1361,7 @@ ENTRY(stub_rt_sigreturn) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(stub_rt_sigreturn) ++ENDPROC(stub_rt_sigreturn) + + #ifdef CONFIG_X86_X32_ABI + ENTRY(stub_x32_rt_sigreturn) +@@ -902,7 +1375,7 @@ ENTRY(stub_x32_rt_sigreturn) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(stub_x32_rt_sigreturn) ++ENDPROC(stub_x32_rt_sigreturn) + + ENTRY(stub_x32_execve) + CFI_STARTPROC +@@ -916,7 +1389,7 @@ ENTRY(stub_x32_execve) + RESTORE_REST + jmp int_ret_from_sys_call + CFI_ENDPROC +-END(stub_x32_execve) ++ENDPROC(stub_x32_execve) + + #endif + +@@ -953,7 +1426,7 @@ vector=vector+1 + 2: jmp common_interrupt + .endr + CFI_ENDPROC +-END(irq_entries_start) ++ENDPROC(irq_entries_start) + + .previous + END(interrupt) +@@ -970,8 +1443,8 @@ END(interrupt) + /* 0(%rsp): ~(interrupt number) */ + .macro interrupt func + /* reserve pt_regs for scratch regs and rbp */ +- subq $ORIG_RAX-RBP, %rsp +- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP ++ subq $ORIG_RAX, %rsp ++ CFI_ADJUST_CFA_OFFSET ORIG_RAX + SAVE_ARGS_IRQ + call \func + .endm +@@ -998,14 +1471,14 @@ ret_from_intr: + + /* Restore saved previous stack */ + popq %rsi +- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */ +- leaq ARGOFFSET-RBP(%rsi), %rsp ++ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */ ++ movq %rsi, %rsp + CFI_DEF_CFA_REGISTER rsp +- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET ++ CFI_ADJUST_CFA_OFFSET -ARGOFFSET + + exit_intr: + GET_THREAD_INFO(%rcx) +- testl $3,CS-ARGOFFSET(%rsp) ++ testb $3,CS-ARGOFFSET(%rsp) + je retint_kernel + + /* Interrupt came from user space */ +@@ -1027,12 +1500,35 @@ retint_swapgs: /* return to user-space */ + * The iretq could re-enable interrupts: + */ + DISABLE_INTERRUPTS(CLBR_ANY) ++ pax_exit_kernel_user ++retint_swapgs_pax: + TRACE_IRQS_IRETQ + SWAPGS + jmp restore_args + + retint_restore_args: /* return to kernel space */ + DISABLE_INTERRUPTS(CLBR_ANY) ++ pax_exit_kernel ++ ++#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC) ++ /* This is a quirk to allow IRQs/NMIs/MCEs during early 
EFI setup, ++ * namely calling EFI runtime services with a phys mapping. We're ++ * starting off with NOPs and patch in the real instrumentation ++ * (BTS/OR) before starting any userland process; even before starting ++ * up the APs. ++ */ ++ .pushsection .altinstr_replacement, "a" ++ 601: pax_force_retaddr (RIP-ARGOFFSET) ++ 602: ++ .popsection ++ 603: .fill 602b-601b, 1, 0x90 ++ .pushsection .altinstructions, "a" ++ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b ++ .popsection ++#else ++ pax_force_retaddr (RIP-ARGOFFSET) ++#endif ++ + /* + * The iretq could re-enable interrupts: + */ +@@ -1145,7 +1641,7 @@ ENTRY(retint_kernel) + jmp exit_intr + #endif + CFI_ENDPROC +-END(common_interrupt) ++ENDPROC(common_interrupt) + + /* + * If IRET takes a fault on the espfix stack, then we +@@ -1167,13 +1663,13 @@ __do_double_fault: + cmpq $native_irq_return_iret,%rax + jne do_double_fault /* This shouldn't happen... */ + movq PER_CPU_VAR(kernel_stack),%rax +- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ ++ subq $(6*8),%rax /* Reset to original stack */ + movq %rax,RSP(%rdi) + movq $0,(%rax) /* Missing (lost) #GP error code */ + movq $general_protection,RIP(%rdi) + retq + CFI_ENDPROC +-END(__do_double_fault) ++ENDPROC(__do_double_fault) + #else + # define __do_double_fault do_double_fault + #endif +@@ -1195,7 +1691,7 @@ ENTRY(\sym) + interrupt \do_sym + jmp ret_from_intr + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + #ifdef CONFIG_TRACING +@@ -1283,7 +1779,7 @@ ENTRY(\sym) + call \do_sym + jmp error_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + .macro paranoidzeroentry sym do_sym +@@ -1301,10 +1797,10 @@ ENTRY(\sym) + call \do_sym + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + +-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) ++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13) + .macro paranoidzeroentry_ist sym do_sym ist + ENTRY(\sym) + INTR_FRAME +@@ -1317,12 +1813,18 @@ ENTRY(\sym) + TRACE_IRQS_OFF_DEBUG + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ ++#ifdef CONFIG_SMP ++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d ++ lea init_tss(%r13), %r13 ++#else ++ lea init_tss(%rip), %r13 ++#endif + subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) + call \do_sym + addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + .macro errorentry sym do_sym +@@ -1340,7 +1842,7 @@ ENTRY(\sym) + call \do_sym + jmp error_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + #ifdef CONFIG_TRACING +@@ -1371,7 +1873,7 @@ ENTRY(\sym) + call \do_sym + jmp paranoid_exit /* %ebx: no swapgs flag */ + CFI_ENDPROC +-END(\sym) ++ENDPROC(\sym) + .endm + + zeroentry divide_error do_divide_error +@@ -1401,9 +1903,10 @@ gs_change: + 2: mfence /* workaround */ + SWAPGS + popfq_cfi ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(native_load_gs_index) ++ENDPROC(native_load_gs_index) + + _ASM_EXTABLE(gs_change,bad_gs) + .section .fixup,"ax" +@@ -1431,9 +1934,10 @@ ENTRY(do_softirq_own_stack) + CFI_DEF_CFA_REGISTER rsp + CFI_ADJUST_CFA_OFFSET -8 + decl PER_CPU_VAR(irq_count) ++ pax_force_retaddr + ret + CFI_ENDPROC +-END(do_softirq_own_stack) ++ENDPROC(do_softirq_own_stack) + + #ifdef CONFIG_XEN + zeroentry xen_hypervisor_callback xen_do_hypervisor_callback +@@ -1471,7 +1975,7 @@ ENTRY(xen_do_hypervisor_callback) # 
do_hypervisor_callback(struct *pt_regs) + decl PER_CPU_VAR(irq_count) + jmp error_exit + CFI_ENDPROC +-END(xen_do_hypervisor_callback) ++ENDPROC(xen_do_hypervisor_callback) + + /* + * Hypervisor uses this for application faults while it executes. +@@ -1530,7 +2034,7 @@ ENTRY(xen_failsafe_callback) + SAVE_ALL + jmp error_exit + CFI_ENDPROC +-END(xen_failsafe_callback) ++ENDPROC(xen_failsafe_callback) + + apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ + xen_hvm_callback_vector xen_evtchn_do_upcall +@@ -1582,18 +2086,33 @@ ENTRY(paranoid_exit) + DEFAULT_FRAME + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF_DEBUG +- testl %ebx,%ebx /* swapgs needed? */ ++ testl $1,%ebx /* swapgs needed? */ + jnz paranoid_restore +- testl $3,CS(%rsp) ++ testb $3,CS(%rsp) + jnz paranoid_userspace ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_exit_kernel ++ TRACE_IRQS_IRETQ 0 ++ SWAPGS_UNSAFE_STACK ++ RESTORE_ALL 8 ++ pax_force_retaddr_bts ++ jmp irq_return ++#endif + paranoid_swapgs: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pax_exit_kernel_user ++#else ++ pax_exit_kernel ++#endif + TRACE_IRQS_IRETQ 0 + SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return + paranoid_restore: ++ pax_exit_kernel + TRACE_IRQS_IRETQ_DEBUG 0 + RESTORE_ALL 8 ++ pax_force_retaddr_bts + jmp irq_return + paranoid_userspace: + GET_THREAD_INFO(%rcx) +@@ -1622,7 +2141,7 @@ paranoid_schedule: + TRACE_IRQS_OFF + jmp paranoid_userspace + CFI_ENDPROC +-END(paranoid_exit) ++ENDPROC(paranoid_exit) + + /* + * Exception entry point. This expects an error code/orig_rax on the stack. +@@ -1649,12 +2168,23 @@ ENTRY(error_entry) + movq_cfi r14, R14+8 + movq_cfi r15, R15+8 + xorl %ebx,%ebx +- testl $3,CS+8(%rsp) ++ testb $3,CS+8(%rsp) + je error_kernelspace + error_swapgs: + SWAPGS + error_sti: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS+8(%rsp) ++ jnz 1f ++ pax_enter_kernel ++ jmp 2f ++1: pax_enter_kernel_user ++2: ++#else ++ pax_enter_kernel ++#endif + TRACE_IRQS_OFF ++ pax_force_retaddr + ret + + /* +@@ -1681,7 +2211,7 @@ bstep_iret: + movq %rcx,RIP+8(%rsp) + jmp error_swapgs + CFI_ENDPROC +-END(error_entry) ++ENDPROC(error_entry) + + + /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ +@@ -1692,7 +2222,7 @@ ENTRY(error_exit) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF + GET_THREAD_INFO(%rcx) +- testl %eax,%eax ++ testl $1,%eax + jne retint_kernel + LOCKDEP_SYS_EXIT_IRQ + movl TI_flags(%rcx),%edx +@@ -1701,7 +2231,7 @@ ENTRY(error_exit) + jnz retint_careful + jmp retint_swapgs + CFI_ENDPROC +-END(error_exit) ++ENDPROC(error_exit) + + /* + * Test if a given stack is an NMI stack or not. +@@ -1759,9 +2289,11 @@ ENTRY(nmi) + * If %cs was not the kernel segment, then the NMI triggered in user + * space, which means it is definitely not nested. + */ ++ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp) ++ je 1f + cmpl $__KERNEL_CS, 16(%rsp) + jne first_nmi +- ++1: + /* + * Check the special variable on the stack to see if NMIs are + * executing. 
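One subtlety in the nmi entry hunk just above: under PAX_KERNEXEC the kernel may be executing with the alternate __KERNEXEC_KERNEL_CS selector, so the stock "did we interrupt the kernel?" test against __KERNEL_CS alone would send a genuinely nested NMI down the first_nmi path. The added compare accepts either kernel selector before falling through to the nesting checks. In rough C, with invented names and illustrative selector values:

    /* Sketch, not patch code: the pre-test on the CS saved in the IRET frame. */
    #define KERNEL_CS          0x10   /* illustrative values only */
    #define KERNEXEC_KERNEL_CS 0x38

    static int nmi_interrupted_kernel(unsigned long saved_cs)
    {
            return saved_cs == KERNEXEC_KERNEL_CS || saved_cs == KERNEL_CS;
    }
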
+@@ -1795,8 +2327,7 @@ nested_nmi: + + 1: + /* Set up the interrupted NMIs stack to jump to repeat_nmi */ +- leaq -1*8(%rsp), %rdx +- movq %rdx, %rsp ++ subq $8, %rsp + CFI_ADJUST_CFA_OFFSET 1*8 + leaq -10*8(%rsp), %rdx + pushq_cfi $__KERNEL_DS +@@ -1814,6 +2345,7 @@ nested_nmi_out: + CFI_RESTORE rdx + + /* No need to check faults here */ ++# pax_force_retaddr_bts + INTERRUPT_RETURN + + CFI_RESTORE_STATE +@@ -1910,13 +2442,13 @@ end_repeat_nmi: + subq $ORIG_RAX-R15, %rsp + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 + /* +- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit ++ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit + * as we should not be calling schedule in NMI context. + * Even with normal interrupts enabled. An NMI should not be + * setting NEED_RESCHED or anything that normal interrupts and + * exceptions might do. + */ +- call save_paranoid ++ call save_paranoid_nmi + DEFAULT_FRAME 0 + + /* +@@ -1926,9 +2458,9 @@ end_repeat_nmi: + * NMI itself takes a page fault, the page fault that was preempted + * will read the information from the NMI page fault and not the + * origin fault. Save it off and restore it if it changes. +- * Use the r12 callee-saved register. ++ * Use the r13 callee-saved register. + */ +- movq %cr2, %r12 ++ movq %cr2, %r13 + + /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ + movq %rsp,%rdi +@@ -1937,31 +2469,36 @@ end_repeat_nmi: + + /* Did the NMI take a page fault? Restore cr2 if it did */ + movq %cr2, %rcx +- cmpq %rcx, %r12 ++ cmpq %rcx, %r13 + je 1f +- movq %r12, %cr2 ++ movq %r13, %cr2 + 1: + +- testl %ebx,%ebx /* swapgs needed? */ ++ testl $1,%ebx /* swapgs needed? */ + jnz nmi_restore + nmi_swapgs: + SWAPGS_UNSAFE_STACK + nmi_restore: ++ pax_exit_kernel_nmi + /* Pop the extra iret frame at once */ + RESTORE_ALL 6*8 ++ testb $3, 8(%rsp) ++ jnz 1f ++ pax_force_retaddr_bts ++1: + + /* Clear the NMI executing stack variable */ + movq $0, 5*8(%rsp) + jmp irq_return + CFI_ENDPROC +-END(nmi) ++ENDPROC(nmi) + + ENTRY(ignore_sysret) + CFI_STARTPROC + mov $-ENOSYS,%eax + sysret + CFI_ENDPROC +-END(ignore_sysret) ++ENDPROC(ignore_sysret) + + /* + * End of kprobes section +diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c +index 94d857f..bf1f0bf 100644 +--- a/arch/x86/kernel/espfix_64.c ++++ b/arch/x86/kernel/espfix_64.c +@@ -197,7 +197,7 @@ void init_espfix_ap(void) + set_pte(&pte_p[n*PTE_STRIDE], pte); + + /* Job is done for this CPU and any CPU which shares this page */ +- ACCESS_ONCE(espfix_pages[page]) = stack_page; ++ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page; + + unlock_done: + mutex_unlock(&espfix_init_mutex); +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c +index 1ffc32d..e52c745 100644 +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, + { + unsigned char replaced[MCOUNT_INSN_SIZE]; + ++ ip = ktla_ktva(ip); ++ + /* + * Note: Due to modules and __init, code can + * disappear and change, we need to protect against faulting +@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new) + unsigned char old[MCOUNT_INSN_SIZE]; + int ret; + +- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); ++ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE); + + ftrace_update_func = ip; + /* Make sure the breakpoints see the ftrace_update_func update */ +@@ -306,7 +308,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size) + * kernel identity 
mapping to modify code. + */ + if (within(ip, (unsigned long)_text, (unsigned long)_etext)) +- ip = (unsigned long)__va(__pa_symbol(ip)); ++ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip))); + + return probe_kernel_write((void *)ip, val, size); + } +@@ -316,7 +318,7 @@ static int add_break(unsigned long ip, const char *old) + unsigned char replaced[MCOUNT_INSN_SIZE]; + unsigned char brk = BREAKPOINT_INSTRUCTION; + +- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) ++ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE)) + return -EFAULT; + + /* Make sure it is what we expect it to be */ +@@ -664,7 +666,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code, + return ret; + + fail_update: +- probe_kernel_write((void *)ip, &old_code[0], 1); ++ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1); + goto out; + } + +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index 85126cc..1bbce17 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -67,12 +67,12 @@ again: + pgd = *pgd_p; + + /* +- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is +- * critical -- __PAGE_OFFSET would point us back into the dynamic ++ * The use of __early_va rather than __va here is critical: ++ * __va would point us back into the dynamic + * range and we might end up looping forever... + */ + if (pgd) +- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); ++ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK)); + else { + if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { + reset_early_page_tables(); +@@ -82,13 +82,13 @@ again: + pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++]; + for (i = 0; i < PTRS_PER_PUD; i++) + pud_p[i] = 0; +- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; ++ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE; + } + pud_p += pud_index(address); + pud = *pud_p; + + if (pud) +- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); ++ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK)); + else { + if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { + reset_early_page_tables(); +@@ -98,7 +98,7 @@ again: + pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++]; + for (i = 0; i < PTRS_PER_PMD; i++) + pmd_p[i] = 0; +- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; ++ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE; + } + pmd = (physaddr & PMD_MASK) + early_pmd_flags; + pmd_p[pmd_index(address)] = pmd; +@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data) + if (console_loglevel == 10) + early_printk("Kernel alive\n"); + +- clear_page(init_level4_pgt); + /* set init_level4_pgt kernel high mapping*/ + init_level4_pgt[511] = early_level4_pgt[511]; + +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S +index f36bd42..0ab4474 100644 +--- a/arch/x86/kernel/head_32.S ++++ b/arch/x86/kernel/head_32.S +@@ -26,6 +26,12 @@ + /* Physical address */ + #define pa(X) ((X) - __PAGE_OFFSET) + ++#ifdef CONFIG_PAX_KERNEXEC ++#define ta(X) (X) ++#else ++#define ta(X) ((X) - __PAGE_OFFSET) ++#endif ++ + /* + * References to members of the new_cpu_data structure. 
+ */ +@@ -55,11 +61,7 @@ + * and small than max_low_pfn, otherwise will waste some page table entries + */ + +-#if PTRS_PER_PMD > 1 +-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) +-#else +-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) +-#endif ++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) + + /* Number of possible pages in the lowmem region */ + LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) +@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE + RESERVE_BRK(pagetables, INIT_MAP_SIZE) + + /* ++ * Real beginning of normal "text" segment ++ */ ++ENTRY(stext) ++ENTRY(_stext) ++ ++/* + * 32-bit kernel entrypoint; only used by the boot CPU. On entry, + * %esi points to the real-mode code as a 32-bit pointer. + * CS and DS must be 4 GB flat segments, but we don't depend on +@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) + * can. + */ + __HEAD ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jmp startup_32 ++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ ++.fill PAGE_SIZE-5,1,0xcc ++#endif ++ + ENTRY(startup_32) + movl pa(stack_start),%ecx + +@@ -106,6 +121,59 @@ ENTRY(startup_32) + 2: + leal -__PAGE_OFFSET(%ecx),%esp + ++#ifdef CONFIG_SMP ++ movl $pa(cpu_gdt_table),%edi ++ movl $__per_cpu_load,%eax ++ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi) ++ rorl $16,%eax ++ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi) ++ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi) ++ movl $__per_cpu_end - 1,%eax ++ subl $__per_cpu_start,%eax ++ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi) ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ movl $NR_CPUS,%ecx ++ movl $pa(cpu_gdt_table),%edi ++1: ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi) ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi) ++ addl $PAGE_SIZE_asm,%edi ++ loop 1b ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ movl $pa(boot_gdt),%edi ++ movl $__LOAD_PHYSICAL_ADDR,%eax ++ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi) ++ rorl $16,%eax ++ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi) ++ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi) ++ rorl $16,%eax ++ ++ ljmp $(__BOOT_CS),$1f ++1: ++ ++ movl $NR_CPUS,%ecx ++ movl $pa(cpu_gdt_table),%edi ++ addl $__PAGE_OFFSET,%eax ++1: ++ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi) ++ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi) ++ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi) ++ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi) ++ rorl $16,%eax ++ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi) ++ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi) ++ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi) ++ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi) ++ rorl $16,%eax ++ addl $PAGE_SIZE_asm,%edi ++ loop 1b ++#endif ++ + /* + * Clear BSS first so that there are no surprises... 
+ */ +@@ -201,8 +269,11 @@ ENTRY(startup_32) + movl %eax, pa(max_pfn_mapped) + + /* Do early initialization of the fixmap area */ +- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax +- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) ++#ifdef CONFIG_COMPAT_VDSO ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8) ++#else ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8) ++#endif + #else /* Not PAE */ + + page_pde_offset = (__PAGE_OFFSET >> 20); +@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); + movl %eax, pa(max_pfn_mapped) + + /* Do early initialization of the fixmap area */ +- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax +- movl %eax,pa(initial_page_table+0xffc) ++#ifdef CONFIG_COMPAT_VDSO ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc) ++#else ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc) ++#endif + #endif + + #ifdef CONFIG_PARAVIRT +@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20); + cmpl $num_subarch_entries, %eax + jae bad_subarch + +- movl pa(subarch_entries)(,%eax,4), %eax +- subl $__PAGE_OFFSET, %eax +- jmp *%eax ++ jmp *pa(subarch_entries)(,%eax,4) + + bad_subarch: + WEAK(lguest_entry) +@@ -261,10 +333,10 @@ WEAK(xen_entry) + __INITDATA + + subarch_entries: +- .long default_entry /* normal x86/PC */ +- .long lguest_entry /* lguest hypervisor */ +- .long xen_entry /* Xen hypervisor */ +- .long default_entry /* Moorestown MID */ ++ .long ta(default_entry) /* normal x86/PC */ ++ .long ta(lguest_entry) /* lguest hypervisor */ ++ .long ta(xen_entry) /* Xen hypervisor */ ++ .long ta(default_entry) /* Moorestown MID */ + num_subarch_entries = (. - subarch_entries) / 4 + .previous + #else +@@ -354,6 +426,7 @@ default_entry: + movl pa(mmu_cr4_features),%eax + movl %eax,%cr4 + ++#ifdef CONFIG_X86_PAE + testb $X86_CR4_PAE, %al # check if PAE is enabled + jz enable_paging + +@@ -382,6 +455,9 @@ default_entry: + /* Make changes effective */ + wrmsr + ++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) ++#endif ++ + enable_paging: + + /* +@@ -449,14 +525,20 @@ is486: + 1: movl $(__KERNEL_DS),%eax # reload all the segment registers + movl %eax,%ss # after changing gdt. + +- movl $(__USER_DS),%eax # DS/ES contains default USER segment ++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment + movl %eax,%ds + movl %eax,%es + + movl $(__KERNEL_PERCPU), %eax + movl %eax,%fs # set this cpu's percpu + ++#ifdef CONFIG_CC_STACKPROTECTOR + movl $(__KERNEL_STACK_CANARY),%eax ++#elif defined(CONFIG_PAX_MEMORY_UDEREF) ++ movl $(__USER_DS),%eax ++#else ++ xorl %eax,%eax ++#endif + movl %eax,%gs + + xorl %eax,%eax # Clear LDT +@@ -512,8 +594,11 @@ setup_once: + * relocation. Manually set base address in stack canary + * segment descriptor. 
+ */ +- movl $gdt_page,%eax ++ movl $cpu_gdt_table,%eax + movl $stack_canary,%ecx ++#ifdef CONFIG_SMP ++ addl $__per_cpu_load,%ecx ++#endif + movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) + shrl $16, %ecx + movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) +@@ -548,7 +633,7 @@ ENTRY(early_idt_handler) + cmpl $2,(%esp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + +- cmpl $2,%ss:early_recursion_flag ++ cmpl $1,%ss:early_recursion_flag + je hlt_loop + incl %ss:early_recursion_flag + +@@ -586,8 +671,8 @@ ENTRY(early_idt_handler) + pushl (20+6*4)(%esp) /* trapno */ + pushl $fault_msg + call printk +-#endif + call dump_stack ++#endif + hlt_loop: + hlt + jmp hlt_loop +@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler) + /* This is the default interrupt "handler" :-) */ + ALIGN + ignore_int: +- cld + #ifdef CONFIG_PRINTK ++ cmpl $2,%ss:early_recursion_flag ++ je hlt_loop ++ incl %ss:early_recursion_flag ++ cld + pushl %eax + pushl %ecx + pushl %edx +@@ -617,9 +705,6 @@ ignore_int: + movl $(__KERNEL_DS),%eax + movl %eax,%ds + movl %eax,%es +- cmpl $2,early_recursion_flag +- je hlt_loop +- incl early_recursion_flag + pushl 16(%esp) + pushl 24(%esp) + pushl 32(%esp) +@@ -653,29 +738,34 @@ ENTRY(setup_once_ref) + /* + * BSS section + */ +-__PAGE_ALIGNED_BSS +- .align PAGE_SIZE + #ifdef CONFIG_X86_PAE ++.section .initial_pg_pmd,"a",@progbits + initial_pg_pmd: + .fill 1024*KPMDS,4,0 + #else ++.section .initial_page_table,"a",@progbits + ENTRY(initial_page_table) + .fill 1024,4,0 + #endif ++.section .initial_pg_fixmap,"a",@progbits + initial_pg_fixmap: + .fill 1024,4,0 ++.section .empty_zero_page,"a",@progbits + ENTRY(empty_zero_page) + .fill 4096,1,0 ++.section .swapper_pg_dir,"a",@progbits + ENTRY(swapper_pg_dir) ++#ifdef CONFIG_X86_PAE ++ .fill 4,8,0 ++#else + .fill 1024,4,0 ++#endif + + /* + * This starts the data section. + */ + #ifdef CONFIG_X86_PAE +-__PAGE_ALIGNED_DATA +- /* Page-aligned for the benefit of paravirt? 
*/ +- .align PAGE_SIZE ++.section .initial_page_table,"a",@progbits + ENTRY(initial_page_table) + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ + # if KPMDS == 3 +@@ -694,12 +784,20 @@ ENTRY(initial_page_table) + # error "Kernel PMDs should be 1, 2 or 3" + # endif + .align PAGE_SIZE /* needs to be page-sized too */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ENTRY(cpu_pgd) ++ .rept 2*NR_CPUS ++ .fill 4,8,0 ++ .endr ++#endif ++ + #endif + + .data + .balign 4 + ENTRY(stack_start) +- .long init_thread_union+THREAD_SIZE ++ .long init_thread_union+THREAD_SIZE-8 + + __INITRODATA + int_msg: +@@ -727,7 +825,7 @@ fault_msg: + * segment size, and 32-bit linear address value: + */ + +- .data ++.section .rodata,"a",@progbits + .globl boot_gdt_descr + .globl idt_descr + +@@ -736,7 +834,7 @@ fault_msg: + .word 0 # 32 bit align gdt_desc.address + boot_gdt_descr: + .word __BOOT_DS+7 +- .long boot_gdt - __PAGE_OFFSET ++ .long pa(boot_gdt) + + .word 0 # 32-bit align idt_desc.address + idt_descr: +@@ -747,7 +845,7 @@ idt_descr: + .word 0 # 32 bit align gdt_desc.address + ENTRY(early_gdt_descr) + .word GDT_ENTRIES*8-1 +- .long gdt_page /* Overwritten for secondary CPUs */ ++ .long cpu_gdt_table /* Overwritten for secondary CPUs */ + + /* + * The boot_gdt must mirror the equivalent in setup.S and is +@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr) + .align L1_CACHE_BYTES + ENTRY(boot_gdt) + .fill GDT_ENTRY_BOOT_CS,8,0 +- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ +- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ ++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ ++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ ++ ++ .align PAGE_SIZE_asm ++ENTRY(cpu_gdt_table) ++ .rept NR_CPUS ++ .quad 0x0000000000000000 /* NULL descriptor */ ++ .quad 0x0000000000000000 /* 0x0b reserved */ ++ .quad 0x0000000000000000 /* 0x13 reserved */ ++ .quad 0x0000000000000000 /* 0x1b reserved */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ ++#else ++ .quad 0x0000000000000000 /* 0x20 unused */ ++#endif ++ ++ .quad 0x0000000000000000 /* 0x28 unused */ ++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ ++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ ++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ ++ .quad 0x0000000000000000 /* 0x4b reserved */ ++ .quad 0x0000000000000000 /* 0x53 reserved */ ++ .quad 0x0000000000000000 /* 0x5b reserved */ ++ ++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ ++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ ++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ ++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ ++ ++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */ ++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */ ++ ++ /* ++ * Segments used for calling PnP BIOS have byte granularity. ++ * The code segments and data segments have fixed 64k limits, ++ * the transfer segment sizes are set at run time. ++ */ ++ .quad 0x00409b000000ffff /* 0x90 32-bit code */ ++ .quad 0x00009b000000ffff /* 0x98 16-bit code */ ++ .quad 0x000093000000ffff /* 0xa0 16-bit data */ ++ .quad 0x0000930000000000 /* 0xa8 16-bit data */ ++ .quad 0x0000930000000000 /* 0xb0 16-bit data */ ++ ++ /* ++ * The APM segments have byte granularity and their bases ++ * are set at run time. All have 64k limits. 
++ */ ++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */ ++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ ++ .quad 0x004093000000ffff /* 0xc8 APM DS data */ ++ ++ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */ ++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */ ++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */ ++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ ++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ ++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ ++ ++ /* Be sure this is zeroed to avoid false validations in Xen */ ++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0 ++ .endr +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S +index a468c0a..c7dec74 100644 +--- a/arch/x86/kernel/head_64.S ++++ b/arch/x86/kernel/head_64.S +@@ -20,6 +20,8 @@ + #include <asm/processor-flags.h> + #include <asm/percpu.h> + #include <asm/nops.h> ++#include <asm/cpufeature.h> ++#include <asm/alternative-asm.h> + + #ifdef CONFIG_PARAVIRT + #include <asm/asm-offsets.h> +@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) + L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) + L4_START_KERNEL = pgd_index(__START_KERNEL_map) + L3_START_KERNEL = pud_index(__START_KERNEL_map) ++L4_VMALLOC_START = pgd_index(VMALLOC_START) ++L3_VMALLOC_START = pud_index(VMALLOC_START) ++L4_VMALLOC_END = pgd_index(VMALLOC_END) ++L3_VMALLOC_END = pud_index(VMALLOC_END) ++L4_VMEMMAP_START = pgd_index(VMEMMAP_START) ++L3_VMEMMAP_START = pud_index(VMEMMAP_START) + + .text + __HEAD +@@ -89,11 +97,24 @@ startup_64: + * Fixup the physical addresses in the page table + */ + addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) + +- addq %rbp, level3_kernel_pgt + (510*8)(%rip) +- addq %rbp, level3_kernel_pgt + (511*8)(%rip) ++ addq %rbp, level3_ident_pgt + (0*8)(%rip) ++#ifndef CONFIG_XEN ++ addq %rbp, level3_ident_pgt + (1*8)(%rip) ++#endif ++ ++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) ++ ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) ++ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip) + + addq %rbp, level2_fixmap_pgt + (506*8)(%rip) ++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip) + + /* + * Set up the identity mapping for the switchover. These +@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64) + movq $(init_level4_pgt - __START_KERNEL_map), %rax + 1: + +- /* Enable PAE mode and PGE */ +- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx ++ /* Enable PAE mode and PSE/PGE */ ++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx + movq %rcx, %cr4 + + /* Setup early boot stage 4 level pagetables. */ +@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64) + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_SCE, %eax /* Enable System Call */ +- btl $20,%edi /* No Execute supported? */ ++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? 
*/ + jnc 1f + btsl $_EFER_NX, %eax + btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) ++#ifndef CONFIG_EFI ++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip) ++#endif ++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip) ++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip) ++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip) ++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip) ++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip) ++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) + 1: wrmsr /* Make changes effective */ + + /* Setup cr0 */ +@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64) + * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, + * address given in m16:64. + */ ++ pax_set_fptr_mask + movq initial_code(%rip),%rax + pushq $0 # fake return address to stop unwinder + pushq $__KERNEL_CS # set correct cs +@@ -313,7 +344,7 @@ ENDPROC(start_cpu0) + .quad INIT_PER_CPU_VAR(irq_stack_union) + + GLOBAL(stack_start) +- .quad init_thread_union+THREAD_SIZE-8 ++ .quad init_thread_union+THREAD_SIZE-16 + .word 0 + __FINITDATA + +@@ -391,7 +422,7 @@ ENTRY(early_idt_handler) + call dump_stack + #ifdef CONFIG_KALLSYMS + leaq early_idt_ripmsg(%rip),%rdi +- movq 40(%rsp),%rsi # %rip again ++ movq 88(%rsp),%rsi # %rip again + call __print_symbol + #endif + #endif /* EARLY_PRINTK */ +@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler) + early_recursion_flag: + .long 0 + ++ .section .rodata,"a",@progbits + #ifdef CONFIG_EARLY_PRINTK + early_idt_msg: + .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" +@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt) + NEXT_PAGE(early_dynamic_pgts) + .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 + +- .data ++ .section .rodata,"a",@progbits + +-#ifndef CONFIG_XEN + NEXT_PAGE(init_level4_pgt) +- .fill 512,8,0 +-#else +-NEXT_PAGE(init_level4_pgt) +- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMALLOC_START*8, 0 ++ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMALLOC_END*8, 0 ++ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0 ++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_START_KERNEL*8, 0 + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ + .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++NEXT_PAGE(cpu_pgd) ++ .rept 2*NR_CPUS ++ .fill 512,8,0 ++ .endr ++#endif ++ + NEXT_PAGE(level3_ident_pgt) + .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE ++#ifdef CONFIG_XEN + .fill 511, 8, 0 ++#else ++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE ++ .fill 510,8,0 ++#endif ++ ++NEXT_PAGE(level3_vmalloc_start_pgt) ++ .fill 512,8,0 ++ ++NEXT_PAGE(level3_vmalloc_end_pgt) ++ .fill 512,8,0 ++ ++NEXT_PAGE(level3_vmemmap_pgt) ++ .fill L3_VMEMMAP_START,8,0 ++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ + NEXT_PAGE(level2_ident_pgt) +- /* Since I easily can, map the first 1G. ++ /* Since I easily can, map the first 2G. + * Don't set NX because code runs from these pages. 
+ */ +- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +-#endif ++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD) + + NEXT_PAGE(level3_kernel_pgt) + .fill L3_START_KERNEL,8,0 +@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt) + .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE + .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE + ++NEXT_PAGE(level2_vmemmap_pgt) ++ .fill 512,8,0 ++ + NEXT_PAGE(level2_kernel_pgt) + /* + * 512 MB kernel mapping. We spend a full page on this pagetable +@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt) + NEXT_PAGE(level2_fixmap_pgt) + .fill 506,8,0 + .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE +- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ +- .fill 5,8,0 ++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE ++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ ++ .fill 4,8,0 + + NEXT_PAGE(level1_fixmap_pgt) + .fill 512,8,0 + ++NEXT_PAGE(level1_vsyscall_pgt) ++ .fill 512,8,0 ++ + #undef PMDS + +- .data ++ .align PAGE_SIZE ++ENTRY(cpu_gdt_table) ++ .rept NR_CPUS ++ .quad 0x0000000000000000 /* NULL descriptor */ ++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ ++ .quad 0x00af9b000000ffff /* __KERNEL_CS */ ++ .quad 0x00cf93000000ffff /* __KERNEL_DS */ ++ .quad 0x00cffb000000ffff /* __USER32_CS */ ++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ ++ .quad 0x00affb000000ffff /* __USER_CS */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ ++#else ++ .quad 0x0 /* unused */ ++#endif ++ ++ .quad 0,0 /* TSS */ ++ .quad 0,0 /* LDT */ ++ .quad 0,0,0 /* three TLS descriptors */ ++ .quad 0x0000f40000000000 /* node/CPU stored in limit */ ++ /* asm/segment.h:GDT_ENTRIES must match this */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */ ++#else ++ .quad 0x0 /* unused */ ++#endif ++ ++ /* zero the remaining page */ ++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 ++ .endr ++ + .align 16 + .globl early_gdt_descr + early_gdt_descr: + .word GDT_ENTRIES*8-1 + early_gdt_descr_base: +- .quad INIT_PER_CPU_VAR(gdt_page) ++ .quad cpu_gdt_table + + ENTRY(phys_base) + /* This must match the first entry in level2_kernel_pgt */ + .quad 0x0000000000000000 + + #include "../../x86/xen/xen-head.S" +- +- __PAGE_ALIGNED_BSS ++ ++ .section .rodata,"a",@progbits + NEXT_PAGE(empty_zero_page) + .skip PAGE_SIZE +diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c +index 05fd74f..c3548b1 100644 +--- a/arch/x86/kernel/i386_ksyms_32.c ++++ b/arch/x86/kernel/i386_ksyms_32.c +@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void); + EXPORT_SYMBOL(cmpxchg8b_emu); + #endif + ++EXPORT_SYMBOL_GPL(cpu_gdt_table); ++ + /* Networking helper routines. 
*/ + EXPORT_SYMBOL(csum_partial_copy_generic); ++EXPORT_SYMBOL(csum_partial_copy_generic_to_user); ++EXPORT_SYMBOL(csum_partial_copy_generic_from_user); + + EXPORT_SYMBOL(__get_user_1); + EXPORT_SYMBOL(__get_user_2); +@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule); + EXPORT_SYMBOL(___preempt_schedule_context); + #endif + #endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); ++#endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++EXPORT_SYMBOL(cpu_pgd); ++#endif +diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c +index d5dd808..b6432cf 100644 +--- a/arch/x86/kernel/i387.c ++++ b/arch/x86/kernel/i387.c +@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void) + static inline bool interrupted_user_mode(void) + { + struct pt_regs *regs = get_irq_regs(); +- return regs && user_mode_vm(regs); ++ return regs && user_mode(regs); + } + + /* +diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c +index 2e977b5..5f2c273 100644 +--- a/arch/x86/kernel/i8259.c ++++ b/arch/x86/kernel/i8259.c +@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq) + static void make_8259A_irq(unsigned int irq) + { + disable_irq_nosync(irq); +- io_apic_irqs &= ~(1<<irq); ++ io_apic_irqs &= ~(1UL<<irq); + irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, + i8259A_chip.name); + enable_irq(irq); +@@ -209,7 +209,7 @@ spurious_8259A_irq: + "spurious 8259A interrupt: IRQ%d.\n", irq); + spurious_irq_mask |= irqmask; + } +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + /* + * Theoretically we do not have to handle this IRQ, + * but in Linux this does not cause problems and is +@@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi) + /* (slave's support for AEOI in flat mode is to be investigated) */ + outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); + ++ pax_open_kernel(); + if (auto_eoi) + /* + * In AEOI mode we just have to mask the interrupt + * when acking. + */ +- i8259A_chip.irq_mask_ack = disable_8259A_irq; ++ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq; + else +- i8259A_chip.irq_mask_ack = mask_and_ack_8259A; ++ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A; ++ pax_close_kernel(); + + udelay(100); /* wait for 8259A to initialize */ + +diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c +index a979b5b..1d6db75 100644 +--- a/arch/x86/kernel/io_delay.c ++++ b/arch/x86/kernel/io_delay.c +@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id) + * Quirk table for systems that misbehave (lock up, etc.) 
if port + * 0x80 is used: + */ +-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = { ++static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = { + { + .callback = dmi_io_delay_0xed_port, + .ident = "Compaq Presario V6000", +diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c +index 4ddaf66..49d5c18 100644 +--- a/arch/x86/kernel/ioport.c ++++ b/arch/x86/kernel/ioport.c +@@ -6,6 +6,7 @@ + #include <linux/sched.h> + #include <linux/kernel.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/errno.h> + #include <linux/types.h> + #include <linux/ioport.h> +@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) + return -EINVAL; + if (turn_on && !capable(CAP_SYS_RAWIO)) + return -EPERM; ++#ifdef CONFIG_GRKERNSEC_IO ++ if (turn_on && grsec_disable_privio) { ++ gr_handle_ioperm(); ++ return -ENODEV; ++ } ++#endif + + /* + * If it's the first ioperm() call in this thread's lifetime, set the +@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) + * because the ->io_bitmap_max value must match the bitmap + * contents: + */ +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + + if (turn_on) + bitmap_clear(t->io_bitmap_ptr, from, num); +@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level) + if (level > old) { + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; ++#ifdef CONFIG_GRKERNSEC_IO ++ if (grsec_disable_privio) { ++ gr_handle_iopl(); ++ return -ENODEV; ++ } ++#endif + } + regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12); + t->iopl = level << 12; +diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c +index d99f31d..1c0f466 100644 +--- a/arch/x86/kernel/irq.c ++++ b/arch/x86/kernel/irq.c +@@ -21,7 +21,7 @@ + #define CREATE_TRACE_POINTS + #include <asm/trace/irq_vectors.h> + +-atomic_t irq_err_count; ++atomic_unchecked_t irq_err_count; + + /* Function pointer for generic interrupt vector handling */ + void (*x86_platform_ipi_callback)(void) = NULL; +@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec) + seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); + seq_printf(p, " Machine check polls\n"); + #endif +- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); ++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); + #if defined(CONFIG_X86_IO_APIC) +- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); ++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count)); + #endif + return 0; + } +@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) + + u64 arch_irq_stat(void) + { +- u64 sum = atomic_read(&irq_err_count); ++ u64 sum = atomic_read_unchecked(&irq_err_count); + return sum; + } + +diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c +index d7fcbed..96e715a 100644 +--- a/arch/x86/kernel/irq_32.c ++++ b/arch/x86/kernel/irq_32.c +@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs); + + #ifdef CONFIG_DEBUG_STACKOVERFLOW + ++extern void gr_handle_kernel_exploit(void); ++ + int sysctl_panic_on_stackoverflow __read_mostly; + + /* Debugging check for stack overflow: is there less than 1KB free? 
*/ +@@ -39,13 +41,14 @@ static int check_stack_overflow(void) + __asm__ __volatile__("andl %%esp,%0" : + "=r" (sp) : "0" (THREAD_SIZE - 1)); + +- return sp < (sizeof(struct thread_info) + STACK_WARN); ++ return sp < STACK_WARN; + } + + static void print_stack_overflow(void) + { + printk(KERN_WARNING "low stack detected by irq handler\n"); + dump_stack(); ++ gr_handle_kernel_exploit(); + if (sysctl_panic_on_stackoverflow) + panic("low stack detected by irq handler - check messages\n"); + } +@@ -59,8 +62,8 @@ static inline void print_stack_overflow(void) { } + * per-CPU IRQ handling contexts (thread information and stack) + */ + union irq_ctx { +- struct thread_info tinfo; +- u32 stack[THREAD_SIZE/sizeof(u32)]; ++ unsigned long previous_esp; ++ u32 stack[THREAD_SIZE/sizeof(u32)]; + } __attribute__((aligned(THREAD_SIZE))); + + static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); +@@ -80,10 +83,9 @@ static void call_on_stack(void *func, void *stack) + static inline int + execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + { +- union irq_ctx *curctx, *irqctx; ++ union irq_ctx *irqctx; + u32 *isp, arg1, arg2; + +- curctx = (union irq_ctx *) current_thread_info(); + irqctx = __this_cpu_read(hardirq_ctx); + + /* +@@ -92,13 +94,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + * handler) we can't do that and just have to keep using the + * current stack (which is the irq stack already after all) + */ +- if (unlikely(curctx == irqctx)) ++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE)) + return 0; + + /* build the stack frame on the IRQ stack */ +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); +- irqctx->tinfo.task = curctx->tinfo.task; +- irqctx->tinfo.previous_esp = current_stack_pointer; ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); ++ irqctx->previous_esp = current_stack_pointer; ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(MAKE_MM_SEG(0)); ++#endif + + if (unlikely(overflow)) + call_on_stack(print_stack_overflow, isp); +@@ -110,6 +115,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + : "0" (irq), "1" (desc), "2" (isp), + "D" (desc->handle_irq) + : "memory", "cc", "ecx"); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + return 1; + } + +@@ -118,48 +128,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) + */ + void irq_ctx_init(int cpu) + { +- union irq_ctx *irqctx; +- + if (per_cpu(hardirq_ctx, cpu)) + return; + +- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), +- THREADINFO_GFP, +- THREAD_SIZE_ORDER)); +- memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); +- irqctx->tinfo.cpu = cpu; +- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); +- +- per_cpu(hardirq_ctx, cpu) = irqctx; +- +- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), +- THREADINFO_GFP, +- THREAD_SIZE_ORDER)); +- memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); +- irqctx->tinfo.cpu = cpu; +- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); +- +- per_cpu(softirq_ctx, cpu) = irqctx; +- +- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", +- cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); ++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); ++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER)); + } + + void do_softirq_own_stack(void) + { +- struct thread_info *curctx; + union irq_ctx *irqctx; + u32 
*isp; + +- curctx = current_thread_info(); + irqctx = __this_cpu_read(softirq_ctx); +- irqctx->tinfo.task = curctx->task; +- irqctx->tinfo.previous_esp = current_stack_pointer; ++ irqctx->previous_esp = current_stack_pointer; + + /* build the stack frame on the softirq stack */ +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(MAKE_MM_SEG(0)); ++#endif + + call_on_stack(__do_softirq, isp); ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + } + + bool handle_irq(unsigned irq, struct pt_regs *regs) +@@ -173,7 +169,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) + if (unlikely(!desc)) + return false; + +- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) { ++ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { + if (unlikely(overflow)) + print_stack_overflow(); + desc->handle_irq(irq, desc); +diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c +index 4d1c746..55a22d6 100644 +--- a/arch/x86/kernel/irq_64.c ++++ b/arch/x86/kernel/irq_64.c +@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); + DEFINE_PER_CPU(struct pt_regs *, irq_regs); + EXPORT_PER_CPU_SYMBOL(irq_regs); + ++extern void gr_handle_kernel_exploit(void); ++ + int sysctl_panic_on_stackoverflow; + + /* +@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) + u64 estack_top, estack_bottom; + u64 curbase = (u64)task_stack_page(current); + +- if (user_mode_vm(regs)) ++ if (user_mode(regs)) + return; + + if (regs->sp >= curbase + sizeof(struct thread_info) + +@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs) + irq_stack_top, irq_stack_bottom, + estack_top, estack_bottom); + ++ gr_handle_kernel_exploit(); ++ + if (sysctl_panic_on_stackoverflow) + panic("low stack detected by irq handler - check messages\n"); + #endif +diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c +index 26d5a55..a01160a 100644 +--- a/arch/x86/kernel/jump_label.c ++++ b/arch/x86/kernel/jump_label.c +@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry, + * Jump label is enabled for the first time. + * So we expect a default_nop... + */ +- if (unlikely(memcmp((void *)entry->code, default_nop, 5) ++ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) + != 0)) + bug_at((void *)entry->code, __LINE__); + } else { +@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry, + * ...otherwise expect an ideal_nop. Otherwise + * something went horribly wrong. + */ +- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) ++ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5) + != 0)) + bug_at((void *)entry->code, __LINE__); + } +@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry, + * are converting the default nop to the ideal nop. 
+ */ + if (init) { +- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0)) ++ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0)) + bug_at((void *)entry->code, __LINE__); + } else { + code.jump = 0xe9; + code.offset = entry->target - + (entry->code + JUMP_LABEL_NOP_SIZE); +- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0)) ++ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0)) + bug_at((void *)entry->code, __LINE__); + } + memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); +diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c +index 7ec1d5f..5a7d130 100644 +--- a/arch/x86/kernel/kgdb.c ++++ b/arch/x86/kernel/kgdb.c +@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) + #ifdef CONFIG_X86_32 + switch (regno) { + case GDB_SS: +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + *(unsigned long *)mem = __KERNEL_DS; + break; + case GDB_SP: +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + *(unsigned long *)mem = kernel_stack_pointer(regs); + break; + case GDB_GS: +@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void) + bp->attr.bp_addr = breakinfo[breakno].addr; + bp->attr.bp_len = breakinfo[breakno].len; + bp->attr.bp_type = breakinfo[breakno].type; +- info->address = breakinfo[breakno].addr; ++ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE) ++ info->address = ktla_ktva(breakinfo[breakno].addr); ++ else ++ info->address = breakinfo[breakno].addr; + info->len = breakinfo[breakno].len; + info->type = breakinfo[breakno].type; + val = arch_install_hw_breakpoint(bp); +@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + case 'k': + /* clear the trace bit */ + linux_regs->flags &= ~X86_EFLAGS_TF; +- atomic_set(&kgdb_cpu_doing_single_step, -1); ++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1); + + /* set the trace bit if we're stepping */ + if (remcomInBuffer[0] == 's') { + linux_regs->flags |= X86_EFLAGS_TF; +- atomic_set(&kgdb_cpu_doing_single_step, ++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, + raw_smp_processor_id()); + } + +@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) + + switch (cmd) { + case DIE_DEBUG: +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { + if (user_mode(regs)) + return single_step_cont(regs, args); + break; +@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) + #endif /* CONFIG_DEBUG_RODATA */ + + bpt->type = BP_BREAKPOINT; +- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, ++ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr), + BREAK_INSTR_SIZE); + if (err) + return err; +- err = probe_kernel_write((char *)bpt->bpt_addr, ++ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr), + arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); + #ifdef CONFIG_DEBUG_RODATA + if (!err) +@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) + return -EBUSY; + text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, + BREAK_INSTR_SIZE); +- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); ++ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE); + if (err) + return err; + if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) +@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) + if (mutex_is_locked(&text_mutex)) + goto knl_write; 
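[The ktla_ktva()/ktva_ktla() conversions that start appearing in these hunks are central to the KERNEXEC portions of this patch: on i386 with CONFIG_PAX_KERNEXEC the kernel image is mapped so that the address a text symbol is linked at and the alias it is read or patched through differ by a constant, and every access to kernel text has to be translated first. A minimal sketch of the idea, assuming the __KERNEL_TEXT_OFFSET constant that the full patch defines elsewhere; on configurations without KERNEXEC both helpers collapse to the identity:

#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
/* translate a kernel text linear address to its virtual alias and back */
#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
#else
/* no split text mapping: both translations are the identity */
#define ktla_ktva(addr) (addr)
#define ktva_ktla(addr) (addr)
#endif

This is why, for example, the kgdb hunks read the original instruction bytes through ktla_ktva(bpt->bpt_addr) while text_poke() keeps receiving the untranslated address.]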
+ text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); +- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); ++ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE); + if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) + goto knl_write; + return err; + knl_write: + #endif /* CONFIG_DEBUG_RODATA */ +- return probe_kernel_write((char *)bpt->bpt_addr, ++ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr), + (char *)bpt->saved_instr, BREAK_INSTR_SIZE); + } + +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 79a3f96..6ba030a 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) + s32 raddr; + } __packed *insn; + +- insn = (struct __arch_relative_insn *)from; ++ insn = (struct __arch_relative_insn *)ktla_ktva(from); ++ ++ pax_open_kernel(); + insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); + insn->op = op; ++ pax_close_kernel(); + } + + /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ +@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes) + kprobe_opcode_t opcode; + kprobe_opcode_t *orig_opcodes = opcodes; + +- if (search_exception_tables((unsigned long)opcodes)) ++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes))) + return 0; /* Page fault may occur on this address. */ + + retry: +@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) + * for the first byte, we can recover the original instruction + * from it and kp->opcode. + */ +- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); ++ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + buf[0] = kp->opcode; +- return (unsigned long)buf; ++ return ktva_ktla((unsigned long)buf); + } + + /* +@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) + /* Another subsystem puts a breakpoint, failed to recover */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) + return 0; ++ pax_open_kernel(); + memcpy(dest, insn.kaddr, insn.length); ++ pax_close_kernel(); + + #ifdef CONFIG_X86_64 + if (insn_rip_relative(&insn)) { +@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) + return 0; + } + disp = (u8 *) dest + insn_offset_displacement(&insn); ++ pax_open_kernel(); + *(s32 *) disp = (s32) newdisp; ++ pax_close_kernel(); + } + #endif + return insn.length; +@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k + * nor set current_kprobe, because it doesn't use single + * stepping. 
+ */ +- regs->ip = (unsigned long)p->ainsn.insn; ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); + preempt_enable_no_resched(); + return; + } +@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k + regs->flags &= ~X86_EFLAGS_IF; + /* single step inline if the instruction is an int3 */ + if (p->opcode == BREAKPOINT_INSTRUCTION) +- regs->ip = (unsigned long)p->addr; ++ regs->ip = ktla_ktva((unsigned long)p->addr); + else +- regs->ip = (unsigned long)p->ainsn.insn; ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); + } + + /* +@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) + setup_singlestep(p, regs, kcb, 0); + return 1; + } +- } else if (*addr != BREAKPOINT_INSTRUCTION) { ++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed +@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void) + " movq %rax, 152(%rsp)\n" + RESTORE_REGS_STRING + " popfq\n" ++#ifdef KERNEXEC_PLUGIN ++ " btsq $63,(%rsp)\n" ++#endif + #else + " pushf\n" + SAVE_REGS_STRING +@@ -779,7 +789,7 @@ static void __kprobes + resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) + { + unsigned long *tos = stack_addr(regs); +- unsigned long copy_ip = (unsigned long)p->ainsn.insn; ++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); + unsigned long orig_ip = (unsigned long)p->addr; + kprobe_opcode_t *insn = p->ainsn.insn; + +@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d + struct die_args *args = data; + int ret = NOTIFY_DONE; + +- if (args->regs && user_mode_vm(args->regs)) ++ if (args->regs && user_mode(args->regs)) + return ret; + + switch (val) { +diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c +index 898160b..758cde8 100644 +--- a/arch/x86/kernel/kprobes/opt.c ++++ b/arch/x86/kernel/kprobes/opt.c +@@ -79,6 +79,7 @@ found: + /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ + static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) + { ++ pax_open_kernel(); + #ifdef CONFIG_X86_64 + *addr++ = 0x48; + *addr++ = 0xbf; +@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v + *addr++ = 0xb8; + #endif + *(unsigned long *)addr = val; ++ pax_close_kernel(); + } + + asm ( +@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) + * Verify if the address gap is in 2GB range, because this uses + * a relative jump. 
+ */ +- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; ++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE; + if (abs(rel) > 0x7fffffff) + return -ERANGE; + +@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) + op->optinsn.size = ret; + + /* Copy arch-dep-instance from template */ +- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); ++ pax_open_kernel(); ++ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX); ++ pax_close_kernel(); + + /* Set probe information */ + synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); + + /* Set probe function call */ +- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); ++ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback); + + /* Set returning jmp instruction at the tail of out-of-line buffer */ +- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, ++ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size, + (u8 *)op->kp.addr + op->optinsn.size); + + flush_icache_range((unsigned long) buf, +@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist) + WARN_ON(kprobe_disabled(&op->kp)); + + /* Backup instructions which will be replaced by jump address */ +- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, ++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE, + RELATIVE_ADDR_SIZE); + + insn_buf[0] = RELATIVEJUMP_OPCODE; +@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) + /* This kprobe is really able to run optimized path. */ + op = container_of(p, struct optimized_kprobe, kp); + /* Detour through copied instructions */ +- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; ++ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX; + if (!reenter) + reset_current_kprobe(); + preempt_enable_no_resched(); +diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c +index c2bedae..25e7ab60 100644 +--- a/arch/x86/kernel/ksysfs.c ++++ b/arch/x86/kernel/ksysfs.c +@@ -184,7 +184,7 @@ out: + + static struct kobj_attribute type_attr = __ATTR_RO(type); + +-static struct bin_attribute data_attr = { ++static bin_attribute_no_const data_attr __read_only = { + .attr = { + .name = "data", + .mode = S_IRUGO, +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c +index c37886d..d851d32 100644 +--- a/arch/x86/kernel/ldt.c ++++ b/arch/x86/kernel/ldt.c +@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) + if (reload) { + #ifdef CONFIG_SMP + preempt_disable(); +- load_LDT(pc); ++ load_LDT_nolock(pc); + if (!cpumask_equal(mm_cpumask(current->mm), + cpumask_of(smp_processor_id()))) + smp_call_function(flush_ldt, current->mm, 1); + preempt_enable(); + #else +- load_LDT(pc); ++ load_LDT_nolock(pc); + #endif + } + if (oldsize) { +@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) + return err; + + for (i = 0; i < old->size; i++) +- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); ++ write_ldt_entry(new->ldt, i, old->ldt + i); + return 0; + } + +@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) + retval = copy_ldt(&mm->context, &old_mm->context); + mutex_unlock(&old_mm->context.lock); + } ++ ++ if (tsk == current) { ++ mm->context.vdso = 0; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ mm->context.user_cs_base = 0UL; ++ 
mm->context.user_cs_limit = ~0UL; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpus_clear(mm->context.cpu_user_cs_mask); ++#endif ++ ++#endif ++#endif ++ ++ } ++ + return retval; + } + +@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) + } + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { ++ error = -EINVAL; ++ goto out_unlock; ++ } ++#endif ++ + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out_unlock; +diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c +index 1667b1d..16492c5 100644 +--- a/arch/x86/kernel/machine_kexec_32.c ++++ b/arch/x86/kernel/machine_kexec_32.c +@@ -25,7 +25,7 @@ + #include <asm/cacheflush.h> + #include <asm/debugreg.h> + +-static void set_idt(void *newidt, __u16 limit) ++static void set_idt(struct desc_struct *newidt, __u16 limit) + { + struct desc_ptr curidt; + +@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit) + } + + +-static void set_gdt(void *newgdt, __u16 limit) ++static void set_gdt(struct desc_struct *newgdt, __u16 limit) + { + struct desc_ptr curgdt; + +@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image) + } + + control_page = page_address(image->control_code_page); +- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); ++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); + + relocate_kernel_ptr = control_page; + page_list[PA_CONTROL_PAGE] = __pa(control_page); +diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c +index 18be189..4a9fe40 100644 +--- a/arch/x86/kernel/module.c ++++ b/arch/x86/kernel/module.c +@@ -43,15 +43,60 @@ do { \ + } while (0) + #endif + +-void *module_alloc(unsigned long size) ++static inline void *__module_alloc(unsigned long size, pgprot_t prot) + { +- if (PAGE_ALIGN(size) > MODULES_LEN) ++ if (!size || PAGE_ALIGN(size) > MODULES_LEN) + return NULL; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, +- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, ++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot, + NUMA_NO_NODE, __builtin_return_address(0)); + } + ++void *module_alloc(unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return __module_alloc(size, PAGE_KERNEL); ++#else ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++#endif ++ ++} ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++void *module_alloc_exec(unsigned long size) ++{ ++ struct vm_struct *area; ++ ++ if (size == 0) ++ return NULL; ++ ++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); ++ return area ? 
area->addr : NULL; ++} ++EXPORT_SYMBOL(module_alloc_exec); ++ ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ vunmap(module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++#else ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ module_free(mod, module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++ ++void *module_alloc_exec(unsigned long size) ++{ ++ return __module_alloc(size, PAGE_KERNEL_RX); ++} ++EXPORT_SYMBOL(module_alloc_exec); ++#endif ++#endif ++ + #ifdef CONFIG_X86_32 + int apply_relocate(Elf32_Shdr *sechdrs, + const char *strtab, +@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, + unsigned int i; + Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; +- uint32_t *location; ++ uint32_t *plocation, location; + + DEBUGP("Applying relocate section %u to %u\n", + relsec, sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ +- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr +- + rel[i].r_offset; ++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; ++ location = (uint32_t)plocation; ++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) ++ plocation = ktla_ktva((void *)plocation); + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr +@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_386_32: + /* We add the value into the location given */ +- *location += sym->st_value; ++ pax_open_kernel(); ++ *plocation += sym->st_value; ++ pax_close_kernel(); + break; + case R_386_PC32: + /* Add the value, subtract its position */ +- *location += sym->st_value - (uint32_t)location; ++ pax_open_kernel(); ++ *plocation += sym->st_value - location; ++ pax_close_kernel(); + break; + default: + pr_err("%s: Unknown relocation: %u\n", +@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, + case R_X86_64_NONE: + break; + case R_X86_64_64: ++ pax_open_kernel(); + *(u64 *)loc = val; ++ pax_close_kernel(); + break; + case R_X86_64_32: ++ pax_open_kernel(); + *(u32 *)loc = val; ++ pax_close_kernel(); + if (val != *(u32 *)loc) + goto overflow; + break; + case R_X86_64_32S: ++ pax_open_kernel(); + *(s32 *)loc = val; ++ pax_close_kernel(); + if ((s64)val != *(s32 *)loc) + goto overflow; + break; + case R_X86_64_PC32: + val -= (u64)loc; ++ pax_open_kernel(); + *(u32 *)loc = val; ++ pax_close_kernel(); ++ + #if 0 + if ((s64)val != *(s32 *)loc) + goto overflow; +diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c +index 05266b5..3432443 100644 +--- a/arch/x86/kernel/msr.c ++++ b/arch/x86/kernel/msr.c +@@ -37,6 +37,7 @@ + #include <linux/notifier.h> + #include <linux/uaccess.h> + #include <linux/gfp.h> ++#include <linux/grsecurity.h> + + #include <asm/processor.h> + #include <asm/msr.h> +@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf, + int err = 0; + ssize_t bytes = 0; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_msr_write(); ++ return -EPERM; ++#endif ++ + if (count % 8) + return -EINVAL; /* Invalid chunk size */ + +@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) + err = -EBADF; + break; + } ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_msr_write(); ++ return -EPERM; ++#endif + if (copy_from_user(®s, uregs, sizeof regs)) { + err = -EFAULT; + break; 
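[The two msr.c hunks above follow a pattern used throughout the patch for CONFIG_GRKERNSEC_KMEM: the gr_handle_*() helper is purely an audit hook, and the call site itself fails the operation with -EPERM. The real helper lives in the grsecurity/ directory added later in this patch; the following is only a hypothetical sketch of its shape, with the message text invented for illustration:

#include <linux/kernel.h>
#include <linux/sched.h>

void gr_handle_msr_write(void)
{
	/* log the attempt; the caller rejects it with -EPERM */
	printk(KERN_ALERT "grsec: denied write to CPU MSR by %s[%d]\n",
	       current->comm, task_pid_nr(current));
}]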
+@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, + return notifier_from_errno(err); + } + +-static struct notifier_block __refdata msr_class_cpu_notifier = { ++static struct notifier_block msr_class_cpu_notifier = { + .notifier_call = msr_class_cpu_callback, + }; + +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 6fcb49c..5b3f4ff 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 + return handled; + } + +-int __register_nmi_handler(unsigned int type, struct nmiaction *action) ++int __register_nmi_handler(unsigned int type, const struct nmiaction *action) + { + struct nmi_desc *desc = nmi_to_desc(type); + unsigned long flags; +@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) + * event confuses some handlers (kdump uses this flag) + */ + if (action->flags & NMI_FLAG_FIRST) +- list_add_rcu(&action->list, &desc->head); ++ pax_list_add_rcu((struct list_head *)&action->list, &desc->head); + else +- list_add_tail_rcu(&action->list, &desc->head); ++ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head); + + spin_unlock_irqrestore(&desc->lock, flags); + return 0; +@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name) + if (!strcmp(n->name, name)) { + WARN(in_nmi(), + "Trying to free NMI (%s) from NMI context!\n", n->name); +- list_del_rcu(&n->list); ++ pax_list_del_rcu((struct list_head *)&n->list); + break; + } + } +@@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void) + dotraplinkage notrace __kprobes void + do_nmi(struct pt_regs *regs, long error_code) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (!user_mode(regs)) { ++ unsigned long cs = regs->cs & 0xFFFF; ++ unsigned long ip = ktva_ktla(regs->ip); ++ ++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext) ++ regs->ip = ip; ++ } ++#endif ++ + nmi_nesting_preprocess(regs); + + nmi_enter(); +diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c +index 6d9582e..f746287 100644 +--- a/arch/x86/kernel/nmi_selftest.c ++++ b/arch/x86/kernel/nmi_selftest.c +@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void) + { + /* trap all the unknown NMIs we may generate */ + register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk", +- __initdata); ++ __initconst); + } + + static void __init cleanup_nmi_testsuite(void) +@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask) + unsigned long timeout; + + if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, +- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) { ++ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) { + nmi_fail = FAILURE; + return; + } +diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c +index bbb6c73..24a58ef 100644 +--- a/arch/x86/kernel/paravirt-spinlocks.c ++++ b/arch/x86/kernel/paravirt-spinlocks.c +@@ -8,7 +8,7 @@ + + #include <asm/paravirt.h> + +-struct pv_lock_ops pv_lock_ops = { ++struct pv_lock_ops pv_lock_ops __read_only = { + #ifdef CONFIG_SMP + .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop), + .unlock_kick = paravirt_nop, +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index 1b10af8..45bfbec 100644 +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x) + { + return x; + } ++#if 
defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) ++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64); ++#endif + + void __init default_banner(void) + { +@@ -141,16 +144,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, + + if (opfunc == NULL) + /* If there's no function, patch it with a ud2a (BUG) */ +- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); +- else if (opfunc == _paravirt_nop) ++ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a)); ++ else if (opfunc == (void *)_paravirt_nop) + /* If the operation is a nop, then nop the callsite */ + ret = paravirt_patch_nop(); + + /* identity functions just return their single argument */ +- else if (opfunc == _paravirt_ident_32) ++ else if (opfunc == (void *)_paravirt_ident_32) + ret = paravirt_patch_ident_32(insnbuf, len); +- else if (opfunc == _paravirt_ident_64) ++ else if (opfunc == (void *)_paravirt_ident_64) + ret = paravirt_patch_ident_64(insnbuf, len); ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) ++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64) ++ ret = paravirt_patch_ident_64(insnbuf, len); ++#endif + + else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || + type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || +@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, + if (insn_len > len || start == NULL) + insn_len = len; + else +- memcpy(insnbuf, start, insn_len); ++ memcpy(insnbuf, ktla_ktva(start), insn_len); + + return insn_len; + } +@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) + return this_cpu_read(paravirt_lazy_mode); + } + +-struct pv_info pv_info = { ++struct pv_info pv_info __read_only = { + .name = "bare hardware", + .paravirt_enabled = 0, + .kernel_rpl = 0, +@@ -310,16 +317,16 @@ struct pv_info pv_info = { + #endif + }; + +-struct pv_init_ops pv_init_ops = { ++struct pv_init_ops pv_init_ops __read_only = { + .patch = native_patch, + }; + +-struct pv_time_ops pv_time_ops = { ++struct pv_time_ops pv_time_ops __read_only = { + .sched_clock = native_sched_clock, + .steal_clock = native_steal_clock, + }; + +-__visible struct pv_irq_ops pv_irq_ops = { ++__visible struct pv_irq_ops pv_irq_ops __read_only = { + .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), + .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), + .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), +@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = { + #endif + }; + +-__visible struct pv_cpu_ops pv_cpu_ops = { ++__visible struct pv_cpu_ops pv_cpu_ops __read_only = { + .cpuid = native_cpuid, + .get_debugreg = native_get_debugreg, + .set_debugreg = native_set_debugreg, +@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = { + .end_context_switch = paravirt_nop, + }; + +-struct pv_apic_ops pv_apic_ops = { ++struct pv_apic_ops pv_apic_ops __read_only= { + #ifdef CONFIG_X86_LOCAL_APIC + .startup_ipi_hook = paravirt_nop, + #endif + }; + +-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) ++#ifdef CONFIG_X86_32 ++#ifdef CONFIG_X86_PAE ++/* 64-bit pagetable entries */ ++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64) ++#else + /* 32-bit pagetable entries */ + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) ++#endif + #else + /* 64-bit pagetable entries */ + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) + #endif + +-struct pv_mmu_ops pv_mmu_ops = { ++struct pv_mmu_ops pv_mmu_ops __read_only = { + + .read_cr2 = native_read_cr2, + .write_cr2 = native_write_cr2, +@@ -453,6 
+465,7 @@ struct pv_mmu_ops pv_mmu_ops = { + .make_pud = PTE_IDENT, + + .set_pgd = native_set_pgd, ++ .set_pgd_batched = native_set_pgd_batched, + #endif + #endif /* PAGETABLE_LEVELS >= 3 */ + +@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = { + }, + + .set_fixmap = native_set_fixmap, ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .pax_open_kernel = native_pax_open_kernel, ++ .pax_close_kernel = native_pax_close_kernel, ++#endif ++ + }; + + EXPORT_SYMBOL_GPL(pv_time_ops); +diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c +index 299d493..2ccb0ee 100644 +--- a/arch/x86/kernel/pci-calgary_64.c ++++ b/arch/x86/kernel/pci-calgary_64.c +@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void) + tce_space = be64_to_cpu(readq(target)); + tce_space = tce_space & TAR_SW_BITS; + +- tce_space = tce_space & (~specified_table_size); ++ tce_space = tce_space & (~(unsigned long)specified_table_size); + info->tce_space = (u64 *)__va(tce_space); + } + } +diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c +index 35ccf75..7a15747 100644 +--- a/arch/x86/kernel/pci-iommu_table.c ++++ b/arch/x86/kernel/pci-iommu_table.c +@@ -2,7 +2,7 @@ + #include <asm/iommu_table.h> + #include <linux/string.h> + #include <linux/kallsyms.h> +- ++#include <linux/sched.h> + + #define DEBUG 1 + +diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c +index 6c483ba..d10ce2f 100644 +--- a/arch/x86/kernel/pci-swiotlb.c ++++ b/arch/x86/kernel/pci-swiotlb.c +@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + struct dma_attrs *attrs) + { +- swiotlb_free_coherent(dev, size, vaddr, dma_addr); ++ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs); + } + + static struct dma_map_ops swiotlb_dma_ops = { +diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S +index ca7f0d5..8996469 100644 +--- a/arch/x86/kernel/preempt.S ++++ b/arch/x86/kernel/preempt.S +@@ -3,12 +3,14 @@ + #include <asm/dwarf2.h> + #include <asm/asm.h> + #include <asm/calling.h> ++#include <asm/alternative-asm.h> + + ENTRY(___preempt_schedule) + CFI_STARTPROC + SAVE_ALL + call preempt_schedule + RESTORE_ALL ++ pax_force_retaddr + ret + CFI_ENDPROC + +@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context) + SAVE_ALL + call preempt_schedule_context + RESTORE_ALL ++ pax_force_retaddr + ret + CFI_ENDPROC + +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index 3fb8d95..254dc51 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -36,7 +36,8 @@ + * section. Since TSS's are completely CPU-local, we want them + * on exact cacheline boundaries, to eliminate cacheline ping-pong. + */ +-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; ++struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... 
NR_CPUS-1] = INIT_TSS }; ++EXPORT_SYMBOL(init_tss); + + #ifdef CONFIG_X86_64 + static DEFINE_PER_CPU(unsigned char, is_idle); +@@ -92,7 +93,7 @@ void arch_task_cache_init(void) + task_xstate_cachep = + kmem_cache_create("task_xstate", xstate_size, + __alignof__(union thread_xstate), +- SLAB_PANIC | SLAB_NOTRACK, NULL); ++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL); + } + + /* +@@ -105,7 +106,7 @@ void exit_thread(void) + unsigned long *bp = t->io_bitmap_ptr; + + if (bp) { +- struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); ++ struct tss_struct *tss = init_tss + get_cpu(); + + t->io_bitmap_ptr = NULL; + clear_thread_flag(TIF_IO_BITMAP); +@@ -125,6 +126,9 @@ void flush_thread(void) + { + struct task_struct *tsk = current; + ++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF) ++ loadsegment(gs, 0); ++#endif + flush_ptrace_hw_breakpoint(tsk); + memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); + drop_init_fpu(tsk); +@@ -271,7 +275,7 @@ static void __exit_idle(void) + void exit_idle(void) + { + /* idle loop has pid 0 */ +- if (current->pid) ++ if (task_pid_nr(current)) + return; + __exit_idle(); + } +@@ -327,7 +331,7 @@ bool xen_set_default_idle(void) + return ret; + } + #endif +-void stop_this_cpu(void *dummy) ++__noreturn void stop_this_cpu(void *dummy) + { + local_irq_disable(); + /* +@@ -456,16 +460,37 @@ static int __init idle_setup(char *str) + } + early_param("idle", idle_setup); + +-unsigned long arch_align_stack(unsigned long sp) ++#ifdef CONFIG_PAX_RANDKSTACK ++void pax_randomize_kstack(struct pt_regs *regs) + { +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() % 8192; +- return sp & ~0xf; +-} ++ struct thread_struct *thread = ¤t->thread; ++ unsigned long time; + +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long range_end = mm->brk + 0x02000000; +- return randomize_range(mm->brk, range_end, 0) ? 
: mm->brk; +-} ++ if (!randomize_va_space) ++ return; ++ ++ if (v8086_mode(regs)) ++ return; + ++ rdtscl(time); ++ ++ /* P4 seems to return a 0 LSB, ignore it */ ++#ifdef CONFIG_MPENTIUM4 ++ time &= 0x3EUL; ++ time <<= 2; ++#elif defined(CONFIG_X86_64) ++ time &= 0xFUL; ++ time <<= 4; ++#else ++ time &= 0x1FUL; ++ time <<= 3; ++#endif ++ ++ thread->sp0 ^= time; ++ load_sp0(init_tss + smp_processor_id(), thread); ++ ++#ifdef CONFIG_X86_64 ++ this_cpu_write(kernel_stack, thread->sp0); ++#endif ++} ++#endif +diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c +index 0de43e9..056b840 100644 +--- a/arch/x86/kernel/process_32.c ++++ b/arch/x86/kernel/process_32.c +@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); + unsigned long thread_saved_pc(struct task_struct *tsk) + { + return ((unsigned long *)tsk->thread.sp)[3]; ++//XXX return tsk->thread.eip; + } + + void __show_regs(struct pt_regs *regs, int all) +@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all) + unsigned long sp; + unsigned short ss, gs; + +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; +- gs = get_user_gs(regs); + } else { + sp = kernel_stack_pointer(regs); + savesegment(ss, ss); +- savesegment(gs, gs); + } ++ gs = get_user_gs(regs); + + printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", + (u16)regs->cs, regs->ip, regs->flags, +- smp_processor_id()); ++ raw_smp_processor_id()); + print_symbol("EIP is at %s\n", regs->ip); + + printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", +@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task) + int copy_thread(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct *p) + { +- struct pt_regs *childregs = task_pt_regs(p); ++ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; + struct task_struct *tsk; + int err; + + p->thread.sp = (unsigned long) childregs; + p->thread.sp0 = (unsigned long) (childregs+1); ++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); + + if (unlikely(p->flags & PF_KTHREAD)) { + /* kernel thread */ + memset(childregs, 0, sizeof(struct pt_regs)); + p->thread.ip = (unsigned long) ret_from_kernel_thread; +- task_user_gs(p) = __KERNEL_STACK_CANARY; +- childregs->ds = __USER_DS; +- childregs->es = __USER_DS; ++ savesegment(gs, childregs->gs); ++ childregs->ds = __KERNEL_DS; ++ childregs->es = __KERNEL_DS; + childregs->fs = __KERNEL_PERCPU; + childregs->bx = sp; /* function */ + childregs->bp = arg; +@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + struct thread_struct *prev = &prev_p->thread, + *next = &next_p->thread; + int cpu = smp_processor_id(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + fpu_switch_t fpu; + + /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ +@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + */ + lazy_save_gs(prev->gs); + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(task_thread_info(next_p)->addr_limit); ++#endif ++ + /* + * Load the per-thread Thread-Local Storage descriptor. 
+ */ +@@ -314,6 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + */ + arch_end_context_switch(next_p); + ++ this_cpu_write(current_task, next_p); ++ this_cpu_write(current_tinfo, &next_p->tinfo); ++ + /* + * Restore %gs if needed (which is common) + */ +@@ -322,8 +330,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + + switch_fpu_finish(next_p, fpu); + +- this_cpu_write(current_task, next_p); +- + return prev_p; + } + +@@ -353,4 +359,3 @@ unsigned long get_wchan(struct task_struct *p) + } while (count++ < 16); + return 0; + } +- +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c +index 9c0280f..5bbb1c0 100644 +--- a/arch/x86/kernel/process_64.c ++++ b/arch/x86/kernel/process_64.c +@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, + struct pt_regs *childregs; + struct task_struct *me = current; + +- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; ++ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16; + childregs = task_pt_regs(p); + p->thread.sp = (unsigned long) childregs; + p->thread.usersp = me->thread.usersp; ++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); + set_tsk_thread_flag(p, TIF_FORK); + p->thread.fpu_counter = 0; + p->thread.io_bitmap_ptr = NULL; +@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, + p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs; + savesegment(es, p->thread.es); + savesegment(ds, p->thread.ds); ++ savesegment(ss, p->thread.ss); ++ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS); + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); + + if (unlikely(p->flags & PF_KTHREAD)) { +@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + struct thread_struct *prev = &prev_p->thread; + struct thread_struct *next = &next_p->thread; + int cpu = smp_processor_id(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + unsigned fsindex, gsindex; + fpu_switch_t fpu; + +@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + if (unlikely(next->ds | prev->ds)) + loadsegment(ds, next->ds); + ++ savesegment(ss, prev->ss); ++ if (unlikely(next->ss != prev->ss)) ++ loadsegment(ss, next->ss); + + /* We must save %fs and %gs before load_TLS() because + * %fs and %gs may be cleared by load_TLS(). 
+@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + prev->usersp = this_cpu_read(old_rsp); + this_cpu_write(old_rsp, next->usersp); + this_cpu_write(current_task, next_p); ++ this_cpu_write(current_tinfo, &next_p->tinfo); + + /* + * If it were not for PREEMPT_ACTIVE we could guarantee that the +@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) + task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count); + this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count); + +- this_cpu_write(kernel_stack, +- (unsigned long)task_stack_page(next_p) + +- THREAD_SIZE - KERNEL_STACK_OFFSET); ++ this_cpu_write(kernel_stack, next->sp0); + + /* + * Now maybe reload the debug registers and handle I/O bitmaps +@@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_struct *p) + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + stack = (unsigned long)task_stack_page(p); +- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) ++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64)) + return 0; + fp = *(u64 *)(p->thread.sp); + do { +- if (fp < (unsigned long)stack || +- fp >= (unsigned long)stack+THREAD_SIZE) ++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64)) + return 0; + ip = *(u64 *)(fp+8); + if (!in_sched_functions(ip)) +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c +index 7461f50..01d0b9c 100644 +--- a/arch/x86/kernel/ptrace.c ++++ b/arch/x86/kernel/ptrace.c +@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) + { + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)®s->sp; +- struct thread_info *tinfo; + +- if (context == (sp & ~(THREAD_SIZE - 1))) ++ if (context == ((sp + 8) & ~(THREAD_SIZE - 1))) + return sp; + +- tinfo = (struct thread_info *)context; +- if (tinfo->previous_esp) +- return tinfo->previous_esp; ++ sp = *(unsigned long *)context; ++ if (sp) ++ return sp; + + return (unsigned long)regs; + } +@@ -452,6 +451,20 @@ static int putreg(struct task_struct *child, + if (child->thread.gs != value) + return do_arch_prctl(child, ARCH_SET_GS, value); + return 0; ++ ++ case offsetof(struct user_regs_struct,ip): ++ /* ++ * Protect against any attempt to set ip to an ++ * impossible address. There are dragons lurking if the ++ * address is noncanonical. (This explicitly allows ++ * setting ip to TASK_SIZE_MAX, because user code can do ++ * that all by itself by running off the end of its ++ * address space. ++ */ ++ if (value > TASK_SIZE_MAX) ++ return -EIO; ++ break; ++ + #endif + } + +@@ -588,7 +601,7 @@ static void ptrace_triggered(struct perf_event *bp, + static unsigned long ptrace_get_dr7(struct perf_event *bp[]) + { + int i; +- int dr7 = 0; ++ unsigned long dr7 = 0; + struct arch_hw_breakpoint *info; + + for (i = 0; i < HBP_NUM; i++) { +@@ -822,7 +835,7 @@ long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) + { + int ret; +- unsigned long __user *datap = (unsigned long __user *)data; ++ unsigned long __user *datap = (__force unsigned long __user *)data; + + switch (request) { + /* read the word at location addr in the USER area. 
*/ +@@ -907,14 +920,14 @@ long arch_ptrace(struct task_struct *child, long request, + if ((int) addr < 0) + return -EIO; + ret = do_get_thread_area(child, addr, +- (struct user_desc __user *)data); ++ (__force struct user_desc __user *) data); + break; + + case PTRACE_SET_THREAD_AREA: + if ((int) addr < 0) + return -EIO; + ret = do_set_thread_area(child, addr, +- (struct user_desc __user *)data, 0); ++ (__force struct user_desc __user *) data, 0); + break; + #endif + +@@ -1292,7 +1305,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + + #ifdef CONFIG_X86_64 + +-static struct user_regset x86_64_regsets[] __read_mostly = { ++static user_regset_no_const x86_64_regsets[] __read_only = { + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct user_regs_struct) / sizeof(long), +@@ -1333,7 +1346,7 @@ static const struct user_regset_view user_x86_64_view = { + #endif /* CONFIG_X86_64 */ + + #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION +-static struct user_regset x86_32_regsets[] __read_mostly = { ++static user_regset_no_const x86_32_regsets[] __read_only = { + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct user_regs_struct32) / sizeof(u32), +@@ -1386,7 +1399,7 @@ static const struct user_regset_view user_x86_32_view = { + */ + u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; + +-void update_regset_xstate_info(unsigned int size, u64 xstate_mask) ++void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask) + { + #ifdef CONFIG_X86_64 + x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64); +@@ -1421,7 +1434,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, + memset(info, 0, sizeof(*info)); + info->si_signo = SIGTRAP; + info->si_code = si_code; +- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; ++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL; + } + + void user_single_step_siginfo(struct task_struct *tsk, +@@ -1450,6 +1463,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, + # define IS_IA32 0 + #endif + ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern void gr_delayed_cred_worker(void); ++#endif ++ + /* + * We must return the syscall number to actually look up in the table. + * This can be -1L to skip running any syscall at all. +@@ -1460,6 +1477,11 @@ long syscall_trace_enter(struct pt_regs *regs) + + user_exit(); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + /* + * If we stepped into a sysenter/syscall insn, it trapped in + * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. 
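[Both the syscall-enter and syscall-leave paths test-and-clear TIF_GRSEC_SETXID: grsecurity's GRKERNSEC_SETXID feature lets one thread queue a credential change for its siblings, and each sibling applies it at its next syscall boundary instead of asynchronously. A hypothetical sketch of that deferred-apply pattern follows; the patch's real worker and the delayed_cred field it consumes are defined elsewhere in the patch, so the field and the exact ids copied here are illustrative only:

#include <linux/cred.h>
#include <linux/sched.h>

void gr_delayed_cred_worker(void)
{
	const struct cred *pending;
	struct cred *new;

	pending = current->delayed_cred;	/* hypothetical field */
	if (pending == NULL)
		return;
	current->delayed_cred = NULL;

	new = prepare_creds();			/* writable copy of current creds */
	if (new == NULL)
		return;
	new->uid  = pending->uid;		/* adopt the queued ids */
	new->euid = pending->euid;
	new->gid  = pending->gid;
	new->egid = pending->egid;
	commit_creds(new);
	put_cred(pending);
}]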
+@@ -1515,6 +1537,11 @@ void syscall_trace_leave(struct pt_regs *regs) + */ + user_exit(); + ++#ifdef CONFIG_GRKERNSEC_SETXID ++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) ++ gr_delayed_cred_worker(); ++#endif ++ + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) +diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c +index 2f355d2..e75ed0a 100644 +--- a/arch/x86/kernel/pvclock.c ++++ b/arch/x86/kernel/pvclock.c +@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void) + reset_hung_task_detector(); + } + +-static atomic64_t last_value = ATOMIC64_INIT(0); ++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0); + + void pvclock_resume(void) + { +- atomic64_set(&last_value, 0); ++ atomic64_set_unchecked(&last_value, 0); + } + + u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src) +@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) + * updating at the same time, and one of them could be slightly behind, + * making the assumption that last_value always go forward fail to hold. + */ +- last = atomic64_read(&last_value); ++ last = atomic64_read_unchecked(&last_value); + do { + if (ret < last) + return last; +- last = atomic64_cmpxchg(&last_value, last, ret); ++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret); + } while (unlikely(last != ret)); + + return ret; +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index c752cb4..866c432 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) + + void __noreturn machine_real_restart(unsigned int type) + { ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) ++ struct desc_struct *gdt; ++#endif ++ + local_irq_disable(); + + /* +@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type) + + /* Jump to the identity-mapped low memory code */ + #ifdef CONFIG_X86_32 +- asm volatile("jmpl *%0" : : ++ ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ gdt = get_cpu_gdt_table(smp_processor_id()); ++ pax_open_kernel(); ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3; ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; ++ loadsegment(ds, __KERNEL_DS); ++ loadsegment(es, __KERNEL_DS); ++ loadsegment(ss, __KERNEL_DS); ++#endif ++#ifdef CONFIG_PAX_KERNEXEC ++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0; ++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0; ++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0; ++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff; ++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf; ++ gdt[GDT_ENTRY_KERNEL_CS].g = 1; ++#endif ++ pax_close_kernel(); ++#endif ++ ++ asm volatile("ljmpl *%0" : : + "rm" (real_mode_header->machine_real_restart_asm), + "a" (type)); + #else +@@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_fixups(void) + * try to force a triple fault and then cycle between hitting the keyboard + * controller and doing that + */ +-static void native_machine_emergency_restart(void) ++static void __noreturn native_machine_emergency_restart(void) + { + int i; + int attempt = 0; +@@ -593,13 +620,13 @@ void native_machine_shutdown(void) + #endif + } + +-static void __machine_emergency_restart(int emergency) ++static void __noreturn __machine_emergency_restart(int emergency) + { + reboot_emergency = emergency; + machine_ops.emergency_restart(); + } + +-static void native_machine_restart(char *__unused) ++static void __noreturn 
native_machine_restart(char *__unused) + { + pr_notice("machine restart\n"); + +@@ -608,7 +635,7 @@ static void native_machine_restart(char *__unused) + __machine_emergency_restart(0); + } + +-static void native_machine_halt(void) ++static void __noreturn native_machine_halt(void) + { + /* Stop other cpus and apics */ + machine_shutdown(); +@@ -618,7 +645,7 @@ static void native_machine_halt(void) + stop_this_cpu(NULL); + } + +-static void native_machine_power_off(void) ++static void __noreturn native_machine_power_off(void) + { + if (pm_power_off) { + if (!reboot_force) +@@ -627,9 +654,10 @@ static void native_machine_power_off(void) + } + /* A fallback in case there is no PM info available */ + tboot_shutdown(TB_SHUTDOWN_HALT); ++ unreachable(); + } + +-struct machine_ops machine_ops = { ++struct machine_ops machine_ops __read_only = { + .power_off = native_machine_power_off, + .shutdown = native_machine_shutdown, + .emergency_restart = native_machine_emergency_restart, +diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c +index c8e41e9..64049ef 100644 +--- a/arch/x86/kernel/reboot_fixups_32.c ++++ b/arch/x86/kernel/reboot_fixups_32.c +@@ -57,7 +57,7 @@ struct device_fixup { + unsigned int vendor; + unsigned int device; + void (*reboot_fixup)(struct pci_dev *); +-}; ++} __do_const; + + /* + * PCI ids solely used for fixups_table go here +diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S +index 3fd2c69..a444264 100644 +--- a/arch/x86/kernel/relocate_kernel_64.S ++++ b/arch/x86/kernel/relocate_kernel_64.S +@@ -96,8 +96,7 @@ relocate_kernel: + + /* jump to identity mapped page */ + addq $(identity_mapped - relocate_kernel), %r8 +- pushq %r8 +- ret ++ jmp *%r8 + + identity_mapped: + /* set return address to 0 if not preserving context */ +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index ce72964..be8aea7 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -110,6 +110,7 @@ + #include <asm/mce.h> + #include <asm/alternative.h> + #include <asm/prom.h> ++#include <asm/boot.h> + + /* + * max_low_pfn_mapped: highest direct mapped pfn under 4GB +@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data); + #endif + + +-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) +-__visible unsigned long mmu_cr4_features; ++#ifdef CONFIG_X86_64 ++__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE; ++#elif defined(CONFIG_X86_PAE) ++__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE; + #else +-__visible unsigned long mmu_cr4_features = X86_CR4_PAE; ++__visible unsigned long mmu_cr4_features __read_only; + #endif + ++void set_in_cr4(unsigned long mask) ++{ ++ unsigned long cr4 = read_cr4(); ++ ++ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features) ++ return; ++ ++ pax_open_kernel(); ++ mmu_cr4_features |= mask; ++ pax_close_kernel(); ++ ++ if (trampoline_cr4_features) ++ *trampoline_cr4_features = mmu_cr4_features; ++ cr4 |= mask; ++ write_cr4(cr4); ++} ++EXPORT_SYMBOL(set_in_cr4); ++ ++void clear_in_cr4(unsigned long mask) ++{ ++ unsigned long cr4 = read_cr4(); ++ ++ if (!(cr4 & mask) && cr4 == mmu_cr4_features) ++ return; ++ ++ pax_open_kernel(); ++ mmu_cr4_features &= ~mask; ++ pax_close_kernel(); ++ ++ if (trampoline_cr4_features) ++ *trampoline_cr4_features = mmu_cr4_features; ++ cr4 &= ~mask; ++ write_cr4(cr4); ++} ++EXPORT_SYMBOL(clear_in_cr4); ++ + /* Boot loader ID and version as integers, for the benefit of proc_dointvec */ + int 
bootloader_type, bootloader_version; + +@@ -772,7 +811,7 @@ static void __init trim_bios_range(void) + * area (640->1Mb) as ram even though it is not. + * take them out. + */ +- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); ++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); + + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); + } +@@ -780,7 +819,7 @@ static void __init trim_bios_range(void) + /* called before trim_bios_range() to spare extra sanitize */ + static void __init e820_add_kernel_range(void) + { +- u64 start = __pa_symbol(_text); ++ u64 start = __pa_symbol(ktla_ktva(_text)); + u64 size = __pa_symbol(_end) - start; + + /* +@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) + + void __init setup_arch(char **cmdline_p) + { ++#ifdef CONFIG_X86_32 ++ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR); ++#else + memblock_reserve(__pa_symbol(_text), + (unsigned long)__bss_stop - (unsigned long)_text); ++#endif + + early_reserve_initrd(); + +@@ -947,14 +990,14 @@ void __init setup_arch(char **cmdline_p) + + if (!boot_params.hdr.root_flags) + root_mountflags &= ~MS_RDONLY; +- init_mm.start_code = (unsigned long) _text; +- init_mm.end_code = (unsigned long) _etext; ++ init_mm.start_code = ktla_ktva((unsigned long) _text); ++ init_mm.end_code = ktla_ktva((unsigned long) _etext); + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = _brk_end; + +- code_resource.start = __pa_symbol(_text); +- code_resource.end = __pa_symbol(_etext)-1; +- data_resource.start = __pa_symbol(_etext); ++ code_resource.start = __pa_symbol(ktla_ktva(_text)); ++ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1; ++ data_resource.start = __pa_symbol(_sdata); + data_resource.end = __pa_symbol(_edata)-1; + bss_resource.start = __pa_symbol(__bss_start); + bss_resource.end = __pa_symbol(__bss_stop)-1; +diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c +index 5cdff03..80fa283 100644 +--- a/arch/x86/kernel/setup_percpu.c ++++ b/arch/x86/kernel/setup_percpu.c +@@ -21,19 +21,17 @@ + #include <asm/cpu.h> + #include <asm/stackprotector.h> + +-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); ++#ifdef CONFIG_SMP ++DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number); + EXPORT_PER_CPU_SYMBOL(cpu_number); ++#endif + +-#ifdef CONFIG_X86_64 + #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) +-#else +-#define BOOT_PERCPU_OFFSET 0 +-#endif + + DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; + EXPORT_PER_CPU_SYMBOL(this_cpu_off); + +-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { ++unsigned long __per_cpu_offset[NR_CPUS] __read_only = { + [0 ... 
NR_CPUS-1] = BOOT_PERCPU_OFFSET, + }; + EXPORT_SYMBOL(__per_cpu_offset); +@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void) + { + #ifdef CONFIG_NEED_MULTIPLE_NODES + pg_data_t *last = NULL; +- unsigned int cpu; ++ int cpu; + + for_each_possible_cpu(cpu) { + int node = early_cpu_to_node(cpu); +@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu) + { + #ifdef CONFIG_X86_32 + struct desc_struct gdt; ++ unsigned long base = per_cpu_offset(cpu); + +- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, +- 0x2 | DESCTYPE_S, 0x8); +- gdt.s = 1; ++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, ++ 0x83 | DESCTYPE_S, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), + GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); + #endif +@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void) + /* alrighty, percpu areas up and running */ + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) { ++#ifdef CONFIG_CC_STACKPROTECTOR ++#ifdef CONFIG_X86_32 ++ unsigned long canary = per_cpu(stack_canary.canary, cpu); ++#endif ++#endif + per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; + per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); + per_cpu(cpu_number, cpu) = cpu; +@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void) + */ + set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); + #endif ++#ifdef CONFIG_CC_STACKPROTECTOR ++#ifdef CONFIG_X86_32 ++ if (!cpu) ++ per_cpu(stack_canary.canary, cpu) = canary; ++#endif ++#endif + /* + * Up to this point, the boot CPU has been using .init.data + * area. Reload any changed state for the boot CPU. +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c +index 9e5de68..147c254 100644 +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp) + * Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. + */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + #else /* !CONFIG_X86_32 */ + sp = round_down(sp, 16) - 8; + #endif +@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set, + } + + if (current->mm->context.vdso) +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); ++ restorer = (void __force_user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); + else +- restorer = &frame->retcode; ++ restorer = (void __user *)&frame->retcode; + if (ksig->ka.sa.sa_flags & SA_RESTORER) + restorer = ksig->ka.sa.sa_restorer; + +@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set, + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ +- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); ++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); + + if (err) + return -EFAULT; +@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, + save_altstack_ex(&frame->uc.uc_stack, regs->sp); + + /* Set up to return from userspace. 
*/ +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); ++ if (current->mm->context.vdso) ++ restorer = (void __force_user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); ++ else ++ restorer = (void __user *)&frame->retcode; + if (ksig->ka.sa.sa_flags & SA_RESTORER) + restorer = ksig->ka.sa.sa_restorer; + put_user_ex(restorer, &frame->pretcode); +@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ +- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); ++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); + } put_user_catch(err); + + err |= copy_siginfo_to_user(&frame->info, &ksig->info); +@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) + { + int usig = signr_convert(ksig->sig); + sigset_t *set = sigmask_to_save(); +- compat_sigset_t *cset = (compat_sigset_t *) set; ++ sigset_t sigcopy; ++ compat_sigset_t *cset; ++ ++ sigcopy = *set; ++ ++ cset = (compat_sigset_t *) &sigcopy; + + /* Set up the stack frame */ + if (is_ia32_frame()) { +@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) + } else if (is_x32_frame()) { + return x32_setup_rt_frame(ksig, cset, regs); + } else { +- return __setup_rt_frame(ksig->sig, ksig, set, regs); ++ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs); + } + } + +diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c +index 7c3a5a6..f0a8961 100644 +--- a/arch/x86/kernel/smp.c ++++ b/arch/x86/kernel/smp.c +@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str) + + __setup("nonmi_ipi", nonmi_ipi_setup); + +-struct smp_ops smp_ops = { ++struct smp_ops smp_ops __read_only = { + .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, + .smp_prepare_cpus = native_smp_prepare_cpus, + .smp_cpus_done = native_smp_cpus_done, +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index 395be6d..11665af 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused) + + enable_start_cpu0 = 0; + +-#ifdef CONFIG_X86_32 ++ /* otherwise gcc will move up smp_processor_id before the cpu_init */ ++ barrier(); ++ + /* switch away from the initial page table */ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id(), kernel)); ++#else + load_cr3(swapper_pg_dir); ++#endif + __flush_tlb_all(); +-#endif + +- /* otherwise gcc will move up smp_processor_id before the cpu_init */ +- barrier(); + /* + * Check TSC synchronization with the BP: + */ +@@ -756,8 +759,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) + alternatives_enable_smp(); + + idle->thread.sp = (unsigned long) (((struct pt_regs *) +- (THREAD_SIZE + task_stack_page(idle))) - 1); ++ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1); + per_cpu(current_task, cpu) = idle; ++ per_cpu(current_tinfo, cpu) = &idle->tinfo; + + #ifdef CONFIG_X86_32 + /* Stack for startup_32 can be just as for start_secondary onwards */ +@@ -765,11 +769,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) + #else + clear_tsk_thread_flag(idle, TIF_FORK); + initial_gs = per_cpu_offset(cpu); +- per_cpu(kernel_stack, cpu) = +- (unsigned long)task_stack_page(idle) - +- KERNEL_STACK_OFFSET + THREAD_SIZE; ++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE; + #endif ++ ++ pax_open_kernel(); + early_gdt_descr.address = 
(unsigned long)get_cpu_gdt_table(cpu); ++ pax_close_kernel(); ++ + initial_code = (unsigned long)start_secondary; + stack_start = idle->thread.sp; + +@@ -918,6 +924,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) + /* the FPU context is blank, nobody can own it */ + __cpu_disable_lazy_restore(cpu); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + err = do_boot_cpu(apicid, cpu, tidle); + if (err) { + pr_debug("do_boot_cpu failed %d\n", err); +diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c +index 9b4d51d..5d28b58 100644 +--- a/arch/x86/kernel/step.c ++++ b/arch/x86/kernel/step.c +@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re + struct desc_struct *desc; + unsigned long base; + +- seg &= ~7UL; ++ seg >>= 3; + + mutex_lock(&child->mm->context.lock); +- if (unlikely((seg >> 3) >= child->mm->context.size)) ++ if (unlikely(seg >= child->mm->context.size)) + addr = -1L; /* bogus selector, access would fault */ + else { + desc = child->mm->context.ldt + seg; +@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re + addr += base; + } + mutex_unlock(&child->mm->context.lock); +- } ++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS) ++ addr = ktla_ktva(addr); + + return addr; + } +@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) + unsigned char opcode[15]; + unsigned long addr = convert_ip_to_linear(child, regs); + ++ if (addr == -EINVAL) ++ return 0; ++ + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); + for (i = 0; i < copied; i++) { + switch (opcode[i]) { +diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c +new file mode 100644 +index 0000000..5877189 +--- /dev/null ++++ b/arch/x86/kernel/sys_i386_32.c +@@ -0,0 +1,189 @@ ++/* ++ * This file contains various random system calls that ++ * have a non-standard calling sequence on the Linux/i386 ++ * platform. ++ */ ++ ++#include <linux/errno.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/fs.h> ++#include <linux/smp.h> ++#include <linux/sem.h> ++#include <linux/msg.h> ++#include <linux/shm.h> ++#include <linux/stat.h> ++#include <linux/syscalls.h> ++#include <linux/mman.h> ++#include <linux/file.h> ++#include <linux/utsname.h> ++#include <linux/ipc.h> ++#include <linux/elf.h> ++ ++#include <linux/uaccess.h> ++#include <linux/unistd.h> ++ ++#include <asm/syscalls.h> ++ ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) ++{ ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (flags & MAP_FIXED) ++ if (len > pax_task_size || addr > pax_task_size - len) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/* ++ * Align a virtual address to avoid aliasing in the I$ on AMD F15h. 
++ */ ++static unsigned long get_align_mask(void) ++{ ++ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32)) ++ return 0; ++ ++ if (!(current->flags & PF_RANDOMIZE)) ++ return 0; ++ ++ return va_align.mask; ++} ++ ++unsigned long ++arch_get_unmapped_area(struct file *filp, unsigned long addr, ++ unsigned long len, unsigned long pgoff, unsigned long flags) ++{ ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++ unsigned long pax_task_size = TASK_SIZE; ++ struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (len > pax_task_size) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len, offset)) ++ return addr; ++ } ++ } ++ ++ info.flags = 0; ++ info.length = len; ++ info.align_mask = filp ? get_align_mask() : 0; ++ info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) { ++ info.low_limit = 0x00110000UL; ++ info.high_limit = mm->start_code; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap & 0x03FFF000UL; ++#endif ++ ++ if (info.low_limit < info.high_limit) { ++ addr = vm_unmapped_area(&info); ++ if (!IS_ERR_VALUE(addr)) ++ return addr; ++ } ++ } else ++#endif ++ ++ info.low_limit = mm->mmap_base; ++ info.high_limit = pax_task_size; ++ ++ return vm_unmapped_area(&info); ++} ++ ++unsigned long ++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ++ const unsigned long len, const unsigned long pgoff, ++ const unsigned long flags) ++{ ++ struct vm_area_struct *vma; ++ struct mm_struct *mm = current->mm; ++ unsigned long addr = addr0, pax_task_size = TASK_SIZE; ++ struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ /* requested length too big for entire address space */ ++ if (len > pax_task_size) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) ++ goto bottomup; ++#endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ ++ /* requesting a specific address */ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len, offset)) ++ return addr; ++ } ++ } ++ ++ info.flags = VM_UNMAPPED_AREA_TOPDOWN; ++ info.length = len; ++ info.low_limit = PAGE_SIZE; ++ info.high_limit = mm->mmap_base; ++ info.align_mask = filp ? 
get_align_mask() : 0; ++ info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; ++ ++ addr = vm_unmapped_area(&info); ++ if (!(addr & ~PAGE_MASK)) ++ return addr; ++ VM_BUG_ON(addr != -ENOMEM); ++ ++bottomup: ++ /* ++ * A failed mmap() very likely causes application failure, ++ * so fall back to the bottom-up function here. This scenario ++ * can happen with large stack limits and large mmap() ++ * allocations. ++ */ ++ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); ++} +diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c +index 30277e2..5664a29 100644 +--- a/arch/x86/kernel/sys_x86_64.c ++++ b/arch/x86/kernel/sys_x86_64.c +@@ -81,8 +81,8 @@ out: + return error; + } + +-static void find_start_end(unsigned long flags, unsigned long *begin, +- unsigned long *end) ++static void find_start_end(struct mm_struct *mm, unsigned long flags, ++ unsigned long *begin, unsigned long *end) + { + if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) { + unsigned long new_begin; +@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, + *begin = new_begin; + } + } else { +- *begin = current->mm->mmap_legacy_base; ++ *begin = mm->mmap_legacy_base; + *end = TASK_SIZE; + } + } +@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; + unsigned long begin, end; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + + if (flags & MAP_FIXED) + return addr; + +- find_start_end(flags, &begin, &end); ++ find_start_end(mm, flags, &begin, &end); + + if (len > end) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.high_limit = end; + info.align_mask = filp ? get_align_mask() : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + +@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) +@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) + goto bottomup; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.high_limit = mm->mmap_base; + info.align_mask = filp ? 
get_align_mask() : 0; + info.align_offset = pgoff << PAGE_SHIFT; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + return addr; +diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c +index 91a4496..bb87552 100644 +--- a/arch/x86/kernel/tboot.c ++++ b/arch/x86/kernel/tboot.c +@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void) + + void tboot_shutdown(u32 shutdown_type) + { +- void (*shutdown)(void); ++ void (* __noreturn shutdown)(void); + + if (!tboot_enabled()) + return; +@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type) + + switch_to_tboot_pt(); + +- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; ++ shutdown = (void *)(unsigned long)tboot->shutdown_entry; + shutdown(); + + /* should not reach here */ +@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b) + return -ENODEV; + } + +-static atomic_t ap_wfs_count; ++static atomic_unchecked_t ap_wfs_count; + + static int tboot_wait_for_aps(int num_aps) + { +@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action, + { + switch (action) { + case CPU_DYING: +- atomic_inc(&ap_wfs_count); ++ atomic_inc_unchecked(&ap_wfs_count); + if (num_online_cpus() == 1) +- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) ++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count))) + return NOTIFY_BAD; + break; + } +@@ -422,7 +422,7 @@ static __init int tboot_late_init(void) + + tboot_create_trampoline(); + +- atomic_set(&ap_wfs_count, 0); ++ atomic_set_unchecked(&ap_wfs_count, 0); + register_hotcpu_notifier(&tboot_cpu_notifier); + + #ifdef CONFIG_DEBUG_FS +diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c +index 24d3c91..d06b473 100644 +--- a/arch/x86/kernel/time.c ++++ b/arch/x86/kernel/time.c +@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); + +- if (!user_mode_vm(regs) && in_lock_functions(pc)) { ++ if (!user_mode(regs) && in_lock_functions(pc)) { + #ifdef CONFIG_FRAME_POINTER +- return *(unsigned long *)(regs->bp + sizeof(long)); ++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); + #else + unsigned long *sp = + (unsigned long *)kernel_stack_pointer(regs); +@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs) + * or above a saved flags. Eflags has bits 22-31 zero, + * kernel addresses don't. 
+ */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return ktla_ktva(sp[0]); ++#else + if (sp[0] >> 22) + return sp[0]; + if (sp[1] >> 22) + return sp[1]; + #endif ++ ++#endif + } + return pc; + } +diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c +index f7fec09..9991981 100644 +--- a/arch/x86/kernel/tls.c ++++ b/arch/x86/kernel/tls.c +@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx, + if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) ++ return -EINVAL; ++#endif ++ + set_tls_desc(p, idx, &info, 1); + + return 0; +@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, + + if (kbuf) + info = kbuf; +- else if (__copy_from_user(infobuf, ubuf, count)) ++ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count)) + return -EFAULT; + else + info = infobuf; +diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c +index 1c113db..287b42e 100644 +--- a/arch/x86/kernel/tracepoint.c ++++ b/arch/x86/kernel/tracepoint.c +@@ -9,11 +9,11 @@ + #include <linux/atomic.h> + + atomic_t trace_idt_ctr = ATOMIC_INIT(0); +-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, ++const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1, + (unsigned long) trace_idt_table }; + + /* No need to be aligned, but done to keep all IDTs defined the same way. */ +-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss; ++gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata; + + static int trace_irq_vector_refcount; + static DEFINE_MUTEX(irq_vector_mutex); +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 57409f6..b505597 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -66,7 +66,7 @@ + #include <asm/proto.h> + + /* No need to be aligned, but done to keep all IDTs defined the same way. */ +-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; ++gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata; + #else + #include <asm/processor-flags.h> + #include <asm/setup.h> +@@ -75,7 +75,7 @@ asmlinkage int system_call(void); + #endif + + /* Must be page-aligned because the real IDT is used in a fixmap. */ +-gate_desc idt_table[NR_VECTORS] __page_aligned_bss; ++gate_desc idt_table[NR_VECTORS] __page_aligned_rodata; + + DECLARE_BITMAP(used_vectors, NR_VECTORS); + EXPORT_SYMBOL_GPL(used_vectors); +@@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) + } + + static int __kprobes +-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, ++do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str, + struct pt_regs *regs, long error_code) + { + #ifdef CONFIG_X86_32 +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + /* + * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86. + * On nmi (interrupt 2), do_trap should not be called. 
+@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, + return -1; + } + #endif +- if (!user_mode(regs)) { ++ if (!user_mode_novm(regs)) { + if (!fixup_exception(regs)) { + tsk->thread.error_code = error_code; + tsk->thread.trap_nr = trapnr; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) ++ str = "PAX: suspicious stack segment fault"; ++#endif ++ + die(str, regs, error_code); + } ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (trapnr == X86_TRAP_OF) ++ pax_report_refcount_overflow(regs); ++#endif ++ + return 0; + } + +@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, + } + + static void __kprobes +-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, ++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, + long error_code, siginfo_t *info) + { + struct task_struct *tsk = current; +@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, + if (show_unhandled_signals && unhandled_signal(tsk, signr) && + printk_ratelimit()) { + pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", +- tsk->comm, tsk->pid, str, ++ tsk->comm, task_pid_nr(tsk), str, + regs->ip, regs->sp, error_code); + print_vma_addr(" in ", regs->ip); + pr_cont("\n"); +@@ -251,6 +263,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) + tsk->thread.error_code = error_code; + tsk->thread.trap_nr = X86_TRAP_DF; + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE) ++ die("grsec: kernel stack overflow detected", regs, error_code); ++#endif ++ + #ifdef CONFIG_DOUBLEFAULT + df_debug(regs, error_code); + #endif +@@ -273,7 +290,7 @@ do_general_protection(struct pt_regs *regs, long error_code) + conditional_sti(regs); + + #ifdef CONFIG_X86_32 +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + local_irq_enable(); + handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); + goto exit; +@@ -281,18 +298,42 @@ do_general_protection(struct pt_regs *regs, long error_code) + #endif + + tsk = current; +- if (!user_mode(regs)) { ++ if (!user_mode_novm(regs)) { + if (fixup_exception(regs)) + goto exit; + + tsk->thread.error_code = error_code; + tsk->thread.trap_nr = X86_TRAP_GP; + if (notify_die(DIE_GPF, "general protection fault", regs, error_code, +- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) ++ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) ++ die("PAX: suspicious general protection fault", regs, error_code); ++ else ++#endif ++ + die("general protection fault", regs, error_code); ++ } + goto exit; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { ++ struct mm_struct *mm = tsk->mm; ++ unsigned long limit; ++ ++ down_write(&mm->mmap_sem); ++ limit = mm->context.user_cs_limit; ++ if (limit < TASK_SIZE) { ++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); ++ up_write(&mm->mmap_sem); ++ return; ++ } ++ up_write(&mm->mmap_sem); ++ } ++#endif ++ + tsk->thread.error_code = error_code; + tsk->thread.trap_nr = X86_TRAP_GP; + +@@ -453,7 +494,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + /* It's safe to allow irq's after DR6 
has been saved */ + preempt_conditional_sti(regs); + +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, + X86_TRAP_DB); + preempt_conditional_cli(regs); +@@ -468,7 +509,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) + * We already checked v86 mode above, so we can check for kernel mode + * by just checking the CPL of CS. + */ +- if ((dr6 & DR_STEP) && !user_mode(regs)) { ++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) { + tsk->thread.debugreg6 &= ~DR_STEP; + set_tsk_thread_flag(tsk, TIF_SINGLESTEP); + regs->flags &= ~X86_EFLAGS_TF; +@@ -500,7 +541,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) + return; + conditional_sti(regs); + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + { + if (!fixup_exception(regs)) { + task->thread.error_code = error_code; +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index e0d1d7a..db035d4 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data) + */ + smp_wmb(); + +- ACCESS_ONCE(c2n->head) = data; ++ ACCESS_ONCE_RW(c2n->head) = data; + } + + /* +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c +index 2ed8459..7cf329f 100644 +--- a/arch/x86/kernel/uprobes.c ++++ b/arch/x86/kernel/uprobes.c +@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, + int ret = NOTIFY_DONE; + + /* We are only interested in userspace traps */ +- if (regs && !user_mode_vm(regs)) ++ if (regs && !user_mode(regs)) + return NOTIFY_DONE; + + switch (val) { +@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs + + if (ncopied != rasize) { + pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, " +- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip); ++ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip); + + force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); + } +diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S +index b9242ba..50c5edd 100644 +--- a/arch/x86/kernel/verify_cpu.S ++++ b/arch/x86/kernel/verify_cpu.S +@@ -20,6 +20,7 @@ + * arch/x86/boot/compressed/head_64.S: Boot cpu verification + * arch/x86/kernel/trampoline_64.S: secondary processor verification + * arch/x86/kernel/head_32.S: processor startup ++ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume + * + * verify_cpu, returns the status of longmode and SSE in register %eax. 
+ * 0: Success 1: Failure +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c +index e8edcf5..27f9344 100644 +--- a/arch/x86/kernel/vm86_32.c ++++ b/arch/x86/kernel/vm86_32.c +@@ -44,6 +44,7 @@ + #include <linux/ptrace.h> + #include <linux/audit.h> + #include <linux/stddef.h> ++#include <linux/grsecurity.h> + + #include <asm/uaccess.h> + #include <asm/io.h> +@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) + do_exit(SIGSEGV); + } + +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + current->thread.sp0 = current->thread.saved_sp0; + current->thread.sysenter_cs = __KERNEL_CS; + load_sp0(tss, ¤t->thread); +@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86) + + if (tsk->thread.saved_sp0) + return -EPERM; ++ ++#ifdef CONFIG_GRKERNSEC_VM86 ++ if (!capable(CAP_SYS_RAWIO)) { ++ gr_handle_vm86(); ++ return -EPERM; ++ } ++#endif ++ + tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, + offsetof(struct kernel_vm86_struct, vm86plus) - + sizeof(info.regs)); +@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) + int tmp; + struct vm86plus_struct __user *v86; + ++#ifdef CONFIG_GRKERNSEC_VM86 ++ if (!capable(CAP_SYS_RAWIO)) { ++ gr_handle_vm86(); ++ return -EPERM; ++ } ++#endif ++ + tsk = current; + switch (cmd) { + case VM86_REQUEST_IRQ: +@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk + tsk->thread.saved_fs = info->regs32->fs; + tsk->thread.saved_gs = get_user_gs(info->regs32); + +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; + if (cpu_has_sep) + tsk->thread.sysenter_cs = 0; +@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, + goto cannot_handle; + if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) + goto cannot_handle; +- intr_ptr = (unsigned long __user *) (i << 2); ++ intr_ptr = (__force unsigned long __user *) (i << 2); + if (get_user(segoffs, intr_ptr)) + goto cannot_handle; + if ((segoffs >> 16) == BIOSSEG) +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index da6b35a..977e9cf 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -26,6 +26,13 @@ + #include <asm/page_types.h> + #include <asm/cache.h> + #include <asm/boot.h> ++#include <asm/segment.h> ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) ++#else ++#define __KERNEL_TEXT_OFFSET 0 ++#endif + + #undef i386 /* in case the preprocessor is a 32bit one */ + +@@ -69,30 +76,43 @@ jiffies_64 = jiffies; + + PHDRS { + text PT_LOAD FLAGS(5); /* R_E */ ++#ifdef CONFIG_X86_32 ++ module PT_LOAD FLAGS(5); /* R_E */ ++#endif ++#ifdef CONFIG_XEN ++ rodata PT_LOAD FLAGS(5); /* R_E */ ++#else ++ rodata PT_LOAD FLAGS(4); /* R__ */ ++#endif + data PT_LOAD FLAGS(6); /* RW_ */ +-#ifdef CONFIG_X86_64 ++ init.begin PT_LOAD FLAGS(6); /* RW_ */ + #ifdef CONFIG_SMP + percpu PT_LOAD FLAGS(6); /* RW_ */ + #endif ++ text.init PT_LOAD FLAGS(5); /* R_E */ ++ text.exit PT_LOAD FLAGS(5); /* R_E */ + init PT_LOAD FLAGS(7); /* RWE */ +-#endif + note PT_NOTE FLAGS(0); /* ___ */ + } + + SECTIONS + { + #ifdef CONFIG_X86_32 +- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; +- phys_startup_32 = startup_32 - LOAD_OFFSET; ++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; + #else +- . 
= __START_KERNEL; +- phys_startup_64 = startup_64 - LOAD_OFFSET; ++ . = __START_KERNEL; + #endif + + /* Text and read-only data */ +- .text : AT(ADDR(.text) - LOAD_OFFSET) { +- _text = .; ++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + /* bootstrapping code */ ++#ifdef CONFIG_X86_32 ++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++#else ++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++#endif ++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++ _text = .; + HEAD_TEXT + . = ALIGN(8); + _stext = .; +@@ -104,13 +124,47 @@ SECTIONS + IRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) +- /* End of text section */ +- _etext = .; + } :text = 0x9090 + +- NOTES :text :note ++ . += __KERNEL_TEXT_OFFSET; + +- EXCEPTION_TABLE(16) :text = 0x9090 ++#ifdef CONFIG_X86_32 ++ . = ALIGN(PAGE_SIZE); ++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ MODULES_EXEC_VADDR = .; ++ BYTE(0) ++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); ++ . = ALIGN(HPAGE_SIZE) - 1; ++ MODULES_EXEC_END = .; ++#endif ++ ++ } :module ++#endif ++ ++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { ++ /* End of text section */ ++ BYTE(0) ++ _etext = . - __KERNEL_TEXT_OFFSET; ++ } ++ ++#ifdef CONFIG_X86_32 ++ . = ALIGN(PAGE_SIZE); ++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { ++ . = ALIGN(PAGE_SIZE); ++ *(.empty_zero_page) ++ *(.initial_pg_fixmap) ++ *(.initial_pg_pmd) ++ *(.initial_page_table) ++ *(.swapper_pg_dir) ++ } :rodata ++#endif ++ ++ . = ALIGN(PAGE_SIZE); ++ NOTES :rodata :note ++ ++ EXCEPTION_TABLE(16) :rodata + + #if defined(CONFIG_DEBUG_RODATA) + /* .text should occupy whole number of pages */ +@@ -122,16 +176,20 @@ SECTIONS + + /* Data */ + .data : AT(ADDR(.data) - LOAD_OFFSET) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(HPAGE_SIZE); ++#else ++ . = ALIGN(PAGE_SIZE); ++#endif ++ + /* Start of data section */ + _sdata = .; + + /* init_task */ + INIT_TASK_DATA(THREAD_SIZE) + +-#ifdef CONFIG_X86_32 +- /* 32 bit has nosave before _edata */ + NOSAVE_DATA +-#endif + + PAGE_ALIGNED_DATA(PAGE_SIZE) + +@@ -172,12 +230,19 @@ SECTIONS + #endif /* CONFIG_X86_64 */ + + /* Init code and data - will be freed after init */ +- . = ALIGN(PAGE_SIZE); + .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { ++ BYTE(0) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(HPAGE_SIZE); ++#else ++ . = ALIGN(PAGE_SIZE); ++#endif ++ + __init_begin = .; /* paired with __init_end */ +- } ++ } :init.begin + +-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) ++#ifdef CONFIG_SMP + /* + * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the + * output PHDR, so the next output section - .init.text - should +@@ -186,12 +251,27 @@ SECTIONS + PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) + #endif + +- INIT_TEXT_SECTION(PAGE_SIZE) +-#ifdef CONFIG_X86_64 +- :init +-#endif ++ . = ALIGN(PAGE_SIZE); ++ init_begin = .; ++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { ++ VMLINUX_SYMBOL(_sinittext) = .; ++ INIT_TEXT ++ VMLINUX_SYMBOL(_einittext) = .; ++ . = ALIGN(PAGE_SIZE); ++ } :text.init + +- INIT_DATA_SECTION(16) ++ /* ++ * .exit.text is discard at runtime, not link time, to deal with ++ * references from .altinstructions and .eh_frame ++ */ ++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { ++ EXIT_TEXT ++ . = ALIGN(16); ++ } :text.exit ++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); ++ ++ . 
= ALIGN(PAGE_SIZE); ++ INIT_DATA_SECTION(16) :init + + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { + __x86_cpu_dev_start = .; +@@ -262,19 +342,12 @@ SECTIONS + } + + . = ALIGN(8); +- /* +- * .exit.text is discard at runtime, not link time, to deal with +- * references from .altinstructions and .eh_frame +- */ +- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { +- EXIT_TEXT +- } + + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } + +-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) ++#ifndef CONFIG_SMP + PERCPU_SECTION(INTERNODE_CACHE_BYTES) + #endif + +@@ -293,16 +366,10 @@ SECTIONS + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + __smp_locks = .; + *(.smp_locks) +- . = ALIGN(PAGE_SIZE); + __smp_locks_end = .; ++ . = ALIGN(PAGE_SIZE); + } + +-#ifdef CONFIG_X86_64 +- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { +- NOSAVE_DATA +- } +-#endif +- + /* BSS */ + . = ALIGN(PAGE_SIZE); + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { +@@ -318,6 +385,7 @@ SECTIONS + __brk_base = .; + . += 64 * 1024; /* 64k alignment slop space */ + *(.brk_reservation) /* areas brk users have reserved */ ++ . = ALIGN(HPAGE_SIZE); + __brk_limit = .; + } + +@@ -344,13 +412,12 @@ SECTIONS + * for the boot processor. + */ + #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load +-INIT_PER_CPU(gdt_page); + INIT_PER_CPU(irq_stack_union); + + /* + * Build-time check on the image size: + */ +-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), ++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); + + #ifdef CONFIG_SMP +diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c +index 09ce23a..9293938 100644 +--- a/arch/x86/kernel/vsyscall_64.c ++++ b/arch/x86/kernel/vsyscall_64.c +@@ -56,15 +56,13 @@ + DEFINE_VVAR(int, vgetcpu_mode); + DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data); + +-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; ++static enum { EMULATE, NONE } vsyscall_mode = EMULATE; + + static int __init vsyscall_setup(char *str) + { + if (str) { + if (!strcmp("emulate", str)) + vsyscall_mode = EMULATE; +- else if (!strcmp("native", str)) +- vsyscall_mode = NATIVE; + else if (!strcmp("none", str)) + vsyscall_mode = NONE; + else +@@ -101,7 +99,7 @@ void update_vsyscall(struct timekeeper *tk) + vdata->monotonic_time_sec = tk->xtime_sec + + tk->wall_to_monotonic.tv_sec; + vdata->monotonic_time_snsec = tk->xtime_nsec +- + (tk->wall_to_monotonic.tv_nsec ++ + ((u64)tk->wall_to_monotonic.tv_nsec + << tk->shift); + while (vdata->monotonic_time_snsec >= + (((u64)NSEC_PER_SEC) << tk->shift)) { +@@ -323,8 +321,7 @@ do_ret: + return true; + + sigsegv: +- force_sig(SIGSEGV, current); +- return true; ++ do_group_exit(SIGKILL); + } + + /* +@@ -377,10 +374,7 @@ void __init map_vsyscall(void) + extern char __vvar_page; + unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); + +- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, +- vsyscall_mode == NATIVE +- ? 
PAGE_KERNEL_VSYSCALL +- : PAGE_KERNEL_VVAR); ++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR); + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) != + (unsigned long)VSYSCALL_START); + +diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c +index 04068192..4d75aa6 100644 +--- a/arch/x86/kernel/x8664_ksyms_64.c ++++ b/arch/x86/kernel/x8664_ksyms_64.c +@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string); + EXPORT_SYMBOL(copy_user_generic_unrolled); + EXPORT_SYMBOL(copy_user_enhanced_fast_string); + EXPORT_SYMBOL(__copy_user_nocache); +-EXPORT_SYMBOL(_copy_from_user); +-EXPORT_SYMBOL(_copy_to_user); + + EXPORT_SYMBOL(copy_page); + EXPORT_SYMBOL(clear_page); +@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule); + EXPORT_SYMBOL(___preempt_schedule_context); + #endif + #endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++EXPORT_SYMBOL(cpu_pgd); ++#endif +diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c +index e48b674..a451dd9 100644 +--- a/arch/x86/kernel/x86_init.c ++++ b/arch/x86/kernel/x86_init.c +@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = { + static void default_nmi_init(void) { }; + static int default_i8042_detect(void) { return 1; }; + +-struct x86_platform_ops x86_platform = { ++struct x86_platform_ops x86_platform __read_only = { + .calibrate_tsc = native_calibrate_tsc, + .get_wallclock = mach_get_cmos_time, + .set_wallclock = mach_set_rtc_mmss, +@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = { + EXPORT_SYMBOL_GPL(x86_platform); + + #if defined(CONFIG_PCI_MSI) +-struct x86_msi_ops x86_msi = { ++struct x86_msi_ops x86_msi __read_only = { + .setup_msi_irqs = native_setup_msi_irqs, + .compose_msi_msg = native_compose_msi_msg, + .teardown_msi_irq = native_teardown_msi_irq, +@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) + } + #endif + +-struct x86_io_apic_ops x86_io_apic_ops = { ++struct x86_io_apic_ops x86_io_apic_ops __read_only = { + .init = native_io_apic_init_mappings, + .read = native_io_apic_read, + .write = native_io_apic_write, +diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c +index a4b451c..8dfe1ad 100644 +--- a/arch/x86/kernel/xsave.c ++++ b/arch/x86/kernel/xsave.c +@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame) + + /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ + sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved; +- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); ++ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); + + if (!use_xsave()) + return err; + +- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size)); ++ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size)); + + /* + * Read the xstate_bv which we copied (directly from the cpu or + * from the state in task struct) to the user buffers. 
+ */ +- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv); ++ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv); + + /* + * For legacy compatible, we always set FP/SSE bits in the bit +@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame) + */ + xstate_bv |= XSTATE_FPSSE; + +- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv); ++ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv); + + return err; + } +@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf) + { + int err; + ++ buf = (struct xsave_struct __user *)____m(buf); + if (use_xsave()) + err = xsave_user(buf); + else if (use_fxsr()) +@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk, + */ + static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only) + { ++ buf = (void __user *)____m(buf); + if (use_xsave()) { + if ((unsigned long)buf % 64 || fx_only) { + u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE; +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index c697625..a032162 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) + { +- int r; ++ int r, i; + + r = -E2BIG; + if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) + goto out; + r = -EFAULT; +- if (copy_from_user(&vcpu->arch.cpuid_entries, entries, +- cpuid->nent * sizeof(struct kvm_cpuid_entry2))) ++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2))) + goto out; ++ for (i = 0; i < cpuid->nent; ++i) { ++ struct kvm_cpuid_entry2 cpuid_entry; ++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry))) ++ goto out; ++ vcpu->arch.cpuid_entries[i] = cpuid_entry; ++ } + vcpu->arch.cpuid_nent = cpuid->nent; + kvm_apic_set_version(vcpu); + kvm_x86_ops->cpuid_update(vcpu); +@@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) + { +- int r; ++ int r, i; + + r = -E2BIG; + if (cpuid->nent < vcpu->arch.cpuid_nent) + goto out; + r = -EFAULT; +- if (copy_to_user(entries, &vcpu->arch.cpuid_entries, +- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) ++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) + goto out; ++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { ++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i]; ++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry))) ++ goto out; ++ } + return 0; + + out: +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 453e5fb..214168f 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -55,7 +55,7 @@ + #define APIC_BUS_CYCLE_NS 1 + + /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ +-#define apic_debug(fmt, arg...) ++#define apic_debug(fmt, arg...) 
do {} while (0) + + #define APIC_LVT_NUM 6 + /* 14 is the version for Xeon and Pentium 8.4.8*/ +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h +index cba218a..1cc1bed 100644 +--- a/arch/x86/kvm/paging_tmpl.h ++++ b/arch/x86/kvm/paging_tmpl.h +@@ -331,7 +331,7 @@ retry_walk: + if (unlikely(kvm_is_error_hva(host_addr))) + goto error; + +- ptep_user = (pt_element_t __user *)((void *)host_addr + offset); ++ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset); + if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) + goto error; + walker->ptep_user[walker->level - 1] = ptep_user; +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 2de1bc0..22251ee 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -3508,7 +3508,11 @@ static void reload_tss(struct kvm_vcpu *vcpu) + int cpu = raw_smp_processor_id(); + + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); ++ ++ pax_open_kernel(); + sd->tss_desc->type = 9; /* available 32/64-bit TSS */ ++ pax_close_kernel(); ++ + load_TR_desc(); + } + +@@ -3911,6 +3915,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) + #endif + #endif + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + reload_tss(vcpu); + + local_irq_disable(); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 3927528..fc19971 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1320,12 +1320,12 @@ static void vmcs_write64(unsigned long field, u64 value) + #endif + } + +-static void vmcs_clear_bits(unsigned long field, u32 mask) ++static void vmcs_clear_bits(unsigned long field, unsigned long mask) + { + vmcs_writel(field, vmcs_readl(field) & ~mask); + } + +-static void vmcs_set_bits(unsigned long field, u32 mask) ++static void vmcs_set_bits(unsigned long field, unsigned long mask) + { + vmcs_writel(field, vmcs_readl(field) | mask); + } +@@ -1585,7 +1585,11 @@ static void reload_tss(void) + struct desc_struct *descs; + + descs = (void *)gdt->address; ++ ++ pax_open_kernel(); + descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ ++ pax_close_kernel(); ++ + load_TR_desc(); + } + +@@ -1809,6 +1813,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ + vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ ++#endif ++ + rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); + vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ + vmx->loaded_vmcs->cpu = cpu; +@@ -2098,7 +2106,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) + * reads and returns guest's timestamp counter "register" + * guest_tsc = host_tsc + tsc_offset -- 21.3 + */ +-static u64 guest_read_tsc(void) ++static u64 __intentional_overflow(-1) guest_read_tsc(void) + { + u64 host_tsc, tsc_offset; + +@@ -3024,8 +3032,11 @@ static __init int hardware_setup(void) + if (!cpu_has_vmx_flexpriority()) + flexpriority_enabled = 0; + +- if (!cpu_has_vmx_tpr_shadow()) +- kvm_x86_ops->update_cr8_intercept = NULL; ++ if (!cpu_has_vmx_tpr_shadow()) { ++ pax_open_kernel(); ++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; ++ pax_close_kernel(); ++ } + + if (enable_ept && !cpu_has_vmx_ept_2m_page()) + kvm_disable_largepages(); +@@ -3036,13 +3047,15 @@ static __init int hardware_setup(void) + if (!cpu_has_vmx_apicv()) + enable_apicv = 0; + ++ pax_open_kernel(); + if (enable_apicv) +- 
kvm_x86_ops->update_cr8_intercept = NULL; ++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; + else { +- kvm_x86_ops->hwapic_irr_update = NULL; +- kvm_x86_ops->deliver_posted_interrupt = NULL; +- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy; ++ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL; ++ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL; ++ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy; + } ++ pax_close_kernel(); + + if (nested) + nested_vmx_setup_ctls_msrs(); +@@ -4165,7 +4178,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) + + vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ + vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ ++#endif + + vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ + #ifdef CONFIG_X86_64 +@@ -4187,7 +4203,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) + vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ + vmx->host_idt_base = dt.address; + +- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ ++ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */ + + rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); + vmcs_write32(HOST_IA32_SYSENTER_CS, low32); +@@ -7265,6 +7281,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + "jmp 2f \n\t" + "1: " __ex(ASM_VMX_VMRESUME) "\n\t" + "2: " ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ "ljmp %[cs],$3f\n\t" ++ "3: " ++#endif ++ + /* Save guest registers, load host registers, keep flags */ + "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" + "pop %0 \n\t" +@@ -7317,6 +7339,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + #endif + [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), + [wordsize]"i"(sizeof(ulong)) ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ,[cs]"i"(__KERNEL_CS) ++#endif ++ + : "cc", "memory" + #ifdef CONFIG_X86_64 + , "rax", "rbx", "rdi", "rsi" +@@ -7330,7 +7357,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + if (debugctlmsr) + update_debugctlmsr(debugctlmsr); + +-#ifndef CONFIG_X86_64 ++#ifdef CONFIG_X86_32 + /* + * The sysexit path does not restore ds/es, so we must set them to + * a reasonable value ourselves. +@@ -7339,8 +7366,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + * may be executed in interrupt context, which saves and restore segments + * around it, nullifying its effect. + */ +- loadsegment(ds, __USER_DS); +- loadsegment(es, __USER_DS); ++ loadsegment(ds, __KERNEL_DS); ++ loadsegment(es, __KERNEL_DS); ++ loadsegment(ss, __KERNEL_DS); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ loadsegment(fs, __KERNEL_PERCPU); ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ __set_fs(current_thread_info()->addr_limit); ++#endif ++ + #endif + + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 8fbd1a7..e046eef 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1776,8 +1776,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) + { + struct kvm *kvm = vcpu->kvm; + int lm = is_long_mode(vcpu); +- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 +- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; ++ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64 ++ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32; + u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 + : kvm->arch.xen_hvm_config.blob_size_32; + u32 page_num = data & ~PAGE_MASK; +@@ -2688,6 +2688,8 @@ long kvm_arch_dev_ioctl(struct file *filp, + if (n < msr_list.nmsrs) + goto out; + r = -EFAULT; ++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) ++ goto out; + if (copy_to_user(user_msr_list->indices, &msrs_to_save, + num_msrs_to_save * sizeof(u32))) + goto out; +@@ -5502,7 +5504,7 @@ static struct notifier_block pvclock_gtod_notifier = { + }; + #endif + +-int kvm_arch_init(void *opaque) ++int kvm_arch_init(const void *opaque) + { + int r; + struct kvm_x86_ops *ops = opaque; +diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c +index ad1fb5f..fe30b66 100644 +--- a/arch/x86/lguest/boot.c ++++ b/arch/x86/lguest/boot.c +@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) + * Rebooting also tells the Host we're finished, but the RESTART flag tells the + * Launcher to reboot us. + */ +-static void lguest_restart(char *reason) ++static __noreturn void lguest_restart(char *reason) + { + hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); ++ BUG(); + } + + /*G:050 +diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S +index 00933d5..3a64af9 100644 +--- a/arch/x86/lib/atomic64_386_32.S ++++ b/arch/x86/lib/atomic64_386_32.S +@@ -48,6 +48,10 @@ BEGIN(read) + movl (v), %eax + movl 4(v), %edx + RET_ENDP ++BEGIN(read_unchecked) ++ movl (v), %eax ++ movl 4(v), %edx ++RET_ENDP + #undef v + + #define v %esi +@@ -55,6 +59,10 @@ BEGIN(set) + movl %ebx, (v) + movl %ecx, 4(v) + RET_ENDP ++BEGIN(set_unchecked) ++ movl %ebx, (v) ++ movl %ecx, 4(v) ++RET_ENDP + #undef v + + #define v %esi +@@ -70,6 +78,20 @@ RET_ENDP + BEGIN(add) + addl %eax, (v) + adcl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ subl %eax, (v) ++ sbbl %edx, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(add_unchecked) ++ addl %eax, (v) ++ adcl %edx, 4(v) + RET_ENDP + #undef v + +@@ -77,6 +99,24 @@ RET_ENDP + BEGIN(add_return) + addl (v), %eax + adcl 4(v), %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(add_return_unchecked) ++ addl (v), %eax ++ adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -86,6 +126,20 @@ RET_ENDP + BEGIN(sub) + subl %eax, (v) + sbbl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ addl %eax, (v) ++ adcl %edx, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(sub_unchecked) ++ subl %eax, (v) ++ sbbl %edx, 4(v) + RET_ENDP + #undef v + +@@ -96,6 +150,27 @@ BEGIN(sub_return) + sbbl $0, %edx + addl (v), %eax + adcl 4(v), %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(sub_return_unchecked) ++ negl %edx ++ negl %eax ++ sbbl $0, %edx ++ addl (v), %eax ++ adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -105,6 +180,20 @@ RET_ENDP + BEGIN(inc) + addl $1, (v) + adcl $0, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ subl $1, (v) ++ sbbl $0, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(inc_unchecked) ++ addl $1, (v) ++ adcl $0, 4(v) + RET_ENDP + #undef v + +@@ -114,6 +203,26 @@ BEGIN(inc_return) + movl 4(v), %edx + addl $1, %eax + adcl $0, %edx ++ ++#ifdef 
CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(inc_return_unchecked) ++ movl (v), %eax ++ movl 4(v), %edx ++ addl $1, %eax ++ adcl $0, %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -123,6 +232,20 @@ RET_ENDP + BEGIN(dec) + subl $1, (v) + sbbl $0, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 0f ++ addl $1, (v) ++ adcl $0, 4(v) ++ int $4 ++0: ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++RET_ENDP ++BEGIN(dec_unchecked) ++ subl $1, (v) ++ sbbl $0, 4(v) + RET_ENDP + #undef v + +@@ -132,6 +255,26 @@ BEGIN(dec_return) + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ ++ movl %eax, (v) ++ movl %edx, 4(v) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++2: ++#endif ++ ++RET_ENDP ++BEGIN(dec_return_unchecked) ++ movl (v), %eax ++ movl 4(v), %edx ++ subl $1, %eax ++ sbbl $0, %edx + movl %eax, (v) + movl %edx, 4(v) + RET_ENDP +@@ -143,6 +286,13 @@ BEGIN(add_unless) + adcl %edx, %edi + addl (v), %eax + adcl 4(v), %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ + cmpl %eax, %ecx + je 3f + 1: +@@ -168,6 +318,13 @@ BEGIN(inc_not_zero) + 1: + addl $1, %eax + adcl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ + movl %eax, (v) + movl %edx, 4(v) + movl $1, %eax +@@ -186,6 +343,13 @@ BEGIN(dec_if_positive) + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 1f) ++#endif ++ + js 1f + movl %eax, (v) + movl %edx, 4(v) +diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S +index f5cc9eb..51fa319 100644 +--- a/arch/x86/lib/atomic64_cx8_32.S ++++ b/arch/x86/lib/atomic64_cx8_32.S +@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8) + CFI_STARTPROC + + read64 %ecx ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_read_cx8) + ++ENTRY(atomic64_read_unchecked_cx8) ++ CFI_STARTPROC ++ ++ read64 %ecx ++ pax_force_retaddr ++ ret ++ CFI_ENDPROC ++ENDPROC(atomic64_read_unchecked_cx8) ++ + ENTRY(atomic64_set_cx8) + CFI_STARTPROC + +@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8) + cmpxchg8b (%esi) + jne 1b + ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_set_cx8) + ++ENTRY(atomic64_set_unchecked_cx8) ++ CFI_STARTPROC ++ ++1: ++/* we don't need LOCK_PREFIX since aligned 64-bit writes ++ * are atomic on 586 and newer */ ++ cmpxchg8b (%esi) ++ jne 1b ++ ++ pax_force_retaddr ++ ret ++ CFI_ENDPROC ++ENDPROC(atomic64_set_unchecked_cx8) ++ + ENTRY(atomic64_xchg_cx8) + CFI_STARTPROC + +@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8) + cmpxchg8b (%esi) + jne 1b + ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_xchg_cx8) + +-.macro addsub_return func ins insc +-ENTRY(atomic64_\func\()_return_cx8) ++.macro addsub_return func ins insc unchecked="" ++ENTRY(atomic64_\func\()_return\unchecked\()_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx +@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8) + movl %edx, %ecx + \ins\()l %esi, %ebx + \insc\()l %edi, %ecx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++2: ++ _ASM_EXTABLE(2b, 3f) ++#endif ++.endif ++ + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b +- +-10: + movl %ebx, %eax + movl %ecx, %edx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++3: ++#endif ++.endif ++ + RESTORE edi + RESTORE esi + RESTORE ebx + RESTORE ebp ++ pax_force_retaddr + ret + CFI_ENDPROC 
+-ENDPROC(atomic64_\func\()_return_cx8) ++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8) + .endm + + addsub_return add add adc + addsub_return sub sub sbb ++addsub_return add add adc _unchecked ++addsub_return sub sub sbb _unchecked + +-.macro incdec_return func ins insc +-ENTRY(atomic64_\func\()_return_cx8) ++.macro incdec_return func ins insc unchecked="" ++ENTRY(atomic64_\func\()_return\unchecked\()_cx8) + CFI_STARTPROC + SAVE ebx + +@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8) + movl %edx, %ecx + \ins\()l $1, %ebx + \insc\()l $0, %ecx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++2: ++ _ASM_EXTABLE(2b, 3f) ++#endif ++.endif ++ + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +-10: + movl %ebx, %eax + movl %ecx, %edx ++ ++.ifb \unchecked ++#ifdef CONFIG_PAX_REFCOUNT ++3: ++#endif ++.endif ++ + RESTORE ebx ++ pax_force_retaddr + ret + CFI_ENDPROC +-ENDPROC(atomic64_\func\()_return_cx8) ++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8) + .endm + + incdec_return inc add adc + incdec_return dec sub sbb ++incdec_return inc add adc _unchecked ++incdec_return dec sub sbb _unchecked + + ENTRY(atomic64_dec_if_positive_cx8) + CFI_STARTPROC +@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8) + movl %edx, %ecx + subl $1, %ebx + sbb $0, %ecx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 2f) ++#endif ++ + js 2f + LOCK_PREFIX + cmpxchg8b (%esi) +@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8) + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_dec_if_positive_cx8) +@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8) + movl %edx, %ecx + addl %ebp, %ebx + adcl %edi, %ecx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 3f) ++#endif ++ + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b +@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8) + CFI_ADJUST_CFA_OFFSET -8 + RESTORE ebx + RESTORE ebp ++ pax_force_retaddr + ret + 4: + cmpl %edx, 4(%esp) +@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8) + xorl %ecx, %ecx + addl $1, %ebx + adcl %edx, %ecx ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ into ++1234: ++ _ASM_EXTABLE(1234b, 3f) ++#endif ++ + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b +@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8) + movl $1, %eax + 3: + RESTORE ebx ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(atomic64_inc_not_zero_cx8) +diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S +index e78b8ee..7e173a8 100644 +--- a/arch/x86/lib/checksum_32.S ++++ b/arch/x86/lib/checksum_32.S +@@ -29,7 +29,8 @@ + #include <asm/dwarf2.h> + #include <asm/errno.h> + #include <asm/asm.h> +- ++#include <asm/segment.h> ++ + /* + * computes a partial checksum, e.g. 
for TCP/UDP fragments + */ +@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, + + #define ARGBASE 16 + #define FP 12 +- +-ENTRY(csum_partial_copy_generic) ++ ++ENTRY(csum_partial_copy_generic_to_user) + CFI_STARTPROC ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %es ++ jmp csum_partial_copy_generic ++#endif ++ ++ENTRY(csum_partial_copy_generic_from_user) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %ds ++#endif ++ ++ENTRY(csum_partial_copy_generic) + subl $4,%esp + CFI_ADJUST_CFA_OFFSET 4 + pushl_cfi %edi +@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic) + jmp 4f + SRC(1: movw (%esi), %bx ) + addl $2, %esi +-DST( movw %bx, (%edi) ) ++DST( movw %bx, %es:(%edi) ) + addl $2, %edi + addw %bx, %ax + adcl $0, %eax +@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) ) + SRC(1: movl (%esi), %ebx ) + SRC( movl 4(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, (%edi) ) ++DST( movl %ebx, %es:(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 4(%edi) ) ++DST( movl %edx, %es:4(%edi) ) + + SRC( movl 8(%esi), %ebx ) + SRC( movl 12(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 8(%edi) ) ++DST( movl %ebx, %es:8(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 12(%edi) ) ++DST( movl %edx, %es:12(%edi) ) + + SRC( movl 16(%esi), %ebx ) + SRC( movl 20(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 16(%edi) ) ++DST( movl %ebx, %es:16(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 20(%edi) ) ++DST( movl %edx, %es:20(%edi) ) + + SRC( movl 24(%esi), %ebx ) + SRC( movl 28(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 24(%edi) ) ++DST( movl %ebx, %es:24(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 28(%edi) ) ++DST( movl %edx, %es:28(%edi) ) + + lea 32(%esi), %esi + lea 32(%edi), %edi +@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) ) + shrl $2, %edx # This clears CF + SRC(3: movl (%esi), %ebx ) + adcl %ebx, %eax +-DST( movl %ebx, (%edi) ) ++DST( movl %ebx, %es:(%edi) ) + lea 4(%esi), %esi + lea 4(%edi), %edi + dec %edx +@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) ) + jb 5f + SRC( movw (%esi), %cx ) + leal 2(%esi), %esi +-DST( movw %cx, (%edi) ) ++DST( movw %cx, %es:(%edi) ) + leal 2(%edi), %edi + je 6f + shll $16,%ecx + SRC(5: movb (%esi), %cl ) +-DST( movb %cl, (%edi) ) ++DST( movb %cl, %es:(%edi) ) + 6: addl %ecx, %eax + adcl $0, %eax + 7: +@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) ) + + 6001: + movl ARGBASE+20(%esp), %ebx # src_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + + # zero the complete destination - computing the rest + # is too much work +@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) ) + + 6002: + movl ARGBASE+24(%esp), %ebx # dst_err_ptr +- movl $-EFAULT,(%ebx) ++ movl $-EFAULT,%ss:(%ebx) + jmp 5000b + + .previous + ++ pushl_cfi %ss ++ popl_cfi %ds ++ pushl_cfi %ss ++ popl_cfi %es + popl_cfi %ebx + CFI_RESTORE ebx + popl_cfi %esi +@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) ) + popl_cfi %ecx # equivalent to addl $4,%esp + ret + CFI_ENDPROC +-ENDPROC(csum_partial_copy_generic) ++ENDPROC(csum_partial_copy_generic_to_user) + + #else + + /* Version for PentiumII/PPro */ + + #define ROUND1(x) \ ++ nop; nop; nop; \ + SRC(movl x(%esi), %ebx ) ; \ + addl %ebx, %eax ; \ +- DST(movl %ebx, x(%edi) ) ; ++ DST(movl %ebx, %es:x(%edi)) ; + + #define ROUND(x) \ ++ nop; nop; nop; \ + SRC(movl x(%esi), %ebx ) ; \ + adcl %ebx, %eax ; \ +- DST(movl %ebx, x(%edi) ) ; ++ DST(movl %ebx, %es:x(%edi)) ; + + #define ARGBASE 12 +- +-ENTRY(csum_partial_copy_generic) ++ ++ENTRY(csum_partial_copy_generic_to_user) + CFI_STARTPROC ++ ++#ifdef 
CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %es ++ jmp csum_partial_copy_generic ++#endif ++ ++ENTRY(csum_partial_copy_generic_from_user) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %gs ++ popl_cfi %ds ++#endif ++ ++ENTRY(csum_partial_copy_generic) + pushl_cfi %ebx + CFI_REL_OFFSET ebx, 0 + pushl_cfi %edi +@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic) + subl %ebx, %edi + lea -1(%esi),%edx + andl $-32,%edx +- lea 3f(%ebx,%ebx), %ebx ++ lea 3f(%ebx,%ebx,2), %ebx + testl %esi, %esi + jmp *%ebx + 1: addl $64,%esi +@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic) + jb 5f + SRC( movw (%esi), %dx ) + leal 2(%esi), %esi +-DST( movw %dx, (%edi) ) ++DST( movw %dx, %es:(%edi) ) + leal 2(%edi), %edi + je 6f + shll $16,%edx + 5: + SRC( movb (%esi), %dl ) +-DST( movb %dl, (%edi) ) ++DST( movb %dl, %es:(%edi) ) + 6: addl %edx, %eax + adcl $0, %eax + 7: + .section .fixup, "ax" + 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + # zero the complete destination (computing the rest is too much work) + movl ARGBASE+8(%esp),%edi # dst + movl ARGBASE+12(%esp),%ecx # len +@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) ) + rep; stosb + jmp 7b + 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + jmp 7b + .previous + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ pushl_cfi %ss ++ popl_cfi %ds ++ pushl_cfi %ss ++ popl_cfi %es ++#endif ++ + popl_cfi %esi + CFI_RESTORE esi + popl_cfi %edi +@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) ) + CFI_RESTORE ebx + ret + CFI_ENDPROC +-ENDPROC(csum_partial_copy_generic) ++ENDPROC(csum_partial_copy_generic_to_user) + + #undef ROUND + #undef ROUND1 +diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S +index f2145cf..cea889d 100644 +--- a/arch/x86/lib/clear_page_64.S ++++ b/arch/x86/lib/clear_page_64.S +@@ -11,6 +11,7 @@ ENTRY(clear_page_c) + movl $4096/8,%ecx + xorl %eax,%eax + rep stosq ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(clear_page_c) +@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e) + movl $4096,%ecx + xorl %eax,%eax + rep stosb ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(clear_page_c_e) +@@ -43,6 +45,7 @@ ENTRY(clear_page) + leaq 64(%rdi),%rdi + jnz .Lloop + nop ++ pax_force_retaddr + ret + CFI_ENDPROC + .Lclear_page_end: +@@ -58,7 +61,7 @@ ENDPROC(clear_page) + + #include <asm/cpufeature.h> + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 1: .byte 0xeb /* jmp <disp8> */ + .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ + 2: .byte 0xeb /* jmp <disp8> */ +diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S +index 1e572c5..2a162cd 100644 +--- a/arch/x86/lib/cmpxchg16b_emu.S ++++ b/arch/x86/lib/cmpxchg16b_emu.S +@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu: + + popf + mov $1, %al ++ pax_force_retaddr + ret + + not_same: + popf + xor %al,%al ++ pax_force_retaddr + ret + + CFI_ENDPROC +diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S +index 176cca6..e0d658e 100644 +--- a/arch/x86/lib/copy_page_64.S ++++ b/arch/x86/lib/copy_page_64.S +@@ -9,6 +9,7 @@ copy_page_rep: + CFI_STARTPROC + movl $4096/8, %ecx + rep movsq ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(copy_page_rep) +@@ -24,8 +25,8 @@ ENTRY(copy_page) + CFI_ADJUST_CFA_OFFSET 2*8 + movq %rbx, (%rsp) + CFI_REL_OFFSET rbx, 0 +- movq %r12, 1*8(%rsp) +- CFI_REL_OFFSET r12, 1*8 ++ movq %r13, 1*8(%rsp) ++ CFI_REL_OFFSET r13, 1*8 + + movl $(4096/64)-5, %ecx + .p2align 4 +@@ -38,7 +39,7 @@ 
ENTRY(copy_page) + movq 0x8*4(%rsi), %r9 + movq 0x8*5(%rsi), %r10 + movq 0x8*6(%rsi), %r11 +- movq 0x8*7(%rsi), %r12 ++ movq 0x8*7(%rsi), %r13 + + prefetcht0 5*64(%rsi) + +@@ -49,7 +50,7 @@ ENTRY(copy_page) + movq %r9, 0x8*4(%rdi) + movq %r10, 0x8*5(%rdi) + movq %r11, 0x8*6(%rdi) +- movq %r12, 0x8*7(%rdi) ++ movq %r13, 0x8*7(%rdi) + + leaq 64 (%rsi), %rsi + leaq 64 (%rdi), %rdi +@@ -68,7 +69,7 @@ ENTRY(copy_page) + movq 0x8*4(%rsi), %r9 + movq 0x8*5(%rsi), %r10 + movq 0x8*6(%rsi), %r11 +- movq 0x8*7(%rsi), %r12 ++ movq 0x8*7(%rsi), %r13 + + movq %rax, 0x8*0(%rdi) + movq %rbx, 0x8*1(%rdi) +@@ -77,7 +78,7 @@ ENTRY(copy_page) + movq %r9, 0x8*4(%rdi) + movq %r10, 0x8*5(%rdi) + movq %r11, 0x8*6(%rdi) +- movq %r12, 0x8*7(%rdi) ++ movq %r13, 0x8*7(%rdi) + + leaq 64(%rdi), %rdi + leaq 64(%rsi), %rsi +@@ -85,10 +86,11 @@ ENTRY(copy_page) + + movq (%rsp), %rbx + CFI_RESTORE rbx +- movq 1*8(%rsp), %r12 +- CFI_RESTORE r12 ++ movq 1*8(%rsp), %r13 ++ CFI_RESTORE r13 + addq $2*8, %rsp + CFI_ADJUST_CFA_OFFSET -2*8 ++ pax_force_retaddr + ret + .Lcopy_page_end: + CFI_ENDPROC +@@ -99,7 +101,7 @@ ENDPROC(copy_page) + + #include <asm/cpufeature.h> + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 1: .byte 0xeb /* jmp <disp8> */ + .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */ + 2: +diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S +index dee945d..a84067b 100644 +--- a/arch/x86/lib/copy_user_64.S ++++ b/arch/x86/lib/copy_user_64.S +@@ -18,31 +18,7 @@ + #include <asm/alternative-asm.h> + #include <asm/asm.h> + #include <asm/smap.h> +- +-/* +- * By placing feature2 after feature1 in altinstructions section, we logically +- * implement: +- * If CPU has feature2, jmp to alt2 is used +- * else if CPU has feature1, jmp to alt1 is used +- * else jmp to orig is used. 
+- */ +- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2 +-0: +- .byte 0xe9 /* 32bit jump */ +- .long \orig-1f /* by default jump to orig */ +-1: +- .section .altinstr_replacement,"ax" +-2: .byte 0xe9 /* near jump with 32bit immediate */ +- .long \alt1-1b /* offset */ /* or alternatively to alt1 */ +-3: .byte 0xe9 /* near jump with 32bit immediate */ +- .long \alt2-1b /* offset */ /* or alternatively to alt2 */ +- .previous +- +- .section .altinstructions,"a" +- altinstruction_entry 0b,2b,\feature1,5,5 +- altinstruction_entry 0b,3b,\feature2,5,5 +- .previous +- .endm ++#include <asm/pgtable.h> + + .macro ALIGN_DESTINATION + #ifdef FIX_ALIGNMENT +@@ -70,52 +46,6 @@ + #endif + .endm + +-/* Standard copy_to_user with segment limit checking */ +-ENTRY(_copy_to_user) +- CFI_STARTPROC +- GET_THREAD_INFO(%rax) +- movq %rdi,%rcx +- addq %rdx,%rcx +- jc bad_to_user +- cmpq TI_addr_limit(%rax),%rcx +- ja bad_to_user +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ +- copy_user_generic_unrolled,copy_user_generic_string, \ +- copy_user_enhanced_fast_string +- CFI_ENDPROC +-ENDPROC(_copy_to_user) +- +-/* Standard copy_from_user with segment limit checking */ +-ENTRY(_copy_from_user) +- CFI_STARTPROC +- GET_THREAD_INFO(%rax) +- movq %rsi,%rcx +- addq %rdx,%rcx +- jc bad_from_user +- cmpq TI_addr_limit(%rax),%rcx +- ja bad_from_user +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ +- copy_user_generic_unrolled,copy_user_generic_string, \ +- copy_user_enhanced_fast_string +- CFI_ENDPROC +-ENDPROC(_copy_from_user) +- +- .section .fixup,"ax" +- /* must zero dest */ +-ENTRY(bad_from_user) +-bad_from_user: +- CFI_STARTPROC +- movl %edx,%ecx +- xorl %eax,%eax +- rep +- stosb +-bad_to_user: +- movl %edx,%eax +- ret +- CFI_ENDPROC +-ENDPROC(bad_from_user) +- .previous +- + /* + * copy_user_generic_unrolled - memory copy with exception handling. 
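
The _copy_to_user/_copy_from_user stubs deleted above performed the classic range check before dispatching to the optimized copy routines: add the length to the pointer, reject on carry (wrap-around), then compare against the task's addr_limit. A rough C rendering of that check, with hypothetical names (the real asm compares against TI_addr_limit(%rax)):

    #include <stdbool.h>

    /* mirrors: movq %rdi,%rcx ; addq %rdx,%rcx ; jc bad
     *          cmpq TI_addr_limit(%rax),%rcx   ; ja bad   */
    static bool user_range_ok(unsigned long addr, unsigned long len,
                              unsigned long addr_limit)
    {
            unsigned long end = addr + len;

            if (end < addr)            /* addition wrapped: reject */
                    return false;
            return end <= addr_limit;  /* stay within the user limit */
    }

Under the PaX rework the callers keep their access_ok() checks, and the copy routines themselves gain the ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND bracketing visible in the hunks that follow.
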
+ * This version is for CPUs like P4 that don't have efficient micro +@@ -131,6 +61,7 @@ ENDPROC(bad_from_user) + */ + ENTRY(copy_user_generic_unrolled) + CFI_STARTPROC ++ ASM_PAX_OPEN_USERLAND + ASM_STAC + cmpl $8,%edx + jb 20f /* less then 8 bytes, go to byte copy loop */ +@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled) + jnz 21b + 23: xor %eax,%eax + ASM_CLAC ++ ASM_PAX_CLOSE_USERLAND ++ pax_force_retaddr + ret + + .section .fixup,"ax" +@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled) + */ + ENTRY(copy_user_generic_string) + CFI_STARTPROC ++ ASM_PAX_OPEN_USERLAND + ASM_STAC + cmpl $8,%edx + jb 2f /* less than 8 bytes, go to byte copy loop */ +@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string) + movsb + xorl %eax,%eax + ASM_CLAC ++ ASM_PAX_CLOSE_USERLAND ++ pax_force_retaddr + ret + + .section .fixup,"ax" +@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string) + */ + ENTRY(copy_user_enhanced_fast_string) + CFI_STARTPROC ++ ASM_PAX_OPEN_USERLAND + ASM_STAC + movl %edx,%ecx + 1: rep + movsb + xorl %eax,%eax + ASM_CLAC ++ ASM_PAX_CLOSE_USERLAND ++ pax_force_retaddr + ret + + .section .fixup,"ax" +diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S +index 6a4f43c..c70fb52 100644 +--- a/arch/x86/lib/copy_user_nocache_64.S ++++ b/arch/x86/lib/copy_user_nocache_64.S +@@ -8,6 +8,7 @@ + + #include <linux/linkage.h> + #include <asm/dwarf2.h> ++#include <asm/alternative-asm.h> + + #define FIX_ALIGNMENT 1 + +@@ -16,6 +17,7 @@ + #include <asm/thread_info.h> + #include <asm/asm.h> + #include <asm/smap.h> ++#include <asm/pgtable.h> + + .macro ALIGN_DESTINATION + #ifdef FIX_ALIGNMENT +@@ -49,6 +51,16 @@ + */ + ENTRY(__copy_user_nocache) + CFI_STARTPROC ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov pax_user_shadow_base,%rcx ++ cmp %rcx,%rsi ++ jae 1f ++ add %rcx,%rsi ++1: ++#endif ++ ++ ASM_PAX_OPEN_USERLAND + ASM_STAC + cmpl $8,%edx + jb 20f /* less then 8 bytes, go to byte copy loop */ +@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache) + jnz 21b + 23: xorl %eax,%eax + ASM_CLAC ++ ASM_PAX_CLOSE_USERLAND + sfence ++ pax_force_retaddr + ret + + .section .fixup,"ax" +diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S +index 2419d5f..fe52d0e 100644 +--- a/arch/x86/lib/csum-copy_64.S ++++ b/arch/x86/lib/csum-copy_64.S +@@ -9,6 +9,7 @@ + #include <asm/dwarf2.h> + #include <asm/errno.h> + #include <asm/asm.h> ++#include <asm/alternative-asm.h> + + /* + * Checksum copy with exception handling. +@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic) + CFI_ADJUST_CFA_OFFSET 7*8 + movq %rbx, 2*8(%rsp) + CFI_REL_OFFSET rbx, 2*8 +- movq %r12, 3*8(%rsp) +- CFI_REL_OFFSET r12, 3*8 ++ movq %r15, 3*8(%rsp) ++ CFI_REL_OFFSET r15, 3*8 + movq %r14, 4*8(%rsp) + CFI_REL_OFFSET r14, 4*8 + movq %r13, 5*8(%rsp) +@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic) + movl %edx, %ecx + + xorl %r9d, %r9d +- movq %rcx, %r12 ++ movq %rcx, %r15 + +- shrq $6, %r12 ++ shrq $6, %r15 + jz .Lhandle_tail /* < 64 */ + + clc + + /* main loop. 
clear in 64 byte blocks */ + /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ +- /* r11: temp3, rdx: temp4, r12 loopcnt */ ++ /* r11: temp3, rdx: temp4, r15 loopcnt */ + /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */ + .p2align 4 + .Lloop: +@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic) + adcq %r14, %rax + adcq %r13, %rax + +- decl %r12d ++ decl %r15d + + dest + movq %rbx, (%rsi) +@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic) + .Lende: + movq 2*8(%rsp), %rbx + CFI_RESTORE rbx +- movq 3*8(%rsp), %r12 +- CFI_RESTORE r12 ++ movq 3*8(%rsp), %r15 ++ CFI_RESTORE r15 + movq 4*8(%rsp), %r14 + CFI_RESTORE r14 + movq 5*8(%rsp), %r13 +@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic) + CFI_RESTORE rbp + addq $7*8, %rsp + CFI_ADJUST_CFA_OFFSET -7*8 ++ pax_force_retaddr + ret + CFI_RESTORE_STATE + +diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c +index 7609e0e..b449b98 100644 +--- a/arch/x86/lib/csum-wrappers_64.c ++++ b/arch/x86/lib/csum-wrappers_64.c +@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst, + len -= 2; + } + } ++ pax_open_userland(); + stac(); +- isum = csum_partial_copy_generic((__force const void *)src, ++ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src), + dst, len, isum, errp, NULL); + clac(); ++ pax_close_userland(); + if (unlikely(*errp)) + goto out_err; + +@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst, + } + + *errp = 0; ++ pax_open_userland(); + stac(); +- ret = csum_partial_copy_generic(src, (void __force *)dst, ++ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst), + len, isum, NULL, errp); + clac(); ++ pax_close_userland(); + return ret; + } + EXPORT_SYMBOL(csum_partial_copy_to_user); +diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S +index a451235..1daa956 100644 +--- a/arch/x86/lib/getuser.S ++++ b/arch/x86/lib/getuser.S +@@ -33,17 +33,40 @@ + #include <asm/thread_info.h> + #include <asm/asm.h> + #include <asm/smap.h> ++#include <asm/segment.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg gs; ++#else ++#define __copyuser_seg ++#endif + + .text + ENTRY(__get_user_1) + CFI_STARTPROC ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user + ASM_STAC +-1: movzbl (%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ ++1: __copyuser_seg movzbl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_1) +@@ -51,14 +74,28 @@ ENDPROC(__get_user_1) + ENTRY(__get_user_2) + CFI_STARTPROC + add $1,%_ASM_AX ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + jc bad_get_user + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user + ASM_STAC +-2: movzwl -1(%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ ++2: __copyuser_seg movzwl -1(%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_2) +@@ -66,14 +103,28 @@ 
ENDPROC(__get_user_2) + ENTRY(__get_user_4) + CFI_STARTPROC + add $3,%_ASM_AX ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + jc bad_get_user + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user + ASM_STAC +-3: movl -3(%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ ++3: __copyuser_seg movl -3(%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__get_user_4) +@@ -86,10 +137,20 @@ ENTRY(__get_user_8) + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov pax_user_shadow_base,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ + ASM_STAC + 4: movq -7(%_ASM_AX),%rdx + xor %eax,%eax + ASM_CLAC ++ pax_force_retaddr + ret + #else + add $7,%_ASM_AX +@@ -98,10 +159,11 @@ ENTRY(__get_user_8) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user_8 + ASM_STAC +-4: movl -7(%_ASM_AX),%edx +-5: movl -3(%_ASM_AX),%ecx ++4: __copyuser_seg movl -7(%_ASM_AX),%edx ++5: __copyuser_seg movl -3(%_ASM_AX),%ecx + xor %eax,%eax + ASM_CLAC ++ pax_force_retaddr + ret + #endif + CFI_ENDPROC +@@ -113,6 +175,7 @@ bad_get_user: + xor %edx,%edx + mov $(-EFAULT),%_ASM_AX + ASM_CLAC ++ pax_force_retaddr + ret + CFI_ENDPROC + END(bad_get_user) +@@ -124,6 +187,7 @@ bad_get_user_8: + xor %ecx,%ecx + mov $(-EFAULT),%_ASM_AX + ASM_CLAC ++ pax_force_retaddr + ret + CFI_ENDPROC + END(bad_get_user_8) +diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c +index 54fcffe..7be149e 100644 +--- a/arch/x86/lib/insn.c ++++ b/arch/x86/lib/insn.c +@@ -20,8 +20,10 @@ + + #ifdef __KERNEL__ + #include <linux/string.h> ++#include <asm/pgtable_types.h> + #else + #include <string.h> ++#define ktla_ktva(addr) addr + #endif + #include <asm/inat.h> + #include <asm/insn.h> +@@ -53,8 +55,8 @@ + void insn_init(struct insn *insn, const void *kaddr, int x86_64) + { + memset(insn, 0, sizeof(*insn)); +- insn->kaddr = kaddr; +- insn->next_byte = kaddr; ++ insn->kaddr = ktla_ktva(kaddr); ++ insn->next_byte = ktla_ktva(kaddr); + insn->x86_64 = x86_64 ? 
1 : 0; + insn->opnd_bytes = 4; + if (x86_64) +diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S +index 05a95e7..326f2fa 100644 +--- a/arch/x86/lib/iomap_copy_64.S ++++ b/arch/x86/lib/iomap_copy_64.S +@@ -17,6 +17,7 @@ + + #include <linux/linkage.h> + #include <asm/dwarf2.h> ++#include <asm/alternative-asm.h> + + /* + * override generic version in lib/iomap_copy.c +@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy) + CFI_STARTPROC + movl %edx,%ecx + rep movsd ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(__iowrite32_copy) +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S +index 56313a3..0db417e 100644 +--- a/arch/x86/lib/memcpy_64.S ++++ b/arch/x86/lib/memcpy_64.S +@@ -24,7 +24,7 @@ + * This gets patched over the unrolled variant (below) via the + * alternative instructions framework: + */ +- .section .altinstr_replacement, "ax", @progbits ++ .section .altinstr_replacement, "a", @progbits + .Lmemcpy_c: + movq %rdi, %rax + movq %rdx, %rcx +@@ -33,6 +33,7 @@ + rep movsq + movl %edx, %ecx + rep movsb ++ pax_force_retaddr + ret + .Lmemcpy_e: + .previous +@@ -44,11 +45,12 @@ + * This gets patched over the unrolled variant (below) via the + * alternative instructions framework: + */ +- .section .altinstr_replacement, "ax", @progbits ++ .section .altinstr_replacement, "a", @progbits + .Lmemcpy_c_e: + movq %rdi, %rax + movq %rdx, %rcx + rep movsb ++ pax_force_retaddr + ret + .Lmemcpy_e_e: + .previous +@@ -136,6 +138,7 @@ ENTRY(memcpy) + movq %r9, 1*8(%rdi) + movq %r10, -2*8(%rdi, %rdx) + movq %r11, -1*8(%rdi, %rdx) ++ pax_force_retaddr + retq + .p2align 4 + .Lless_16bytes: +@@ -148,6 +151,7 @@ ENTRY(memcpy) + movq -1*8(%rsi, %rdx), %r9 + movq %r8, 0*8(%rdi) + movq %r9, -1*8(%rdi, %rdx) ++ pax_force_retaddr + retq + .p2align 4 + .Lless_8bytes: +@@ -161,6 +165,7 @@ ENTRY(memcpy) + movl -4(%rsi, %rdx), %r8d + movl %ecx, (%rdi) + movl %r8d, -4(%rdi, %rdx) ++ pax_force_retaddr + retq + .p2align 4 + .Lless_3bytes: +@@ -179,6 +184,7 @@ ENTRY(memcpy) + movb %cl, (%rdi) + + .Lend: ++ pax_force_retaddr + retq + CFI_ENDPROC + ENDPROC(memcpy) +diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S +index 65268a6..dd1de11 100644 +--- a/arch/x86/lib/memmove_64.S ++++ b/arch/x86/lib/memmove_64.S +@@ -202,14 +202,16 @@ ENTRY(memmove) + movb (%rsi), %r11b + movb %r11b, (%rdi) + 13: ++ pax_force_retaddr + retq + CFI_ENDPROC + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + .Lmemmove_begin_forward_efs: + /* Forward moving data. 
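
A recurring change in these lib/*.S files, in memcpy_64.S above and memset_64.S just below, flips the .altinstr_replacement section flags from "ax" to "a". The replacement bytes are only read by the boot-time alternatives patcher and copied over the call site, never executed in place, so under KERNEXEC the section need not be executable. A minimal sketch of that copy step (struct layout simplified; the real alt_instr uses relative offsets and CPU-feature matching):

    #include <string.h>

    struct alt_sketch {
            unsigned char *site;        /* instruction bytes to patch */
            const unsigned char *repl;  /* bytes in .altinstr_replacement */
            unsigned char len;
    };

    /* the replacement section is purely a data source here -- read and
     * copied, never jumped to -- which is all the "a"-only flag allows */
    static void apply_alt_sketch(const struct alt_sketch *a)
    {
            memcpy(a->site, a->repl, a->len);
    }
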
*/ + movq %rdx, %rcx + rep movsb ++ pax_force_retaddr + retq + .Lmemmove_end_forward_efs: + .previous +diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S +index 2dcb380..2eb79fe 100644 +--- a/arch/x86/lib/memset_64.S ++++ b/arch/x86/lib/memset_64.S +@@ -16,7 +16,7 @@ + * + * rax original destination + */ +- .section .altinstr_replacement, "ax", @progbits ++ .section .altinstr_replacement, "a", @progbits + .Lmemset_c: + movq %rdi,%r9 + movq %rdx,%rcx +@@ -30,6 +30,7 @@ + movl %edx,%ecx + rep stosb + movq %r9,%rax ++ pax_force_retaddr + ret + .Lmemset_e: + .previous +@@ -45,13 +46,14 @@ + * + * rax original destination + */ +- .section .altinstr_replacement, "ax", @progbits ++ .section .altinstr_replacement, "a", @progbits + .Lmemset_c_e: + movq %rdi,%r9 + movb %sil,%al + movq %rdx,%rcx + rep stosb + movq %r9,%rax ++ pax_force_retaddr + ret + .Lmemset_e_e: + .previous +@@ -118,6 +120,7 @@ ENTRY(__memset) + + .Lende: + movq %r10,%rax ++ pax_force_retaddr + ret + + CFI_RESTORE_STATE +diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c +index c9f2d9b..e7fd2c0 100644 +--- a/arch/x86/lib/mmx_32.c ++++ b/arch/x86/lib/mmx_32.c +@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) + { + void *p; + int i; ++ unsigned long cr0; + + if (unlikely(in_interrupt())) + return __memcpy(to, from, len); +@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) + kernel_fpu_begin(); + + __asm__ __volatile__ ( +- "1: prefetch (%0)\n" /* This set is 28 bytes */ +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" /* This set is 28 bytes */ ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from)); ++ : "=&r" (cr0) : "r" (from) : "ax"); + + for ( ; i > 5; i--) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movq 8(%0), %%mm1\n" +- " movq 16(%0), %%mm2\n" +- " movq 24(%0), %%mm3\n" +- " movq %%mm0, (%1)\n" +- " movq %%mm1, 8(%1)\n" +- " movq %%mm2, 16(%1)\n" +- " movq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm0\n" +- " movq 40(%0), %%mm1\n" +- " movq 48(%0), %%mm2\n" +- " movq 56(%0), %%mm3\n" +- " movq %%mm0, 32(%1)\n" +- " movq %%mm1, 40(%1)\n" +- " movq %%mm2, 48(%1)\n" +- " movq %%mm3, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movq 8(%1), %%mm1\n" ++ " movq 16(%1), %%mm2\n" ++ " movq 24(%1), %%mm3\n" ++ " movq %%mm0, (%2)\n" ++ " movq %%mm1, 8(%2)\n" ++ " movq %%mm2, 16(%2)\n" ++ " movq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm0\n" ++ " movq 40(%1), %%mm1\n" ++ " movq 48(%1), %%mm2\n" ++ " movq 56(%1), %%mm3\n" ++ " movq %%mm0, 32(%2)\n" ++ " movq %%mm1, 40(%2)\n" ++ " movq %%mm2, 48(%2)\n" ++ " movq %%mm3, 56(%2)\n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" 
++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from), "r" (to) : "memory"); ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +@@ -158,6 +187,7 @@ static void fast_clear_page(void *page) + static void fast_copy_page(void *to, void *from) + { + int i; ++ unsigned long cr0; + + kernel_fpu_begin(); + +@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from) + * but that is for later. -AV + */ + __asm__ __volatile__( +- "1: prefetch (%0)\n" +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from)); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); + + for (i = 0; i < (4096-320)/64; i++) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movntq %%mm0, (%1)\n" +- " movq 8(%0), %%mm1\n" +- " movntq %%mm1, 8(%1)\n" +- " movq 16(%0), %%mm2\n" +- " movntq %%mm2, 16(%1)\n" +- " movq 24(%0), %%mm3\n" +- " movntq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm4\n" +- " movntq %%mm4, 32(%1)\n" +- " movq 40(%0), %%mm5\n" +- " movntq %%mm5, 40(%1)\n" +- " movq 48(%0), %%mm6\n" +- " movntq %%mm6, 48(%1)\n" +- " movq 56(%0), %%mm7\n" +- " movntq %%mm7, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movntq %%mm0, (%2)\n" ++ " movq 8(%1), %%mm1\n" ++ " movntq %%mm1, 8(%2)\n" ++ " movq 16(%1), %%mm2\n" ++ " movntq %%mm2, 16(%2)\n" ++ " movq 24(%1), %%mm3\n" ++ " movntq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm4\n" ++ " movntq %%mm4, 32(%2)\n" ++ " movq 40(%1), %%mm5\n" ++ " movntq %%mm5, 40(%2)\n" ++ " movq 48(%1), %%mm6\n" ++ " movntq %%mm6, 48(%2)\n" ++ " movq 56(%1), %%mm7\n" ++ " movntq %%mm7, 56(%2)\n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +@@ -280,47 +338,76 @@ static void fast_clear_page(void *page) + static void fast_copy_page(void *to, void *from) + { + int i; ++ unsigned long cr0; + + kernel_fpu_begin(); + + __asm__ __volatile__ ( +- "1: prefetch (%0)\n" +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef 
CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from)); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); + + for (i = 0; i < 4096/64; i++) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movq 8(%0), %%mm1\n" +- " movq 16(%0), %%mm2\n" +- " movq 24(%0), %%mm3\n" +- " movq %%mm0, (%1)\n" +- " movq %%mm1, 8(%1)\n" +- " movq %%mm2, 16(%1)\n" +- " movq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm0\n" +- " movq 40(%0), %%mm1\n" +- " movq 48(%0), %%mm2\n" +- " movq 56(%0), %%mm3\n" +- " movq %%mm0, 32(%1)\n" +- " movq %%mm1, 40(%1)\n" +- " movq %%mm2, 48(%1)\n" +- " movq %%mm3, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movq 8(%1), %%mm1\n" ++ " movq 16(%1), %%mm2\n" ++ " movq 24(%1), %%mm3\n" ++ " movq %%mm0, (%2)\n" ++ " movq %%mm1, 8(%2)\n" ++ " movq %%mm2, 16(%2)\n" ++ " movq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm0\n" ++ " movq 40(%1), %%mm1\n" ++ " movq 48(%1), %%mm2\n" ++ " movq 56(%1), %%mm3\n" ++ " movq %%mm0, 32(%2)\n" ++ " movq %%mm1, 40(%2)\n" ++ " movq %%mm2, 48(%2)\n" ++ " movq %%mm3, 56(%2)\n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from), "r" (to) : "memory"); ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S +index f6d13ee..d789440 100644 +--- a/arch/x86/lib/msr-reg.S ++++ b/arch/x86/lib/msr-reg.S +@@ -3,6 +3,7 @@ + #include <asm/dwarf2.h> + #include <asm/asm.h> + #include <asm/msr.h> ++#include <asm/alternative-asm.h> + + #ifdef CONFIG_X86_64 + /* +@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs) + movl %edi, 28(%r10) + popq_cfi %rbp + popq_cfi %rbx ++ pax_force_retaddr + ret + 3: + CFI_RESTORE_STATE +diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S +index fc6ba17..d4d989d 100644 +--- a/arch/x86/lib/putuser.S ++++ b/arch/x86/lib/putuser.S +@@ -16,7 +16,9 @@ + #include <asm/errno.h> + #include <asm/asm.h> + #include <asm/smap.h> +- ++#include <asm/segment.h> ++#include <asm/pgtable.h> ++#include <asm/alternative-asm.h> + + /* + * __put_user_X +@@ -30,57 +32,125 @@ + * as they get called from within inline assembly. 
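
The __get_user stubs earlier and the __put_user stubs below share one UDEREF idea on amd64: userland is additionally mapped at pax_user_shadow_base, and a raw user pointer below that base is shifted into the shadow mapping before the access (get_user adds the base in place; put_user gets the same effect by keeping or zeroing the index register of its base+index operand). In C terms, roughly:

    /* assumption: symbol provided by the PaX kernel under UDEREF */
    extern unsigned long pax_user_shadow_base;

    static inline unsigned long uderef_adjust(unsigned long uaddr)
    {
            /* cmp %base,%addr ; jae skip ; add %base,%addr */
            if (uaddr < pax_user_shadow_base)
                    uaddr += pax_user_shadow_base;
            return uaddr;  /* now points into the shadow user mapping */
    }
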
+ */ + +-#define ENTER CFI_STARTPROC ; \ +- GET_THREAD_INFO(%_ASM_BX) +-#define EXIT ASM_CLAC ; \ +- ret ; \ ++#define ENTER CFI_STARTPROC ++#define EXIT ASM_CLAC ; \ ++ pax_force_retaddr ; \ ++ ret ; \ + CFI_ENDPROC + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define _DEST %_ASM_CX,%_ASM_BX ++#else ++#define _DEST %_ASM_CX ++#endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define __copyuser_seg gs; ++#else ++#define __copyuser_seg ++#endif ++ + .text + ENTRY(__put_user_1) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + cmp TI_addr_limit(%_ASM_BX),%_ASM_CX + jae bad_put_user + ASM_STAC +-1: movb %al,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++1: __copyuser_seg movb %al,(_DEST) + xor %eax,%eax + EXIT + ENDPROC(__put_user_1) + + ENTRY(__put_user_2) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $1,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user + ASM_STAC +-2: movw %ax,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++2: __copyuser_seg movw %ax,(_DEST) + xor %eax,%eax + EXIT + ENDPROC(__put_user_2) + + ENTRY(__put_user_4) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $3,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user + ASM_STAC +-3: movl %eax,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++3: __copyuser_seg movl %eax,(_DEST) + xor %eax,%eax + EXIT + ENDPROC(__put_user_4) + + ENTRY(__put_user_8) + ENTER ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $7,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user + ASM_STAC +-4: mov %_ASM_AX,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov pax_user_shadow_base,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++4: __copyuser_seg mov %_ASM_AX,(_DEST) + #ifdef CONFIG_X86_32 +-5: movl %edx,4(%_ASM_CX) ++5: __copyuser_seg movl %edx,4(_DEST) + #endif + xor %eax,%eax + EXIT +diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S +index 1cad221..de671ee 100644 +--- a/arch/x86/lib/rwlock.S ++++ b/arch/x86/lib/rwlock.S +@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed) + FRAME + 0: LOCK_PREFIX + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + 1: rep; nop + cmpl $WRITE_LOCK_CMP, (%__lock_ptr) + jne 1b + LOCK_PREFIX + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + jnz 0b + ENDFRAME ++ pax_force_retaddr + ret + CFI_ENDPROC + END(__write_lock_failed) +@@ -32,13 
+53,34 @@ ENTRY(__read_lock_failed) + FRAME + 0: LOCK_PREFIX + READ_LOCK_SIZE(inc) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ READ_LOCK_SIZE(dec) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + 1: rep; nop + READ_LOCK_SIZE(cmp) $1, (%__lock_ptr) + js 1b + LOCK_PREFIX + READ_LOCK_SIZE(dec) (%__lock_ptr) ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ jno 1234f ++ LOCK_PREFIX ++ READ_LOCK_SIZE(inc) (%__lock_ptr) ++ int $4 ++1234: ++ _ASM_EXTABLE(1234b, 1234b) ++#endif ++ + js 0b + ENDFRAME ++ pax_force_retaddr + ret + CFI_ENDPROC + END(__read_lock_failed) +diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S +index 5dff5f0..cadebf4 100644 +--- a/arch/x86/lib/rwsem.S ++++ b/arch/x86/lib/rwsem.S +@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed) + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) + CFI_RESTORE __ASM_REG(dx) + restore_common_regs ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(call_rwsem_down_read_failed) +@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed) + movq %rax,%rdi + call rwsem_down_write_failed + restore_common_regs ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(call_rwsem_down_write_failed) +@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake) + movq %rax,%rdi + call rwsem_wake + restore_common_regs +-1: ret ++1: pax_force_retaddr ++ ret + CFI_ENDPROC + ENDPROC(call_rwsem_wake) + +@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake) + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) + CFI_RESTORE __ASM_REG(dx) + restore_common_regs ++ pax_force_retaddr + ret + CFI_ENDPROC + ENDPROC(call_rwsem_downgrade_wake) +diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S +index a63efd6..8149fbe 100644 +--- a/arch/x86/lib/thunk_64.S ++++ b/arch/x86/lib/thunk_64.S +@@ -8,6 +8,7 @@ + #include <linux/linkage.h> + #include <asm/dwarf2.h> + #include <asm/calling.h> ++#include <asm/alternative-asm.h> + + /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ + .macro THUNK name, func, put_ret_addr_in_rdi=0 +@@ -15,11 +16,11 @@ + \name: + CFI_STARTPROC + +- /* this one pushes 9 elems, the next one would be %rIP */ +- SAVE_ARGS ++ /* this one pushes 15+1 elems, the next one would be %rIP */ ++ SAVE_ARGS 8 + + .if \put_ret_addr_in_rdi +- movq_cfi_restore 9*8, rdi ++ movq_cfi_restore RIP, rdi + .endif + + call \func +@@ -38,8 +39,9 @@ + + /* SAVE_ARGS below is used only for the .cfi directives it contains. 
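
The PAX_REFCOUNT hunks in rwlock.S above all follow one pattern: perform the locked operation, jno past the fixup when no signed overflow occurred, otherwise undo the operation and raise the overflow trap (int $4, the same vector the into instruction reaches). A hedged C analogue using GCC atomics; the kernel does this in asm so the undo-and-trap path costs the hot path nothing:

    /* detect signed overflow of an atomic add, undo it, and trap --
     * mirroring "lock add ; jno ok ; lock sub ; int $4 ; ok:" */
    static void refcount_add_checked(int *counter, int v)
    {
            unsigned int old = (unsigned int)__atomic_fetch_add(
                            counter, v, __ATOMIC_SEQ_CST);
            unsigned int sum = old + (unsigned int)v;

            /* overflow iff operands share a sign and the result does not */
            if (~(old ^ (unsigned int)v) & (old ^ sum) & 0x80000000u) {
                    __atomic_fetch_sub(counter, v, __ATOMIC_SEQ_CST);
                    __builtin_trap();  /* stand-in for int $4 */
            }
    }
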
*/ + CFI_STARTPROC +- SAVE_ARGS ++ SAVE_ARGS 8 + restore: +- RESTORE_ARGS ++ RESTORE_ARGS 1,8 ++ pax_force_retaddr + ret + CFI_ENDPROC +diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c +index e2f5e21..4b22130 100644 +--- a/arch/x86/lib/usercopy_32.c ++++ b/arch/x86/lib/usercopy_32.c +@@ -42,11 +42,13 @@ do { \ + int __d0; \ + might_fault(); \ + __asm__ __volatile__( \ ++ __COPYUSER_SET_ES \ + ASM_STAC "\n" \ + "0: rep; stosl\n" \ + " movl %2,%0\n" \ + "1: rep; stosb\n" \ + "2: " ASM_CLAC "\n" \ ++ __COPYUSER_RESTORE_ES \ + ".section .fixup,\"ax\"\n" \ + "3: lea 0(%2,%0,4),%0\n" \ + " jmp 2b\n" \ +@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user); + + #ifdef CONFIG_X86_INTEL_USERCOPY + static unsigned long +-__copy_user_intel(void __user *to, const void *from, unsigned long size) ++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) + { + int d0, d1; + __asm__ __volatile__( +@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) + " .align 2,0x90\n" + "3: movl 0(%4), %%eax\n" + "4: movl 4(%4), %%edx\n" +- "5: movl %%eax, 0(%3)\n" +- "6: movl %%edx, 4(%3)\n" ++ "5: "__copyuser_seg" movl %%eax, 0(%3)\n" ++ "6: "__copyuser_seg" movl %%edx, 4(%3)\n" + "7: movl 8(%4), %%eax\n" + "8: movl 12(%4),%%edx\n" +- "9: movl %%eax, 8(%3)\n" +- "10: movl %%edx, 12(%3)\n" ++ "9: "__copyuser_seg" movl %%eax, 8(%3)\n" ++ "10: "__copyuser_seg" movl %%edx, 12(%3)\n" + "11: movl 16(%4), %%eax\n" + "12: movl 20(%4), %%edx\n" +- "13: movl %%eax, 16(%3)\n" +- "14: movl %%edx, 20(%3)\n" ++ "13: "__copyuser_seg" movl %%eax, 16(%3)\n" ++ "14: "__copyuser_seg" movl %%edx, 20(%3)\n" + "15: movl 24(%4), %%eax\n" + "16: movl 28(%4), %%edx\n" +- "17: movl %%eax, 24(%3)\n" +- "18: movl %%edx, 28(%3)\n" ++ "17: "__copyuser_seg" movl %%eax, 24(%3)\n" ++ "18: "__copyuser_seg" movl %%edx, 28(%3)\n" + "19: movl 32(%4), %%eax\n" + "20: movl 36(%4), %%edx\n" +- "21: movl %%eax, 32(%3)\n" +- "22: movl %%edx, 36(%3)\n" ++ "21: "__copyuser_seg" movl %%eax, 32(%3)\n" ++ "22: "__copyuser_seg" movl %%edx, 36(%3)\n" + "23: movl 40(%4), %%eax\n" + "24: movl 44(%4), %%edx\n" +- "25: movl %%eax, 40(%3)\n" +- "26: movl %%edx, 44(%3)\n" ++ "25: "__copyuser_seg" movl %%eax, 40(%3)\n" ++ "26: "__copyuser_seg" movl %%edx, 44(%3)\n" + "27: movl 48(%4), %%eax\n" + "28: movl 52(%4), %%edx\n" +- "29: movl %%eax, 48(%3)\n" +- "30: movl %%edx, 52(%3)\n" ++ "29: "__copyuser_seg" movl %%eax, 48(%3)\n" ++ "30: "__copyuser_seg" movl %%edx, 52(%3)\n" + "31: movl 56(%4), %%eax\n" + "32: movl 60(%4), %%edx\n" +- "33: movl %%eax, 56(%3)\n" +- "34: movl %%edx, 60(%3)\n" ++ "33: "__copyuser_seg" movl %%eax, 56(%3)\n" ++ "34: "__copyuser_seg" movl %%edx, 60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" +@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" ++ __COPYUSER_SET_ES + "99: rep; movsl\n" + "36: movl %%eax, %0\n" + "37: rep; movsb\n" + "100:\n" ++ __COPYUSER_RESTORE_ES ++ ".section .fixup,\"ax\"\n" ++ "101: lea 0(%%eax,%0,4),%0\n" ++ " jmp 100b\n" ++ ".previous\n" ++ _ASM_EXTABLE(1b,100b) ++ _ASM_EXTABLE(2b,100b) ++ _ASM_EXTABLE(3b,100b) ++ _ASM_EXTABLE(4b,100b) ++ _ASM_EXTABLE(5b,100b) ++ _ASM_EXTABLE(6b,100b) ++ _ASM_EXTABLE(7b,100b) ++ _ASM_EXTABLE(8b,100b) ++ _ASM_EXTABLE(9b,100b) ++ _ASM_EXTABLE(10b,100b) ++ _ASM_EXTABLE(11b,100b) ++ _ASM_EXTABLE(12b,100b) ++ _ASM_EXTABLE(13b,100b) ++ _ASM_EXTABLE(14b,100b) ++ _ASM_EXTABLE(15b,100b) ++ 
_ASM_EXTABLE(16b,100b) ++ _ASM_EXTABLE(17b,100b) ++ _ASM_EXTABLE(18b,100b) ++ _ASM_EXTABLE(19b,100b) ++ _ASM_EXTABLE(20b,100b) ++ _ASM_EXTABLE(21b,100b) ++ _ASM_EXTABLE(22b,100b) ++ _ASM_EXTABLE(23b,100b) ++ _ASM_EXTABLE(24b,100b) ++ _ASM_EXTABLE(25b,100b) ++ _ASM_EXTABLE(26b,100b) ++ _ASM_EXTABLE(27b,100b) ++ _ASM_EXTABLE(28b,100b) ++ _ASM_EXTABLE(29b,100b) ++ _ASM_EXTABLE(30b,100b) ++ _ASM_EXTABLE(31b,100b) ++ _ASM_EXTABLE(32b,100b) ++ _ASM_EXTABLE(33b,100b) ++ _ASM_EXTABLE(34b,100b) ++ _ASM_EXTABLE(35b,100b) ++ _ASM_EXTABLE(36b,100b) ++ _ASM_EXTABLE(37b,100b) ++ _ASM_EXTABLE(99b,101b) ++ : "=&c"(size), "=&D" (d0), "=&S" (d1) ++ : "1"(to), "2"(from), "0"(size) ++ : "eax", "edx", "memory"); ++ return size; ++} ++ ++static unsigned long ++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) ++{ ++ int d0, d1; ++ __asm__ __volatile__( ++ " .align 2,0x90\n" ++ "1: "__copyuser_seg" movl 32(%4), %%eax\n" ++ " cmpl $67, %0\n" ++ " jbe 3f\n" ++ "2: "__copyuser_seg" movl 64(%4), %%eax\n" ++ " .align 2,0x90\n" ++ "3: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "4: "__copyuser_seg" movl 4(%4), %%edx\n" ++ "5: movl %%eax, 0(%3)\n" ++ "6: movl %%edx, 4(%3)\n" ++ "7: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "8: "__copyuser_seg" movl 12(%4),%%edx\n" ++ "9: movl %%eax, 8(%3)\n" ++ "10: movl %%edx, 12(%3)\n" ++ "11: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "12: "__copyuser_seg" movl 20(%4), %%edx\n" ++ "13: movl %%eax, 16(%3)\n" ++ "14: movl %%edx, 20(%3)\n" ++ "15: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "16: "__copyuser_seg" movl 28(%4), %%edx\n" ++ "17: movl %%eax, 24(%3)\n" ++ "18: movl %%edx, 28(%3)\n" ++ "19: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "20: "__copyuser_seg" movl 36(%4), %%edx\n" ++ "21: movl %%eax, 32(%3)\n" ++ "22: movl %%edx, 36(%3)\n" ++ "23: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "24: "__copyuser_seg" movl 44(%4), %%edx\n" ++ "25: movl %%eax, 40(%3)\n" ++ "26: movl %%edx, 44(%3)\n" ++ "27: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "28: "__copyuser_seg" movl 52(%4), %%edx\n" ++ "29: movl %%eax, 48(%3)\n" ++ "30: movl %%edx, 52(%3)\n" ++ "31: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "32: "__copyuser_seg" movl 60(%4), %%edx\n" ++ "33: movl %%eax, 56(%3)\n" ++ "34: movl %%edx, 60(%3)\n" ++ " addl $-64, %0\n" ++ " addl $64, %4\n" ++ " addl $64, %3\n" ++ " cmpl $63, %0\n" ++ " ja 1b\n" ++ "35: movl %0, %%eax\n" ++ " shrl $2, %0\n" ++ " andl $3, %%eax\n" ++ " cld\n" ++ "99: rep; "__copyuser_seg" movsl\n" ++ "36: movl %%eax, %0\n" ++ "37: rep; "__copyuser_seg" movsb\n" ++ "100:\n" + ".section .fixup,\"ax\"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" +@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) + int d0, d1; + __asm__ __volatile__( + " .align 2,0x90\n" +- "0: movl 32(%4), %%eax\n" ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 2f\n" +- "1: movl 64(%4), %%eax\n" ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" +- "2: movl 0(%4), %%eax\n" +- "21: movl 4(%4), %%edx\n" ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n" + " movl %%eax, 0(%3)\n" + " movl %%edx, 4(%3)\n" +- "3: movl 8(%4), %%eax\n" +- "31: movl 12(%4),%%edx\n" ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n" + " movl %%eax, 8(%3)\n" + " movl %%edx, 12(%3)\n" +- "4: movl 16(%4), %%eax\n" +- "41: movl 20(%4), %%edx\n" ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "41: "__copyuser_seg" movl 20(%4), 
%%edx\n" + " movl %%eax, 16(%3)\n" + " movl %%edx, 20(%3)\n" +- "10: movl 24(%4), %%eax\n" +- "51: movl 28(%4), %%edx\n" ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n" + " movl %%eax, 24(%3)\n" + " movl %%edx, 28(%3)\n" +- "11: movl 32(%4), %%eax\n" +- "61: movl 36(%4), %%edx\n" ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n" + " movl %%eax, 32(%3)\n" + " movl %%edx, 36(%3)\n" +- "12: movl 40(%4), %%eax\n" +- "71: movl 44(%4), %%edx\n" ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n" + " movl %%eax, 40(%3)\n" + " movl %%edx, 44(%3)\n" +- "13: movl 48(%4), %%eax\n" +- "81: movl 52(%4), %%edx\n" ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n" + " movl %%eax, 48(%3)\n" + " movl %%edx, 52(%3)\n" +- "14: movl 56(%4), %%eax\n" +- "91: movl 60(%4), %%edx\n" ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n" + " movl %%eax, 56(%3)\n" + " movl %%edx, 60(%3)\n" + " addl $-64, %0\n" +@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" +- "6: rep; movsl\n" ++ "6: rep; "__copyuser_seg" movsl\n" + " movl %%eax,%0\n" +- "7: rep; movsb\n" ++ "7: rep; "__copyuser_seg" movsb\n" + "8:\n" + ".section .fixup,\"ax\"\n" + "9: lea 0(%%eax,%0,4),%0\n" +@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, + + __asm__ __volatile__( + " .align 2,0x90\n" +- "0: movl 32(%4), %%eax\n" ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 2f\n" +- "1: movl 64(%4), %%eax\n" ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" +- "2: movl 0(%4), %%eax\n" +- "21: movl 4(%4), %%edx\n" ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n" + " movnti %%eax, 0(%3)\n" + " movnti %%edx, 4(%3)\n" +- "3: movl 8(%4), %%eax\n" +- "31: movl 12(%4),%%edx\n" ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n" + " movnti %%eax, 8(%3)\n" + " movnti %%edx, 12(%3)\n" +- "4: movl 16(%4), %%eax\n" +- "41: movl 20(%4), %%edx\n" ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n" + " movnti %%eax, 16(%3)\n" + " movnti %%edx, 20(%3)\n" +- "10: movl 24(%4), %%eax\n" +- "51: movl 28(%4), %%edx\n" ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n" + " movnti %%eax, 24(%3)\n" + " movnti %%edx, 28(%3)\n" +- "11: movl 32(%4), %%eax\n" +- "61: movl 36(%4), %%edx\n" ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n" + " movnti %%eax, 32(%3)\n" + " movnti %%edx, 36(%3)\n" +- "12: movl 40(%4), %%eax\n" +- "71: movl 44(%4), %%edx\n" ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n" + " movnti %%eax, 40(%3)\n" + " movnti %%edx, 44(%3)\n" +- "13: movl 48(%4), %%eax\n" +- "81: movl 52(%4), %%edx\n" ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n" + " movnti %%eax, 48(%3)\n" + " movnti %%edx, 52(%3)\n" +- "14: movl 56(%4), %%eax\n" +- "91: movl 60(%4), %%edx\n" ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n" + " movnti %%eax, 56(%3)\n" + " movnti %%edx, 60(%3)\n" + " addl $-64, %0\n" +@@ -352,9 +460,9 @@ static unsigned long 
__copy_user_zeroing_intel_nocache(void *to, + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" +- "6: rep; movsl\n" ++ "6: rep; "__copyuser_seg" movsl\n" + " movl %%eax,%0\n" +- "7: rep; movsb\n" ++ "7: rep; "__copyuser_seg" movsb\n" + "8:\n" + ".section .fixup,\"ax\"\n" + "9: lea 0(%%eax,%0,4),%0\n" +@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to, + + __asm__ __volatile__( + " .align 2,0x90\n" +- "0: movl 32(%4), %%eax\n" ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 2f\n" +- "1: movl 64(%4), %%eax\n" ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" +- "2: movl 0(%4), %%eax\n" +- "21: movl 4(%4), %%edx\n" ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n" ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n" + " movnti %%eax, 0(%3)\n" + " movnti %%edx, 4(%3)\n" +- "3: movl 8(%4), %%eax\n" +- "31: movl 12(%4),%%edx\n" ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n" ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n" + " movnti %%eax, 8(%3)\n" + " movnti %%edx, 12(%3)\n" +- "4: movl 16(%4), %%eax\n" +- "41: movl 20(%4), %%edx\n" ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n" ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n" + " movnti %%eax, 16(%3)\n" + " movnti %%edx, 20(%3)\n" +- "10: movl 24(%4), %%eax\n" +- "51: movl 28(%4), %%edx\n" ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n" ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n" + " movnti %%eax, 24(%3)\n" + " movnti %%edx, 28(%3)\n" +- "11: movl 32(%4), %%eax\n" +- "61: movl 36(%4), %%edx\n" ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n" ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n" + " movnti %%eax, 32(%3)\n" + " movnti %%edx, 36(%3)\n" +- "12: movl 40(%4), %%eax\n" +- "71: movl 44(%4), %%edx\n" ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n" ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n" + " movnti %%eax, 40(%3)\n" + " movnti %%edx, 44(%3)\n" +- "13: movl 48(%4), %%eax\n" +- "81: movl 52(%4), %%edx\n" ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n" ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n" + " movnti %%eax, 48(%3)\n" + " movnti %%edx, 52(%3)\n" +- "14: movl 56(%4), %%eax\n" +- "91: movl 60(%4), %%edx\n" ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n" ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n" + " movnti %%eax, 56(%3)\n" + " movnti %%edx, 60(%3)\n" + " addl $-64, %0\n" +@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to, + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" +- "6: rep; movsl\n" ++ "6: rep; "__copyuser_seg" movsl\n" + " movl %%eax,%0\n" +- "7: rep; movsb\n" ++ "7: rep; "__copyuser_seg" movsb\n" + "8:\n" + ".section .fixup,\"ax\"\n" + "9: lea 0(%%eax,%0,4),%0\n" +@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to, + */ + unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, + unsigned long size); +-unsigned long __copy_user_intel(void __user *to, const void *from, ++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, ++ unsigned long size); ++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, + unsigned long size); + unsigned long __copy_user_zeroing_intel_nocache(void *to, + const void __user *from, unsigned long size); + #endif /* CONFIG_X86_INTEL_USERCOPY */ + + /* Generic arbitrary sized copy. 
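
The segment override used throughout the rewritten Intel copy loops above, and parametrized into the __copy_user macro just below, leans on C's adjacent-string-literal pasting: passing "gs;" (or nothing) as a macro argument splices the override in front of each instruction in the asm template. A standalone demonstration (PREFIX/INSN are illustrative names, not the kernel's):

    #include <stdio.h>

    #define PREFIX "gs;"
    #define INSN(prefix) "0: rep; " prefix "movsl\n"

    int main(void)
    {
            /* adjacent string literals merge at compile time, exactly
             * how the patched asm templates gain a per-insn override */
            fputs(INSN(PREFIX), stdout);  /* 0: rep; gs;movsl */
            fputs(INSN(""), stdout);      /* 0: rep; movsl    */
            return 0;
    }
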
*/ +-#define __copy_user(to, from, size) \ ++#define __copy_user(to, from, size, prefix, set, restore) \ + do { \ + int __d0, __d1, __d2; \ + __asm__ __volatile__( \ ++ set \ + " cmp $7,%0\n" \ + " jbe 1f\n" \ + " movl %1,%0\n" \ + " negl %0\n" \ + " andl $7,%0\n" \ + " subl %0,%3\n" \ +- "4: rep; movsb\n" \ ++ "4: rep; "prefix"movsb\n" \ + " movl %3,%0\n" \ + " shrl $2,%0\n" \ + " andl $3,%3\n" \ + " .align 2,0x90\n" \ +- "0: rep; movsl\n" \ ++ "0: rep; "prefix"movsl\n" \ + " movl %3,%0\n" \ +- "1: rep; movsb\n" \ ++ "1: rep; "prefix"movsb\n" \ + "2:\n" \ ++ restore \ + ".section .fixup,\"ax\"\n" \ + "5: addl %3,%0\n" \ + " jmp 2b\n" \ +@@ -538,14 +650,14 @@ do { \ + " negl %0\n" \ + " andl $7,%0\n" \ + " subl %0,%3\n" \ +- "4: rep; movsb\n" \ ++ "4: rep; "__copyuser_seg"movsb\n" \ + " movl %3,%0\n" \ + " shrl $2,%0\n" \ + " andl $3,%3\n" \ + " .align 2,0x90\n" \ +- "0: rep; movsl\n" \ ++ "0: rep; "__copyuser_seg"movsl\n" \ + " movl %3,%0\n" \ +- "1: rep; movsb\n" \ ++ "1: rep; "__copyuser_seg"movsb\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "5: addl %3,%0\n" \ +@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, + { + stac(); + if (movsl_is_ok(to, from, n)) +- __copy_user(to, from, n); ++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES); + else +- n = __copy_user_intel(to, from, n); ++ n = __generic_copy_to_user_intel(to, from, n); + clac(); + return n; + } +@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, + { + stac(); + if (movsl_is_ok(to, from, n)) +- __copy_user(to, from, n); ++ __copy_user(to, from, n, __copyuser_seg, "", ""); + else +- n = __copy_user_intel((void __user *)to, +- (const void *)from, n); ++ n = __generic_copy_from_user_intel(to, from, n); + clac(); + return n; + } +@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr + if (n > 64 && cpu_has_xmm2) + n = __copy_user_intel_nocache(to, from, n); + else +- __copy_user(to, from, n); ++ __copy_user(to, from, n, __copyuser_seg, "", ""); + #else +- __copy_user(to, from, n); ++ __copy_user(to, from, n, __copyuser_seg, "", ""); + #endif + clac(); + return n; + } + EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); + +-/** +- * copy_to_user: - Copy a block of data into user space. +- * @to: Destination address, in user space. +- * @from: Source address, in kernel space. +- * @n: Number of bytes to copy. +- * +- * Context: User context only. This function may sleep. +- * +- * Copy data from kernel space to user space. +- * +- * Returns number of bytes that could not be copied. +- * On success, this will be zero. +- */ +-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n) ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++void __set_fs(mm_segment_t x) + { +- if (access_ok(VERIFY_WRITE, to, n)) +- n = __copy_to_user(to, from, n); +- return n; ++ switch (x.seg) { ++ case 0: ++ loadsegment(gs, 0); ++ break; ++ case TASK_SIZE_MAX: ++ loadsegment(gs, __USER_DS); ++ break; ++ case -1UL: ++ loadsegment(gs, __KERNEL_DS); ++ break; ++ default: ++ BUG(); ++ } + } +-EXPORT_SYMBOL(_copy_to_user); ++EXPORT_SYMBOL(__set_fs); + +-/** +- * copy_from_user: - Copy a block of data from user space. +- * @to: Destination address, in kernel space. +- * @from: Source address, in user space. +- * @n: Number of bytes to copy. +- * +- * Context: User context only. This function may sleep. +- * +- * Copy data from user space to kernel space. 
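
The __set_fs above is the i386 UDEREF counterpart of the amd64 shadow base: the task's addr_limit selects which segment %gs mirrors, so every "gs;"-prefixed access is bounded accordingly. Summarizing the switch:

    x.seg            loadsegment(gs, ...)   effective meaning
    0                0 (null selector)      userland access disabled
    TASK_SIZE_MAX    __USER_DS              USER_DS: userland only
    -1UL             __KERNEL_DS            KERNEL_DS: whole address space
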
+- * +- * Returns number of bytes that could not be copied. +- * On success, this will be zero. +- * +- * If some data could not be copied, this function will pad the copied +- * data to the requested size using zero bytes. +- */ +-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n) ++void set_fs(mm_segment_t x) + { +- if (access_ok(VERIFY_READ, from, n)) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; ++ current_thread_info()->addr_limit = x; ++ __set_fs(x); + } +-EXPORT_SYMBOL(_copy_from_user); ++EXPORT_SYMBOL(set_fs); ++#endif +diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c +index c905e89..01ab928 100644 +--- a/arch/x86/lib/usercopy_64.c ++++ b/arch/x86/lib/usercopy_64.c +@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) + might_fault(); + /* no memory constraint because it doesn't change any memory gcc knows + about */ ++ pax_open_userland(); + stac(); + asm volatile( + " testq %[size8],%[size8]\n" +@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size) + _ASM_EXTABLE(0b,3b) + _ASM_EXTABLE(1b,2b) + : [size8] "=&c"(size), [dst] "=&D" (__d0) +- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), ++ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)), + [zero] "r" (0UL), [eight] "r" (8UL)); + clac(); ++ pax_close_userland(); + return size; + } + EXPORT_SYMBOL(__clear_user); +@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n) + } + EXPORT_SYMBOL(clear_user); + +-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) ++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len) + { +- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { +- return copy_user_generic((__force void *)to, (__force void *)from, len); +- } +- return len; ++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) ++ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len); ++ return len; + } + EXPORT_SYMBOL(copy_in_user); + +@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user); + * it is not necessary to optimize tail handling. 
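
copy_user_handle_tail, defined just below, recovers from a faulting bulk copy by retrying byte by byte so the caller learns the exact uncopied count, then padding the destination with zeroes. A self-contained sketch of that contract (get_user_byte stands in for __get_user_nocheck and, unlike the real thing, cannot fault):

    #include <string.h>

    /* assumption: stand-in for __get_user_nocheck; 0 means success */
    static int get_user_byte(char *dst, const char *src)
    {
            *dst = *src;
            return 0;
    }

    static unsigned long handle_tail_sketch(char *to, const char *from,
                                            unsigned long len)
    {
            for (; len; --len, ++to, ++from) {
                    char c;

                    if (get_user_byte(&c, from))
                            break;      /* faulted: stop copying */
                    *to = c;
            }
            memset(to, 0, len);         /* zero the unreached tail */
            return len;                 /* bytes that were not copied */
    }
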
+ */ + __visible unsigned long +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) ++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) + { + char c; + unsigned zero_len; + ++ clac(); ++ pax_close_userland(); + for (; len; --len, to++) { + if (__get_user_nocheck(c, from++, sizeof(char))) + break; +@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) + for (c = 0, zero_len = len; zerorest && zero_len; --zero_len) + if (__put_user_nocheck(c, to++, sizeof(char))) + break; +- clac(); + return len; + } +diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile +index 6a19ad9..1c48f9a 100644 +--- a/arch/x86/mm/Makefile ++++ b/arch/x86/mm/Makefile +@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o + obj-$(CONFIG_NUMA_EMU) += numa_emulation.o + + obj-$(CONFIG_MEMTEST) += memtest.o ++ ++quote:=" ++obj-$(CONFIG_X86_64) += uderef_64.o ++CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) +diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c +index 903ec1e..c4166b2 100644 +--- a/arch/x86/mm/extable.c ++++ b/arch/x86/mm/extable.c +@@ -6,12 +6,24 @@ + static inline unsigned long + ex_insn_addr(const struct exception_table_entry *x) + { +- return (unsigned long)&x->insn + x->insn; ++ unsigned long reloc = 0; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ ++ return (unsigned long)&x->insn + x->insn + reloc; + } + static inline unsigned long + ex_fixup_addr(const struct exception_table_entry *x) + { +- return (unsigned long)&x->fixup + x->fixup; ++ unsigned long reloc = 0; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ ++ return (unsigned long)&x->fixup + x->fixup + reloc; + } + + int fixup_exception(struct pt_regs *regs) +@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs) + unsigned long new_ip; + + #ifdef CONFIG_PNPBIOS +- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { ++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { + extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; + extern u32 pnp_bios_is_utter_crap; + pnp_bios_is_utter_crap = 1; +@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start, + i += 4; + p->fixup -= i; + i += 4; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT)); ++ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR; ++#endif ++ + } + } + +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c +index a10c8c7..35a5abb 100644 +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -14,11 +14,18 @@ + #include <linux/hugetlb.h> /* hstate_index_to_shift */ + #include <linux/prefetch.h> /* prefetchw */ + #include <linux/context_tracking.h> /* exception_enter(), ... */ ++#include <linux/unistd.h> ++#include <linux/compiler.h> + + #include <asm/traps.h> /* dotraplinkage, ... */ + #include <asm/pgalloc.h> /* pgd_*(), ... */ + #include <asm/kmemcheck.h> /* kmemcheck_*(), ... 
*/ + #include <asm/fixmap.h> /* VSYSCALL_START */ ++#include <asm/tlbflush.h> ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#include <asm/stacktrace.h> ++#endif + + #define CREATE_TRACE_POINTS + #include <asm/trace/exceptions.h> +@@ -59,7 +66,7 @@ static inline int __kprobes kprobes_fault(struct pt_regs *regs) + int ret = 0; + + /* kprobe_running() needs smp_processor_id() */ +- if (kprobes_built_in() && !user_mode_vm(regs)) { ++ if (kprobes_built_in() && !user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, 14)) + ret = 1; +@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, + return !instr_lo || (instr_lo>>1) == 1; + case 0x00: + /* Prefetch instruction is 0x0F0D or 0x0F18 */ +- if (probe_kernel_address(instr, opcode)) ++ if (user_mode(regs)) { ++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) ++ return 0; ++ } else if (probe_kernel_address(instr, opcode)) + return 0; + + *prefetch = (instr_lo == 0xF) && +@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) + while (instr < max_instr) { + unsigned char opcode; + +- if (probe_kernel_address(instr, opcode)) ++ if (user_mode(regs)) { ++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1)) ++ break; ++ } else if (probe_kernel_address(instr, opcode)) + break; + + instr++; +@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, + force_sig_info(si_signo, &info, tsk); + } + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address); ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++static int pax_handle_fetch_fault(struct pt_regs *regs); ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset(mm, address); ++ if (!pgd_present(*pgd)) ++ return NULL; ++ pud = pud_offset(pgd, address); ++ if (!pud_present(*pud)) ++ return NULL; ++ pmd = pmd_offset(pud, address); ++ if (!pmd_present(*pmd)) ++ return NULL; ++ return pmd; ++} ++#endif ++ + DEFINE_SPINLOCK(pgd_lock); + LIST_HEAD(pgd_list); + +@@ -235,10 +276,27 @@ void vmalloc_sync_all(void) + for (address = VMALLOC_START & PMD_MASK; + address >= TASK_SIZE && address < FIXADDR_TOP; + address += PMD_SIZE) { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + + spin_lock(&pgd_lock); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { ++ pgd_t *pgd = get_cpu_pgd(cpu, user); ++ pmd_t *ret; ++ ++ ret = vmalloc_sync_one(pgd, address); ++ if (!ret) ++ break; ++ pgd = get_cpu_pgd(cpu, kernel); ++#else + list_for_each_entry(page, &pgd_list, lru) { ++ pgd_t *pgd; + spinlock_t *pgt_lock; + pmd_t *ret; + +@@ -246,8 +304,14 @@ void vmalloc_sync_all(void) + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + + spin_lock(pgt_lock); +- ret = vmalloc_sync_one(page_address(page), address); ++ pgd = page_address(page); ++#endif ++ ++ ret = vmalloc_sync_one(pgd, address); ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + spin_unlock(pgt_lock); ++#endif + + if (!ret) + break; +@@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) + * an interrupt in the middle of a task switch.. 
+ */ + pgd_paddr = read_cr3(); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK)); ++ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address); ++#endif ++ + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); + if (!pmd_k) + return -1; +@@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) + * happen within a race in page table update. In the later + * case just flush: + */ +- pgd = pgd_offset(current->active_mm, address); ++ + pgd_ref = pgd_offset_k(address); + if (pgd_none(*pgd_ref)) + return -1; + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK)); ++ pgd = pgd_offset_cpu(smp_processor_id(), user, address); ++ if (pgd_none(*pgd)) { ++ set_pgd(pgd, *pgd_ref); ++ arch_flush_lazy_mmu_mode(); ++ } else { ++ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); ++ } ++ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address); ++#else ++ pgd = pgd_offset(current->active_mm, address); ++#endif ++ + if (pgd_none(*pgd)) { + set_pgd(pgd, *pgd_ref); + arch_flush_lazy_mmu_mode(); +@@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) + static int is_errata100(struct pt_regs *regs, unsigned long address) + { + #ifdef CONFIG_X86_64 +- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) + return 1; + #endif + return 0; +@@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) + } + + static const char nx_warning[] = KERN_CRIT +-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; ++"kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -582,15 +666,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ if (!oops_may_print())
+ return;
+
+- if (error_code & PF_INSTR) {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
+ unsigned int level;
+
+ pte_t *pte = lookup_address(address, &level);
+
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
+- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
++ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (init_mm.start_code <= address && address < init_mm.end_code) {
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ else
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ }
++#endif
++
+ printk(KERN_ALERT "BUG: unable to handle kernel ");
+ if (address < PAGE_SIZE)
+ printk(KERN_CONT "NULL pointer dereference");
+@@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ return;
+ }
+ #endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (pax_is_fetch_fault(regs, error_code, address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Kernel addresses are always protection faults: */
+ if (address >= TASK_SIZE)
+ error_code |= PF_PROT;
+@@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ printk(KERN_ERR
+ "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+- tsk->comm, tsk->pid, address);
++ tsk->comm, task_pid_nr(tsk), address);
+ code = BUS_MCEERR_AR;
+ }
+ #endif
+@@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+ return 1;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++ pte_t *pte;
++ pmd_t *pmd;
++ spinlock_t *ptl;
++ unsigned char pte_mask;
++
++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
++ return 0;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return 1;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++
++ pmd = pax_get_pmd(mm, address);
++ if (unlikely(!pmd))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ 
if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { ++ pte_unmap_unlock(pte, ptl); ++ return 0; ++ } ++ ++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { ++ /* write attempt to a protected page in user mode */ ++ pte_unmap_unlock(pte, ptl); ++ return 0; ++ } ++ ++#ifdef CONFIG_SMP ++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) ++#else ++ if (likely(address > get_limit(regs->cs))) ++#endif ++ { ++ set_pte(pte, pte_mkread(*pte)); ++ __flush_tlb_one(address); ++ pte_unmap_unlock(pte, ptl); ++ up_read(&mm->mmap_sem); ++ return 1; ++ } ++ ++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); ++ ++ /* ++ * PaX: fill DTLB with user rights and retry ++ */ ++ __asm__ __volatile__ ( ++ "orb %2,(%1)\n" ++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) ++/* ++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's ++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* ++ * page fault when examined during a TLB load attempt. this is true not only ++ * for PTEs holding a non-present entry but also present entries that will ++ * raise a page fault (such as those set up by PaX, or the copy-on-write ++ * mechanism). in effect it means that we do *not* need to flush the TLBs ++ * for our target pages since their PTEs are simply not in the TLBs at all. ++ ++ * the best thing in omitting it is that we gain around 15-20% speed in the ++ * fast path of the page fault handler and can get rid of tracing since we ++ * can no longer flush unintended entries. ++ */ ++ "invlpg (%0)\n" ++#endif ++ __copyuser_seg"testb $0,(%0)\n" ++ "xorb %3,(%1)\n" ++ : ++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER) ++ : "memory", "cc"); ++ pte_unmap_unlock(pte, ptl); ++ up_read(&mm->mmap_sem); ++ return 1; ++} ++#endif ++ + /* + * Handle a spurious fault caused by a stale TLB entry. + * +@@ -976,6 +1181,9 @@ int show_unhandled_signals = 1; + static inline int + access_error(unsigned long error_code, struct vm_area_struct *vma) + { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) ++ return 1; ++ + if (error_code & PF_WRITE) { + /* write, present and write, not present: */ + if (unlikely(!(vma->vm_flags & VM_WRITE))) +@@ -1010,7 +1218,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) + if (error_code & PF_USER) + return false; + +- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC)) ++ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) + return false; + + return true; +@@ -1038,6 +1246,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, + tsk = current; + mm = tsk->mm; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) { ++ if (!search_exception_tables(regs->ip)) { ++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); ++ bad_area_nosemaphore(regs, error_code, address); ++ return; ++ } ++ if (address < pax_user_shadow_base) { ++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); ++ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip); ++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR); ++ } else ++ address -= pax_user_shadow_base; ++ } ++#endif ++ + /* + * Detect and handle instructions that would cause a page fault for + * both a tracked kernel page and a userspace page. 
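/*
 * A minimal userland sketch, not part of the patch: the PAX_MEMORY_UDEREF
 * hunk above translates a kernel-mode fault inside the shadowed userland
 * window back to the real user address by subtracting pax_user_shadow_base,
 * and only reports (rather than fixes up) hits below the shadow base. The
 * exception-table lookup is omitted here, and SHADOW_BASE is a made-up
 * stand-in for the runtime pax_user_shadow_base value.
 */
#include <stdbool.h>
#include <stdio.h>

#define SHADOW_BASE 0x10000000000UL	/* hypothetical pax_user_shadow_base */

/* Mirrors the branch structure of the __do_page_fault() hunk: returns true
 * when the fault hit the shadowed copy of a user address and stores the
 * recovered user address in *fixed. */
static bool uderef_fixup(unsigned long addr, unsigned long *fixed)
{
	if (addr >= 2 * SHADOW_BASE)
		return false;		/* ordinary kernel address: no fixup */
	if (addr < SHADOW_BASE)
		return false;		/* unshadowed low hit: report it */
	*fixed = addr - SHADOW_BASE;	/* shadowed access: recover user addr */
	return true;
}

int main(void)
{
	unsigned long fixed;

	if (uderef_fixup(SHADOW_BASE + 0x1234, &fixed))
		printf("shadowed fault fixed up to %#lx\n", fixed);
	return 0;
}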
+@@ -1115,7 +1339,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, + * User-mode registers count as a user access even for any + * potential system fault or CPU buglet: + */ +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + local_irq_enable(); + error_code |= PF_USER; + flags |= FAULT_FLAG_USER; +@@ -1162,6 +1386,11 @@ retry: + might_sleep(); + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (pax_handle_pageexec_fault(regs, mm, address, error_code)) ++ return; ++#endif ++ + vma = find_vma(mm, address); + if (unlikely(!vma)) { + bad_area(regs, error_code, address); +@@ -1173,18 +1402,24 @@ retry: + bad_area(regs, error_code, address); + return; + } +- if (error_code & PF_USER) { +- /* +- * Accessing the stack below %sp is always a bug. +- * The large cushion allows instructions like enter +- * and pusha to work. ("enter $65535, $31" pushes +- * 32 pointers and then decrements %sp by 65535.) +- */ +- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { +- bad_area(regs, error_code, address); +- return; +- } ++ /* ++ * Accessing the stack below %sp is always a bug. ++ * The large cushion allows instructions like enter ++ * and pusha to work. ("enter $65535, $31" pushes ++ * 32 pointers and then decrements %sp by 65535.) ++ */ ++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { ++ bad_area(regs, error_code, address); ++ return; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { ++ bad_area(regs, error_code, address); ++ return; ++ } ++#endif ++ + if (unlikely(expand_stack(vma, address))) { + bad_area(regs, error_code, address); + return; +@@ -1296,3 +1531,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) + exception_exit(prev_state); + } + #endif /* CONFIG_TRACING */ ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) ++{ ++ struct mm_struct *mm = current->mm; ++ unsigned long ip = regs->ip; ++ ++ if (v8086_mode(regs)) ++ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff); ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) ++ return true; ++ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address) ++ return true; ++ return false; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) ++ return true; ++ return false; ++ } ++#endif ++ ++ return false; ++} ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++static int pax_handle_fetch_fault_32(struct pt_regs *regs) ++{ ++ int err; ++ ++ do { /* PaX: libffi trampoline emulation */ ++ unsigned char mov, jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 9) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ ++ if (err) ++ break; ++ ++ if (mov == 0xB8 && jmp == 0xE9) { ++ regs->ax = addr1; ++ regs->ip = (unsigned int)(regs->ip + addr2 + 10); ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #1 */ ++ 
unsigned char mov1, mov2; ++ unsigned short jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 11) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov1, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { ++ regs->cx = addr1; ++ regs->ax = addr2; ++ regs->ip = addr2; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #2 */ ++ unsigned char mov, jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 9) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ ++ if (err) ++ break; ++ ++ if (mov == 0xB9 && jmp == 0xE9) { ++ regs->cx = addr1; ++ regs->ip = (unsigned int)(regs->ip + addr2 + 10); ++ return 2; ++ } ++ } while (0); ++ ++ return 1; /* PaX in action */ ++} ++ ++#ifdef CONFIG_X86_64 ++static int pax_handle_fetch_fault_64(struct pt_regs *regs) ++{ ++ int err; ++ ++ do { /* PaX: libffi trampoline emulation */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char stcclc, jmp2; ++ unsigned long addr1, addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); ++ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ if (stcclc == 0xF8) ++ regs->flags &= ~X86_EFLAGS_CF; ++ else ++ regs->flags |= X86_EFLAGS_CF; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #1 */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char jmp2; ++ unsigned int addr1; ++ unsigned long addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #2 */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char jmp2; ++ unsigned long addr1, addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); ++ err |= 
get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ return 1; /* PaX in action */ ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->ip = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when gcc trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ if (v8086_mode(regs)) ++ return 1; ++ ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++ ++#ifdef CONFIG_X86_32 ++ return pax_handle_fetch_fault_32(regs); ++#else ++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) ++ return pax_handle_fetch_fault_32(regs); ++ else ++ return pax_handle_fetch_fault_64(regs); ++#endif ++} ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (unsigned char __force_user *)pc+i)) ++ printk(KERN_CONT "?? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); ++ for (i = -1; i < 80 / (long)sizeof(long); i++) { ++ unsigned long c; ++ if (get_user(c, (unsigned long __force_user *)sp+i)) { ++#ifdef CONFIG_X86_32 ++ printk(KERN_CONT "???????? "); ++#else ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) ++ printk(KERN_CONT "???????? ???????? "); ++ else ++ printk(KERN_CONT "???????????????? "); ++#endif ++ } else { ++#ifdef CONFIG_X86_64 ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) { ++ printk(KERN_CONT "%08x ", (unsigned int)c); ++ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32)); ++ } else ++#endif ++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); ++ } ++ } ++ printk("\n"); ++} ++#endif ++ ++/** ++ * probe_kernel_write(): safely attempt to write to a location ++ * @dst: address to write to ++ * @src: pointer to the data that shall be written ++ * @size: size of the data chunk ++ * ++ * Safely write to address @dst from the buffer at @src. If a kernel fault ++ * happens, handle that and return -EFAULT. ++ */ ++long notrace probe_kernel_write(void *dst, const void *src, size_t size) ++{ ++ long ret; ++ mm_segment_t old_fs = get_fs(); ++ ++ set_fs(KERNEL_DS); ++ pagefault_disable(); ++ pax_open_kernel(); ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); ++ pax_close_kernel(); ++ pagefault_enable(); ++ set_fs(old_fs); ++ ++ return ret ? -EFAULT : 0; ++} +diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c +index 207d9aef..69030980 100644 +--- a/arch/x86/mm/gup.c ++++ b/arch/x86/mm/gup.c +@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; +- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, ++ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ, + (void __user *)start, len))) + return 0; + +@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, + goto slow_irqon; + #endif + ++ if (unlikely(!access_ok_noprefault(write ? 
VERIFY_WRITE : VERIFY_READ, ++ (void __user *)start, len))) ++ return 0; ++ + /* + * XXX: batch / limit 'nr', to avoid large irq off latency + * needs some instrumenting to determine the common sizes used by +diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c +index 4500142..53a363c 100644 +--- a/arch/x86/mm/highmem_32.c ++++ b/arch/x86/mm/highmem_32.c +@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); ++ ++ pax_open_kernel(); + set_pte(kmap_pte-idx, mk_pte(page, prot)); ++ pax_close_kernel(); ++ + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c +index 8b977eb..4732c33 100644 +--- a/arch/x86/mm/hugetlbpage.c ++++ b/arch/x86/mm/hugetlbpage.c +@@ -80,23 +80,24 @@ int pud_huge(pud_t pud) + #ifdef CONFIG_HUGETLB_PAGE + static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, +- unsigned long pgoff, unsigned long flags) ++ unsigned long pgoff, unsigned long flags, unsigned long offset) + { + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; +- ++ + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + + static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, +- unsigned long pgoff, unsigned long flags) ++ unsigned long pgoff, unsigned long flags, unsigned long offset) + { + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; +@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += current->mm->delta_mmap; ++#endif ++ + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } +@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; ++ unsigned long pax_task_size = TASK_SIZE; ++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); + + if (len & ~huge_page_mask(h)) + return -EINVAL; +- if (len > TASK_SIZE) ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) { +@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + return addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (pax_task_size - len 
>= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, +- pgoff, flags); ++ pgoff, flags, offset); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, +- pgoff, flags); ++ pgoff, flags, offset); + } + #endif /* CONFIG_HUGETLB_PAGE */ + +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index f971306..e83e0f6 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -4,6 +4,7 @@ + #include <linux/swap.h> + #include <linux/memblock.h> + #include <linux/bootmem.h> /* for max_low_pfn */ ++#include <linux/tboot.h> + + #include <asm/cacheflush.h> + #include <asm/e820.h> +@@ -17,6 +18,8 @@ + #include <asm/proto.h> + #include <asm/dma.h> /* for MAX_DMA_PFN */ + #include <asm/microcode.h> ++#include <asm/desc.h> ++#include <asm/bios_ebda.h> + + #include "mm_internal.h" + +@@ -563,7 +566,18 @@ void __init init_mem_mapping(void) + early_ioremap_page_table_range_init(); + #endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ load_cr3(get_cpu_pgd(0, kernel)); ++#else + load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + + early_memtest(0, max_pfn_mapped << PAGE_SHIFT); +@@ -579,10 +593,40 @@ void __init init_mem_mapping(void) + * Access has to be given to non-kernel-ram areas as well, these contain the PCI + * mmio resources as well as potential bios/acpi data regions. + */ ++ ++#ifdef CONFIG_GRKERNSEC_KMEM ++static unsigned int ebda_start __read_only; ++static unsigned int ebda_end __read_only; ++#endif ++ + int devmem_is_allowed(unsigned long pagenr) + { +- if (pagenr < 256) ++#ifdef CONFIG_GRKERNSEC_KMEM ++ /* allow BDA */ ++ if (!pagenr) + return 1; ++ /* allow EBDA */ ++ if (pagenr >= ebda_start && pagenr < ebda_end) ++ return 1; ++ /* if tboot is in use, allow access to its hardcoded serial log range */ ++ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT))) ++ return 1; ++#else ++ if (!pagenr) ++ return 1; ++#ifdef CONFIG_VM86 ++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT)) ++ return 1; ++#endif ++#endif ++ ++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) ++ return 1; ++#ifdef CONFIG_GRKERNSEC_KMEM ++ /* throw out everything else below 1MB */ ++ if (pagenr <= 256) ++ return 0; ++#endif + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + return 0; + if (!page_is_ram(pagenr)) +@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) + #endif + } + ++#ifdef CONFIG_GRKERNSEC_KMEM ++static inline void gr_init_ebda(void) ++{ ++ unsigned int ebda_addr; ++ unsigned int ebda_size = 0; ++ ++ ebda_addr = get_bios_ebda(); ++ if (ebda_addr) { ++ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr); ++ ebda_size <<= 10; ++ } ++ if (ebda_addr && ebda_size) { ++ ebda_start = ebda_addr >> PAGE_SHIFT; ++ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT; ++ } else { ++ ebda_start = 0x9f000 >> PAGE_SHIFT; ++ ebda_end = 0xa0000 >> PAGE_SHIFT; ++ } ++} ++#else ++static inline void gr_init_ebda(void) { } ++#endif ++ + void free_initmem(void) + { ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++ /* PaX: limit KERNEL_CS to actual 
size */ ++ unsigned long addr, limit; ++ struct desc_struct d; ++ int cpu; ++#else ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ unsigned long addr, end; ++#endif ++#endif ++ ++ gr_init_ebda(); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext; ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ ++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S); ++ } ++ ++ /* PaX: make KERNEL_CS read-only */ ++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text)); ++ if (!paravirt_enabled()) ++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT); ++/* ++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ } ++*/ ++#ifdef CONFIG_X86_PAE ++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT); ++/* ++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++*/ ++#endif ++ ++#ifdef CONFIG_MODULES ++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); ++#endif ++ ++#else ++ /* PaX: make kernel code/rodata read-only, rest non-executable */ ++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ continue; ++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ else ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++ ++ addr = (unsigned long)__va(__pa(__START_KERNEL_map)); ++ end = addr + KERNEL_IMAGE_SIZE; ++ for (; addr < end; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ continue; ++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ } ++#endif ++ ++ flush_tlb_all(); ++#endif ++ + free_init_pages("unused kernel", + (unsigned long)(&__init_begin), + (unsigned long)(&__init_end)); +diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c +index e395048..cd38278 100644 +--- a/arch/x86/mm/init_32.c ++++ b/arch/x86/mm/init_32.c +@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void); + bool __read_mostly __vmalloc_start_set = false; + + /* +- * Creates a middle page table and puts a pointer to it in the +- * given global directory entry. This only returns the gd entry +- * in non-PAE compilation mode, since the middle layer is folded. 
+- */ +-static pmd_t * __init one_md_table_init(pgd_t *pgd) +-{ +- pud_t *pud; +- pmd_t *pmd_table; +- +-#ifdef CONFIG_X86_PAE +- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { +- pmd_table = (pmd_t *)alloc_low_page(); +- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); +- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); +- pud = pud_offset(pgd, 0); +- BUG_ON(pmd_table != pmd_offset(pud, 0)); +- +- return pmd_table; +- } +-#endif +- pud = pud_offset(pgd, 0); +- pmd_table = pmd_offset(pud, 0); +- +- return pmd_table; +-} +- +-/* + * Create a page table and place a pointer to it in a middle page + * directory entry: + */ +@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) + pte_t *page_table = (pte_t *)alloc_low_page(); + + paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); ++#else + set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); ++#endif + BUG_ON(page_table != pte_offset_kernel(pmd, 0)); + } + + return pte_offset_kernel(pmd, 0); + } + ++static pmd_t * __init one_md_table_init(pgd_t *pgd) ++{ ++ pud_t *pud; ++ pmd_t *pmd_table; ++ ++ pud = pud_offset(pgd, 0); ++ pmd_table = pmd_offset(pud, 0); ++ ++ return pmd_table; ++} ++ + pmd_t * __init populate_extra_pmd(unsigned long vaddr) + { + int pgd_idx = pgd_index(vaddr); +@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) + int pgd_idx, pmd_idx; + unsigned long vaddr; + pgd_t *pgd; ++ pud_t *pud; + pmd_t *pmd; + pte_t *pte = NULL; + unsigned long count = page_table_range_init_count(start, end); +@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) + pgd = pgd_base + pgd_idx; + + for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { +- pmd = one_md_table_init(pgd); +- pmd = pmd + pmd_index(vaddr); ++ pud = pud_offset(pgd, vaddr); ++ pmd = pmd_offset(pud, vaddr); ++ ++#ifdef CONFIG_X86_PAE ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); ++#endif ++ + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); + pmd++, pmd_idx++) { + pte = page_table_kmap_check(one_page_table_init(pmd), +@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) + } + } + +-static inline int is_kernel_text(unsigned long addr) ++static inline int is_kernel_text(unsigned long start, unsigned long end) + { +- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) +- return 1; +- return 0; ++ if ((start >= ktla_ktva((unsigned long)_etext) || ++ end <= ktla_ktva((unsigned long)_stext)) && ++ (start >= ktla_ktva((unsigned long)_einittext) || ++ end <= ktla_ktva((unsigned long)_sinittext)) && ++ ++#ifdef CONFIG_ACPI_SLEEP ++ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && ++#endif ++ ++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) ++ return 0; ++ return 1; + } + + /* +@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start, + unsigned long last_map_addr = end; + unsigned long start_pfn, end_pfn; + pgd_t *pgd_base = swapper_pg_dir; +- int pgd_idx, pmd_idx, pte_ofs; ++ unsigned int pgd_idx, pmd_idx, pte_ofs; + unsigned long pfn; + pgd_t *pgd; ++ pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned pages_2m, pages_4k; +@@ -291,8 +295,13 @@ repeat: + pfn = start_pfn; + pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); + 
pgd = pgd_base + pgd_idx; +- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { +- pmd = one_md_table_init(pgd); ++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) { ++ pud = pud_offset(pgd, 0); ++ pmd = pmd_offset(pud, 0); ++ ++#ifdef CONFIG_X86_PAE ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); ++#endif + + if (pfn >= end_pfn) + continue; +@@ -304,14 +313,13 @@ repeat: + #endif + for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; + pmd++, pmd_idx++) { +- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; ++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; + + /* + * Map with big pages if possible, otherwise + * create normal page tables: + */ + if (use_pse) { +- unsigned int addr2; + pgprot_t prot = PAGE_KERNEL_LARGE; + /* + * first pass will use the same initial +@@ -322,11 +330,7 @@ repeat: + _PAGE_PSE); + + pfn &= PMD_MASK >> PAGE_SHIFT; +- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + +- PAGE_OFFSET + PAGE_SIZE-1; +- +- if (is_kernel_text(addr) || +- is_kernel_text(addr2)) ++ if (is_kernel_text(address, address + PMD_SIZE)) + prot = PAGE_KERNEL_LARGE_EXEC; + + pages_2m++; +@@ -343,7 +347,7 @@ repeat: + pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); + pte += pte_ofs; + for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; +- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { ++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) { + pgprot_t prot = PAGE_KERNEL; + /* + * first pass will use the same initial +@@ -351,7 +355,7 @@ repeat: + */ + pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); + +- if (is_kernel_text(addr)) ++ if (is_kernel_text(address, address + PAGE_SIZE)) + prot = PAGE_KERNEL_EXEC; + + pages_4k++; +@@ -474,7 +478,7 @@ void __init native_pagetable_init(void) + + pud = pud_offset(pgd, va); + pmd = pmd_offset(pud, va); +- if (!pmd_present(*pmd)) ++ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd)) + break; + + /* should not be large page here */ +@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void) + + static void __init pagetable_init(void) + { +- pgd_t *pgd_base = swapper_pg_dir; +- +- permanent_kmaps_init(pgd_base); ++ permanent_kmaps_init(swapper_pg_dir); + } + +-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + + /* user-defined highmem size */ +@@ -787,10 +789,10 @@ void __init mem_init(void) + ((unsigned long)&__init_end - + (unsigned long)&__init_begin) >> 10, + +- (unsigned long)&_etext, (unsigned long)&_edata, +- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, ++ (unsigned long)&_sdata, (unsigned long)&_edata, ++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, + +- (unsigned long)&_text, (unsigned long)&_etext, ++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), + ((unsigned long)&_etext - (unsigned long)&_text) >> 10); + + /* +@@ -883,6 +885,7 @@ void set_kernel_text_rw(void) + if (!kernel_set_to_readonly) + return; + ++ start = ktla_ktva(start); + pr_debug("Set kernel text: %lx - %lx for read write\n", + start, start+size); + +@@ -897,6 +900,7 @@ void set_kernel_text_ro(void) + if (!kernel_set_to_readonly) + return; + ++ start = ktla_ktva(start); + pr_debug("Set kernel text: %lx - %lx for read only\n", + start, start+size); + +@@ -925,6 +929,7 @@ void mark_rodata_ro(void) + unsigned long start = PFN_ALIGN(_text); + unsigned long size = PFN_ALIGN(_etext) - start; + ++ start = ktla_ktva(start); + 
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + printk(KERN_INFO "Write protecting the kernel text: %luk\n", + size >> 10); +diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c +index f35c66c..84b95ef 100644 +--- a/arch/x86/mm/init_64.c ++++ b/arch/x86/mm/init_64.c +@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on); + * around without checking the pgd every time. + */ + +-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + + int force_personality32; +@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end) + + for (address = start; address <= end; address += PGDIR_SIZE) { + const pgd_t *pgd_ref = pgd_offset_k(address); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + + if (pgd_none(*pgd_ref)) + continue; + + spin_lock(&pgd_lock); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { ++ pgd_t *pgd = pgd_offset_cpu(cpu, user, address); ++ ++ if (pgd_none(*pgd)) ++ set_pgd(pgd, *pgd_ref); ++ else ++ BUG_ON(pgd_page_vaddr(*pgd) ++ != pgd_page_vaddr(*pgd_ref)); ++ pgd = pgd_offset_cpu(cpu, kernel, address); ++#else + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + spinlock_t *pgt_lock; +@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) + /* the pgt_lock only for Xen */ + pgt_lock = &pgd_page_get_mm(page)->page_table_lock; + spin_lock(pgt_lock); ++#endif + + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); +@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end) + BUG_ON(pgd_page_vaddr(*pgd) + != pgd_page_vaddr(*pgd_ref)); + ++#ifndef CONFIG_PAX_PER_CPU_PGD + spin_unlock(pgt_lock); ++#endif ++ + } + spin_unlock(&pgd_lock); + } +@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) + { + if (pgd_none(*pgd)) { + pud_t *pud = (pud_t *)spp_getpage(); +- pgd_populate(&init_mm, pgd, pud); ++ pgd_populate_kernel(&init_mm, pgd, pud); + if (pud != pud_offset(pgd, 0)) + printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", + pud, pud_offset(pgd, 0)); +@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) + { + if (pud_none(*pud)) { + pmd_t *pmd = (pmd_t *) spp_getpage(); +- pud_populate(&init_mm, pud, pmd); ++ pud_populate_kernel(&init_mm, pud, pmd); + if (pmd != pmd_offset(pud, 0)) + printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", + pmd, pmd_offset(pud, 0)); +@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) + pmd = fill_pmd(pud, vaddr); + pte = fill_pte(pmd, vaddr); + ++ pax_open_kernel(); + set_pte(pte, new_pte); ++ pax_close_kernel(); + + /* + * It's enough to flush this one mapping. 
+@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, + pgd = pgd_offset_k((unsigned long)__va(phys)); + if (pgd_none(*pgd)) { + pud = (pud_t *) spp_getpage(); +- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | +- _PAGE_USER)); ++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); + } + pud = pud_offset(pgd, (unsigned long)__va(phys)); + if (pud_none(*pud)) { + pmd = (pmd_t *) spp_getpage(); +- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | +- _PAGE_USER)); ++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); + } + pmd = pmd_offset(pud, phys); + BUG_ON(!pmd_none(*pmd)); +@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, + prot); + + spin_lock(&init_mm.page_table_lock); +- pud_populate(&init_mm, pud, pmd); ++ pud_populate_kernel(&init_mm, pud, pmd); + spin_unlock(&init_mm.page_table_lock); + } + __flush_tlb_all(); +@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start, + page_size_mask); + + spin_lock(&init_mm.page_table_lock); +- pgd_populate(&init_mm, pgd, pud); ++ pgd_populate_kernel(&init_mm, pgd, pud); + spin_unlock(&init_mm.page_table_lock); + pgd_changed = true; + } +@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr) + static struct vm_area_struct gate_vma = { + .vm_start = VSYSCALL_START, + .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), +- .vm_page_prot = PAGE_READONLY_EXEC, +- .vm_flags = VM_READ | VM_EXEC ++ .vm_page_prot = PAGE_READONLY, ++ .vm_flags = VM_READ + }; + + struct vm_area_struct *get_gate_vma(struct mm_struct *mm) +@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr) + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; + if (vma == &gate_vma) + return "[vsyscall]"; +diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c +index 7b179b4..6bd17777 100644 +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ ++ pax_open_kernel(); + set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ pax_close_kernel(); ++ + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c +index 94bd247..7e48391 100644 +--- a/arch/x86/mm/ioremap.c ++++ b/arch/x86/mm/ioremap.c +@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + unsigned long i; + + for (i = 0; i < nr_pages; ++i) +- if (pfn_valid(start_pfn + i) && +- !PageReserved(pfn_to_page(start_pfn + i))) ++ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 || ++ !PageReserved(pfn_to_page(start_pfn + i)))) + return 1; + + WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); +@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot); + * + * Caller must ensure there is only one unmapping for the same pointer. + */ +-void iounmap(volatile void __iomem *addr) ++void iounmap(const volatile void __iomem *addr) + { + struct vm_struct *p, *o; + +@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys) + + /* If page is RAM, we can use __va. Otherwise ioremap and unmap. 
*/ + if (page_is_ram(start >> PAGE_SHIFT)) ++#ifdef CONFIG_HIGHMEM ++ if ((start >> PAGE_SHIFT) < max_low_pfn) ++#endif + return __va(phys); + + addr = (void __force *)ioremap_cache(start, PAGE_SIZE); +@@ -334,6 +337,9 @@ void *xlate_dev_mem_ptr(unsigned long phys) + void unxlate_dev_mem_ptr(unsigned long phys, void *addr) + { + if (page_is_ram(phys >> PAGE_SHIFT)) ++#ifdef CONFIG_HIGHMEM ++ if ((phys >> PAGE_SHIFT) < max_low_pfn) ++#endif + return; + + iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK)); +@@ -351,7 +357,7 @@ static int __init early_ioremap_debug_setup(char *str) + early_param("early_ioremap_debug", early_ioremap_debug_setup); + + static __initdata int after_paging_init; +-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; ++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE); + + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) + { +@@ -388,8 +394,7 @@ void __init early_ioremap_init(void) + slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); + + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); +- memset(bm_pte, 0, sizeof(bm_pte)); +- pmd_populate_kernel(&init_mm, pmd, bm_pte); ++ pmd_populate_user(&init_mm, pmd, bm_pte); + + /* + * The boot-ioremap range spans multiple pmds, for which +diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c +index d87dd6d..bf3fa66 100644 +--- a/arch/x86/mm/kmemcheck/kmemcheck.c ++++ b/arch/x86/mm/kmemcheck/kmemcheck.c +@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, + * memory (e.g. tracked pages)? For now, we need this to avoid + * invoking kmemcheck for PnP BIOS calls. + */ +- if (regs->flags & X86_VM_MASK) ++ if (v8086_mode(regs)) + return false; +- if (regs->cs != __KERNEL_CS) ++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) + return false; + + pte = kmemcheck_pte_lookup(address); +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index 25e7e13..1964579 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void) + * Leave an at least ~128 MB hole with possible stack randomization. 
+ */ + #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) +-#define MAX_GAP (TASK_SIZE/6*5) ++#define MAX_GAP (pax_task_size/6*5) + + static int mmap_is_legacy(void) + { +@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void) + return rnd << PAGE_SHIFT; + } + +-static unsigned long mmap_base(void) ++static unsigned long mmap_base(struct mm_struct *mm) + { + unsigned long gap = rlimit(RLIMIT_STACK); ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + +- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); ++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd()); + } + + /* + * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 + * does, but not when emulating X86_32 + */ +-static unsigned long mmap_legacy_base(void) ++static unsigned long mmap_legacy_base(struct mm_struct *mm) + { +- if (mmap_is_ia32()) ++ if (mmap_is_ia32()) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ return SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ + return TASK_UNMAPPED_BASE; +- else ++ } else + return TASK_UNMAPPED_BASE + mmap_rnd(); + } + +@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void) + */ + void arch_pick_mmap_layout(struct mm_struct *mm) + { +- mm->mmap_legacy_base = mmap_legacy_base(); +- mm->mmap_base = mmap_base(); ++ mm->mmap_legacy_base = mmap_legacy_base(mm); ++ mm->mmap_base = mmap_base(mm); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) { ++ mm->mmap_legacy_base += mm->delta_mmap; ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++ } ++#endif + + if (mmap_is_legacy()) { + mm->mmap_base = mm->mmap_legacy_base; +diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c +index 0057a7a..95c7edd 100644 +--- a/arch/x86/mm/mmio-mod.c ++++ b/arch/x86/mm/mmio-mod.c +@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs, + break; + default: + { +- unsigned char *ip = (unsigned char *)instptr; ++ unsigned char *ip = (unsigned char *)ktla_ktva(instptr); + my_trace->opcode = MMIO_UNKNOWN_OP; + my_trace->width = 0; + my_trace->value = (*ip) << 16 | *(ip + 1) << 8 | +@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition, + static void ioremap_trace_core(resource_size_t offset, unsigned long size, + void __iomem *addr) + { +- static atomic_t next_id; ++ static atomic_unchecked_t next_id; + struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); + /* These are page-unaligned. 
*/ + struct mmiotrace_map map = { +@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size, + .private = trace + }, + .phys = offset, +- .id = atomic_inc_return(&next_id) ++ .id = atomic_inc_return_unchecked(&next_id) + }; + map.map_id = trace->id; + +@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size, + ioremap_trace_core(offset, size, addr); + } + +-static void iounmap_trace_core(volatile void __iomem *addr) ++static void iounmap_trace_core(const volatile void __iomem *addr) + { + struct mmiotrace_map map = { + .phys = 0, +@@ -328,7 +328,7 @@ not_enabled: + } + } + +-void mmiotrace_iounmap(volatile void __iomem *addr) ++void mmiotrace_iounmap(const volatile void __iomem *addr) + { + might_sleep(); + if (is_enabled()) /* recheck and proper locking in *_core() */ +diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c +index 27aa0455..0eb1406 100644 +--- a/arch/x86/mm/numa.c ++++ b/arch/x86/mm/numa.c +@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) + return true; + } + +-static int __init numa_register_memblks(struct numa_meminfo *mi) ++static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi) + { + unsigned long uninitialized_var(pfn_align); + int i, nid; +diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c +index 461bc82..4e091a3 100644 +--- a/arch/x86/mm/pageattr-test.c ++++ b/arch/x86/mm/pageattr-test.c +@@ -35,7 +35,7 @@ enum { + + static int pte_testbit(pte_t pte) + { +- return pte_flags(pte) & _PAGE_UNUSED1; ++ return pte_flags(pte) & _PAGE_CPA_TEST; + } + + struct split_state { +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index a348868..3c64310 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + */ + #ifdef CONFIG_PCI_BIOS + if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) +- pgprot_val(forbidden) |= _PAGE_NX; ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + #endif + + /* +@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + * Does not cover __inittext since that is gone later on. On + * 64bit we do not enforce !NX on the low mapping + */ +- if (within(address, (unsigned long)_text, (unsigned long)_etext)) +- pgprot_val(forbidden) |= _PAGE_NX; ++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + ++#ifdef CONFIG_DEBUG_RODATA + /* + * The .rodata section needs to be read-only. Using the pfn + * catches all aliases. 
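/*
 * A toy sketch, not part of the patch: the static_protections() hunks
 * around this point make each protected range (PCI BIOS area, kernel
 * text, .rodata) contribute bits to a "forbidden" mask, which is then
 * stripped from the caller's requested page protection. TOY_PAGE_RW and
 * TOY_PAGE_NX are stand-ins for the real _PAGE_* flag values.
 */
#include <stdio.h>

#define TOY_PAGE_RW	0x002UL		/* stand-in for _PAGE_RW */
#define TOY_PAGE_NX	0x100UL		/* stand-in for _PAGE_NX */

/* The closing step of static_protections(): clear every forbidden bit
 * and leave the rest of the requested protection untouched. */
static unsigned long apply_forbidden(unsigned long prot, unsigned long forbidden)
{
	return prot & ~forbidden;
}

int main(void)
{
	unsigned long prot = TOY_PAGE_RW | TOY_PAGE_NX;

	/* a write mapping over .rodata must lose RW but keep NX (prints 0x100) */
	printf("%#lx\n", apply_forbidden(prot, TOY_PAGE_RW));
	return 0;
}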
+@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, + __pa_symbol(__end_rodata) >> PAGE_SHIFT)) + pgprot_val(forbidden) |= _PAGE_RW; ++#endif + + #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) + /* +@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, + } + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) { ++ pgprot_val(forbidden) |= _PAGE_RW; ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; ++ } ++#endif ++ + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); + + return prot; +@@ -416,23 +425,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys); + static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) + { + /* change init_mm */ ++ pax_open_kernel(); + set_pte_atomic(kpte, pte); ++ + #ifdef CONFIG_X86_32 + if (!SHARED_KERNEL_PMD) { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) { ++ pgd_t *pgd = get_cpu_pgd(cpu, kernel); ++#else + list_for_each_entry(page, &pgd_list, lru) { +- pgd_t *pgd; ++ pgd_t *pgd = (pgd_t *)page_address(page); ++#endif ++ + pud_t *pud; + pmd_t *pmd; + +- pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgd += pgd_index(address); + pud = pud_offset(pgd, address); + pmd = pmd_offset(pud, address); + set_pte_atomic((pte_t *)pmd, pte); + } + } + #endif ++ pax_close_kernel(); + } + + static int +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index 6574388..87e9bef 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end) + + if (!entry) { + printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", +- current->comm, current->pid, start, end - 1); ++ current->comm, task_pid_nr(current), start, end - 1); + return -EINVAL; + } + +@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { +- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n", +- current->comm, from, to - 1); ++ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n", ++ current->comm, from, to - 1, cursor); + return 0; + } + cursor += PAGE_SIZE; +@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) + if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { + printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " + "for [mem %#010Lx-%#010Lx]\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(flags), + base, (unsigned long long)(base + size-1)); + return -EINVAL; +@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, + flags = lookup_memtype(paddr); + if (want_flags != flags) { + printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size - 1), +@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, + free_memtype(paddr, paddr + size); + printk(KERN_ERR "%s:%d map pfn expected mapping 
type %s" + " for [mem %#010Lx-%#010Lx], got %s\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size - 1), +diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c +index 415f6c4..d319983 100644 +--- a/arch/x86/mm/pat_rbtree.c ++++ b/arch/x86/mm/pat_rbtree.c +@@ -160,7 +160,7 @@ success: + + failure: + printk(KERN_INFO "%s:%d conflicting memory types " +- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start, ++ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start, + end, cattr_name(found_type), cattr_name(match->type)); + return -EBUSY; + } +diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c +index 9f0614d..92ae64a 100644 +--- a/arch/x86/mm/pf_in.c ++++ b/arch/x86/mm/pf_in.c +@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr) + int i; + enum reason_type rv = OTHERS; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + +@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + +@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + +@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + for (i = 0; i < ARRAY_SIZE(reg_rop); i++) +@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr) + struct prefix_bits prf; + int i; + +- p = (unsigned char *)ins_addr; ++ p = (unsigned char *)ktla_ktva(ins_addr); + p += skip_prefix(p, &prf); + p += get_opcode(p, &opcode); + for (i = 0; i < ARRAY_SIZE(imm_wop); i++) +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index c96314a..433b127 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd) + list_del(&page->lru); + } + +-#define UNSHARED_PTRS_PER_PGD \ +- (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; + ++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) ++{ ++ unsigned int count = USER_PGD_PTRS; + ++ if (!pax_user_shadow_base) ++ return; ++ ++ while (count--) ++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER); ++} ++#endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++void __clone_user_pgds(pgd_t *dst, const pgd_t *src) ++{ ++ unsigned int count = USER_PGD_PTRS; ++ ++ while (count--) { ++ pgd_t pgd; ++ ++#ifdef CONFIG_X86_64 ++ pgd = __pgd(pgd_val(*src++) | _PAGE_USER); ++#else ++ pgd = *src++; ++#endif ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask); ++#endif ++ ++ *dst++ = pgd; ++ } ++ ++} ++#endif ++ ++#ifdef CONFIG_X86_64 ++#define pxd_t pud_t ++#define pyd_t pgd_t ++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) ++#define pgtable_pxd_page_ctor(page) true ++#define pgtable_pxd_page_dtor(page) ++#define pxd_free(mm, pud) pud_free((mm), (pud)) ++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) ++#define pyd_offset(mm, address) pgd_offset((mm), (address)) ++#define PYD_SIZE PGDIR_SIZE ++#else ++#define pxd_t pmd_t ++#define pyd_t pud_t ++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) ++#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page) ++#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page) ++#define pxd_free(mm, pud) pmd_free((mm), (pud)) ++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) ++#define pyd_offset(mm, address) pud_offset((mm), (address)) ++#define PYD_SIZE PUD_SIZE ++#endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {} ++static inline void pgd_dtor(pgd_t *pgd) {} ++#else + static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) + { + BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); +@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd) + pgd_list_del(pgd); + spin_unlock(&pgd_lock); + } ++#endif + + /* + * List of all pgd's needed for non-PAE so it can invalidate entries +@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd) + * -- nyc + */ + +-#ifdef CONFIG_X86_PAE ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) + /* + * In PAE mode, we need to do a cr3 reload (=tlb flush) when + * updating the top-level pagetable entries to guarantee the +@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd) + * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate + * and initialize the kernel pmds here. + */ +-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD ++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) + + void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) + { +@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) + */ + flush_tlb_mm(mm); + } ++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) ++#define PREALLOCATED_PXDS USER_PGD_PTRS + #else /* !CONFIG_X86_PAE */ + + /* No need to prepopulate any pagetable entries in non-PAE modes. 
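The pxd_t/pyd_t macro layer introduced above lets a single copy of the preallocation code operate on pmds under a pud on 32-bit and on puds under a pgd on 64-bit. A compressed sketch of the same aliasing trick, with invented type names and a hypothetical config switch standing in for CONFIG_X86_64:

    #include <stdio.h>

    typedef struct { int v; } pmd_level;    /* invented stand-in types */
    typedef struct { int v; } pud_level;

    #ifdef SIXTY_FOUR_BIT                   /* hypothetical config switch */
    #define pxd_t pud_level
    #define PXD_NAME "pud"
    #else
    #define pxd_t pmd_level
    #define PXD_NAME "pmd"
    #endif

    static void preallocate(pxd_t *arr, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            arr[i].v = i;                   /* identical code at either level */
    }

    int main(void)
    {
        pxd_t slots[4];

        preallocate(slots, 4);
        printf("preallocated 4 %s-level entries\n", PXD_NAME);
        return 0;
    }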
*/ +-#define PREALLOCATED_PMDS 0 ++#define PREALLOCATED_PXDS 0 + + #endif /* CONFIG_X86_PAE */ + +-static void free_pmds(pmd_t *pmds[]) ++static void free_pxds(pxd_t *pxds[]) + { + int i; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) +- if (pmds[i]) { +- pgtable_pmd_page_dtor(virt_to_page(pmds[i])); +- free_page((unsigned long)pmds[i]); ++ for(i = 0; i < PREALLOCATED_PXDS; i++) ++ if (pxds[i]) { ++ pgtable_pxd_page_dtor(virt_to_page(pxds[i])); ++ free_page((unsigned long)pxds[i]); + } + } + +-static int preallocate_pmds(pmd_t *pmds[]) ++static int preallocate_pxds(pxd_t *pxds[]) + { + int i; + bool failed = false; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) { +- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); +- if (!pmd) ++ for(i = 0; i < PREALLOCATED_PXDS; i++) { ++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP); ++ if (!pxd) + failed = true; +- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { +- free_page((unsigned long)pmd); +- pmd = NULL; ++ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) { ++ free_page((unsigned long)pxd); ++ pxd = NULL; + failed = true; + } +- pmds[i] = pmd; ++ pxds[i] = pxd; + } + + if (failed) { +- free_pmds(pmds); ++ free_pxds(pxds); + return -ENOMEM; + } + +@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[]) + * preallocate which never got a corresponding vma will need to be + * freed manually. + */ +-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) ++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) + { + int i; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) { ++ for(i = 0; i < PREALLOCATED_PXDS; i++) { + pgd_t pgd = pgdp[i]; + + if (pgd_val(pgd) != 0) { +- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); ++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); + +- pgdp[i] = native_make_pgd(0); ++ set_pgd(pgdp + i, native_make_pgd(0)); + +- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); +- pmd_free(mm, pmd); ++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); ++ pxd_free(mm, pxd); + } + } + } + +-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) ++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) + { +- pud_t *pud; ++ pyd_t *pyd; + int i; + +- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ ++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ + return; + +- pud = pud_offset(pgd, 0); +- +- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) { +- pmd_t *pmd = pmds[i]; ++#ifdef CONFIG_X86_64 ++ pyd = pyd_offset(mm, 0L); ++#else ++ pyd = pyd_offset(pgd, 0L); ++#endif + ++ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) { ++ pxd_t *pxd = pxds[i]; + if (i >= KERNEL_PGD_BOUNDARY) +- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), +- sizeof(pmd_t) * PTRS_PER_PMD); ++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), ++ sizeof(pxd_t) * PTRS_PER_PMD); + +- pud_populate(mm, pud, pmd); ++ pyd_populate(mm, pyd, pxd); + } + } + + pgd_t *pgd_alloc(struct mm_struct *mm) + { + pgd_t *pgd; +- pmd_t *pmds[PREALLOCATED_PMDS]; ++ pxd_t *pxds[PREALLOCATED_PXDS]; + + pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); + +@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + + mm->pgd = pgd; + +- if (preallocate_pmds(pmds) != 0) ++ if (preallocate_pxds(pxds) != 0) + goto out_free_pgd; + + if (paravirt_pgd_alloc(mm) != 0) +- goto out_free_pmds; ++ goto out_free_pxds; + + /* + * Make sure that pre-populating the pmds is atomic with +@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + spin_lock(&pgd_lock); + + pgd_ctor(mm, pgd); +- 
pgd_prepopulate_pmd(mm, pgd, pmds); ++ pgd_prepopulate_pxd(mm, pgd, pxds); + + spin_unlock(&pgd_lock); + + return pgd; + +-out_free_pmds: +- free_pmds(pmds); ++out_free_pxds: ++ free_pxds(pxds); + out_free_pgd: + free_page((unsigned long)pgd); + out: +@@ -313,7 +380,7 @@ out: + + void pgd_free(struct mm_struct *mm, pgd_t *pgd) + { +- pgd_mop_up_pmds(mm, pgd); ++ pgd_mop_up_pxds(mm, pgd); + pgd_dtor(pgd); + paravirt_pgd_free(mm, pgd); + free_page((unsigned long)pgd); +diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c +index a69bcb8..19068ab 100644 +--- a/arch/x86/mm/pgtable_32.c ++++ b/arch/x86/mm/pgtable_32.c +@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) + return; + } + pte = pte_offset_kernel(pmd, vaddr); ++ ++ pax_open_kernel(); + if (pte_val(pteval)) + set_pte_at(&init_mm, vaddr, pte, pteval); + else + pte_clear(&init_mm, vaddr, pte); ++ pax_close_kernel(); + + /* + * It's enough to flush this one mapping. +diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c +index e666cbb..61788c45 100644 +--- a/arch/x86/mm/physaddr.c ++++ b/arch/x86/mm/physaddr.c +@@ -10,7 +10,7 @@ + #ifdef CONFIG_X86_64 + + #ifdef CONFIG_DEBUG_VIRTUAL +-unsigned long __phys_addr(unsigned long x) ++unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x) + { + unsigned long y = x - __START_KERNEL_map; + +@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid); + #else + + #ifdef CONFIG_DEBUG_VIRTUAL +-unsigned long __phys_addr(unsigned long x) ++unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x) + { + unsigned long phys_addr = x - PAGE_OFFSET; + /* VMALLOC_* aren't constants */ +diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c +index 90555bf..f5f1828 100644 +--- a/arch/x86/mm/setup_nx.c ++++ b/arch/x86/mm/setup_nx.c +@@ -5,8 +5,10 @@ + #include <asm/pgtable.h> + #include <asm/proto.h> + ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + static int disable_nx; + ++#ifndef CONFIG_PAX_PAGEEXEC + /* + * noexec = on|off + * +@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str) + return 0; + } + early_param("noexec", noexec_setup); ++#endif ++ ++#endif + + void x86_configure_nx(void) + { ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if (cpu_has_nx && !disable_nx) + __supported_pte_mask |= _PAGE_NX; + else ++#endif + __supported_pte_mask &= ~_PAGE_NX; + } + +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index dd8dda1..9e9b0f6 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -48,7 +48,11 @@ void leave_mm(int cpu) + BUG(); + if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { + cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(swapper_pg_dir); ++#endif ++ + } + } + EXPORT_SYMBOL_GPL(leave_mm); +diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c +new file mode 100644 +index 0000000..dace51c +--- /dev/null ++++ b/arch/x86/mm/uderef_64.c +@@ -0,0 +1,37 @@ ++#include <linux/mm.h> ++#include <asm/pgtable.h> ++#include <asm/uaccess.h> ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++/* PaX: due to the special call convention these functions must ++ * - remain leaf functions under all configurations, ++ * - never be called directly, only dereferenced from the wrappers. 
++ */ ++void __pax_open_userland(void) ++{ ++ unsigned int cpu; ++ ++ if (unlikely(!segment_eq(get_fs(), USER_DS))) ++ return; ++ ++ cpu = raw_get_cpu(); ++ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL); ++ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH); ++ raw_put_cpu_no_resched(); ++} ++EXPORT_SYMBOL(__pax_open_userland); ++ ++void __pax_close_userland(void) ++{ ++ unsigned int cpu; ++ ++ if (unlikely(!segment_eq(get_fs(), USER_DS))) ++ return; ++ ++ cpu = raw_get_cpu(); ++ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER); ++ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH); ++ raw_put_cpu_no_resched(); ++} ++EXPORT_SYMBOL(__pax_close_userland); ++#endif +diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S +index 0149575..f746de8 100644 +--- a/arch/x86/net/bpf_jit.S ++++ b/arch/x86/net/bpf_jit.S +@@ -9,6 +9,7 @@ + */ + #include <linux/linkage.h> + #include <asm/dwarf2.h> ++#include <asm/alternative-asm.h> + + /* + * Calling convention : +@@ -35,6 +36,7 @@ sk_load_word_positive_offset: + jle bpf_slow_path_word + mov (SKBDATA,%rsi),%eax + bswap %eax /* ntohl() */ ++ pax_force_retaddr + ret + + sk_load_half: +@@ -52,6 +54,7 @@ sk_load_half_positive_offset: + jle bpf_slow_path_half + movzwl (SKBDATA,%rsi),%eax + rol $8,%ax # ntohs() ++ pax_force_retaddr + ret + + sk_load_byte: +@@ -66,6 +69,7 @@ sk_load_byte_positive_offset: + cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ + jle bpf_slow_path_byte + movzbl (SKBDATA,%rsi),%eax ++ pax_force_retaddr + ret + + /** +@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset: + movzbl (SKBDATA,%rsi),%ebx + and $15,%bl + shl $2,%bl ++ pax_force_retaddr + ret + + /* rsi contains offset and can be scratched */ +@@ -109,6 +114,7 @@ bpf_slow_path_word: + js bpf_error + mov -12(%rbp),%eax + bswap %eax ++ pax_force_retaddr + ret + + bpf_slow_path_half: +@@ -117,12 +123,14 @@ bpf_slow_path_half: + mov -12(%rbp),%ax + rol $8,%ax + movzwl %ax,%eax ++ pax_force_retaddr + ret + + bpf_slow_path_byte: + bpf_slow_path_common(1) + js bpf_error + movzbl -12(%rbp),%eax ++ pax_force_retaddr + ret + + bpf_slow_path_byte_msh: +@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh: + and $15,%al + shl $2,%al + xchg %eax,%ebx ++ pax_force_retaddr + ret + + #define sk_negative_common(SIZE) \ +@@ -157,6 +166,7 @@ sk_load_word_negative_offset: + sk_negative_common(4) + mov (%rax), %eax + bswap %eax ++ pax_force_retaddr + ret + + bpf_slow_path_half_neg: +@@ -168,6 +178,7 @@ sk_load_half_negative_offset: + mov (%rax),%ax + rol $8,%ax + movzwl %ax,%eax ++ pax_force_retaddr + ret + + bpf_slow_path_byte_neg: +@@ -177,6 +188,7 @@ sk_load_byte_negative_offset: + .globl sk_load_byte_negative_offset + sk_negative_common(1) + movzbl (%rax), %eax ++ pax_force_retaddr + ret + + bpf_slow_path_byte_msh_neg: +@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset: + and $15,%al + shl $2,%al + xchg %eax,%ebx ++ pax_force_retaddr + ret + + bpf_error: +@@ -197,4 +210,5 @@ bpf_error: + xor %eax,%eax + mov -8(%rbp),%rbx + leaveq ++ pax_force_retaddr + ret +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index af2d431..3cf24f0b 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) + return ptr + len; + } + ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++#define MAX_INSTR_CODE_SIZE 96 ++#else ++#define MAX_INSTR_CODE_SIZE 64 ++#endif ++ + #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0) + + 
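Under CONFIG_GRKERNSEC_JIT_HARDEN the emitter never places an attacker-controlled 32-bit immediate directly in the instruction stream; the DILUTE_CONST_SEQUENCE macro that follows emits a random key plus the key XORed with the value, so the original constant only ever exists in a register at run time. A standalone sketch of that blinding scheme, with prandom_u32() modeled by rand():

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <time.h>

    int main(void)
    {
        uint32_t imm = 0xdeadbeef;          /* attacker-chosen filter constant */
        uint32_t key, a, b, reg;

        srand((unsigned)time(NULL));
        key = (uint32_t)rand();             /* models prandom_u32() per insn */

        a = key;                            /* emitted: mov ecx, key     */
        b = key ^ imm;                      /* emitted: xor ecx, key^imm */

        /* neither emitted word equals imm, yet ecx recovers it: */
        reg = a ^ b;

        printf("code bytes hold %08x %08x; register holds %08x\n", a, b, reg);
        return reg == imm ? 0 : 1;
    }

This defeats JIT spraying, where a filter's immediates are chosen so that jumping into the middle of them executes attacker code.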
#define EMIT1(b1) EMIT(b1, 1) + #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) + #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) + #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) ++ ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++/* original constant will appear in ecx */ ++#define DILUTE_CONST_SEQUENCE(_off, _key) \ ++do { \ ++ /* mov ecx, randkey */ \ ++ EMIT1(0xb9); \ ++ EMIT(_key, 4); \ ++ /* xor ecx, randkey ^ off */ \ ++ EMIT2(0x81, 0xf1); \ ++ EMIT((_key) ^ (_off), 4); \ ++} while (0) ++ ++#define EMIT1_off32(b1, _off) \ ++do { \ ++ switch (b1) { \ ++ case 0x05: /* add eax, imm32 */ \ ++ case 0x2d: /* sub eax, imm32 */ \ ++ case 0x25: /* and eax, imm32 */ \ ++ case 0x0d: /* or eax, imm32 */ \ ++ case 0xb8: /* mov eax, imm32 */ \ ++ case 0x35: /* xor eax, imm32 */ \ ++ case 0x3d: /* cmp eax, imm32 */ \ ++ case 0xa9: /* test eax, imm32 */ \ ++ DILUTE_CONST_SEQUENCE(_off, randkey); \ ++ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\ ++ break; \ ++ case 0xbb: /* mov ebx, imm32 */ \ ++ DILUTE_CONST_SEQUENCE(_off, randkey); \ ++ /* mov ebx, ecx */ \ ++ EMIT2(0x89, 0xcb); \ ++ break; \ ++ case 0xbe: /* mov esi, imm32 */ \ ++ DILUTE_CONST_SEQUENCE(_off, randkey); \ ++ /* mov esi, ecx */ \ ++ EMIT2(0x89, 0xce); \ ++ break; \ ++ case 0xe8: /* call rel imm32, always to known funcs */ \ ++ EMIT1(b1); \ ++ EMIT(_off, 4); \ ++ break; \ ++ case 0xe9: /* jmp rel imm32 */ \ ++ EMIT1(b1); \ ++ EMIT(_off, 4); \ ++ /* prevent fall-through, we're not called if off = 0 */ \ ++ EMIT(0xcccccccc, 4); \ ++ EMIT(0xcccccccc, 4); \ ++ break; \ ++ default: \ ++ BUILD_BUG(); \ ++ } \ ++} while (0) ++ ++#define EMIT2_off32(b1, b2, _off) \ ++do { \ ++ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \ ++ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \ ++ EMIT(randkey, 4); \ ++ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \ ++ EMIT((_off) - randkey, 4); \ ++ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\ ++ DILUTE_CONST_SEQUENCE(_off, randkey); \ ++ /* imul eax, ecx */ \ ++ EMIT3(0x0f, 0xaf, 0xc1); \ ++ } else { \ ++ BUILD_BUG(); \ ++ } \ ++} while (0) ++#else + #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0) ++#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0) ++#endif + + #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */ + #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */ +@@ -91,6 +168,24 @@ do { \ + #define X86_JBE 0x76 + #define X86_JA 0x77 + ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++#define APPEND_FLOW_VERIFY() \ ++do { \ ++ /* mov ecx, randkey */ \ ++ EMIT1(0xb9); \ ++ EMIT(randkey, 4); \ ++ /* cmp ecx, randkey */ \ ++ EMIT2(0x81, 0xf9); \ ++ EMIT(randkey, 4); \ ++ /* jz after 8 int 3s */ \ ++ EMIT2(0x74, 0x08); \ ++ EMIT(0xcccccccc, 4); \ ++ EMIT(0xcccccccc, 4); \ ++} while (0) ++#else ++#define APPEND_FLOW_VERIFY() do { } while (0) ++#endif ++ + #define EMIT_COND_JMP(op, offset) \ + do { \ + if (is_near(offset)) \ +@@ -98,6 +193,7 @@ do { \ + else { \ + EMIT2(0x0f, op + 0x10); \ + EMIT(offset, 4); /* jxx .+off32 */ \ ++ APPEND_FLOW_VERIFY(); \ + } \ + } while (0) + +@@ -145,55 +241,54 @@ static int pkt_type_offset(void) + return -1; + } + +-struct bpf_binary_header { +- unsigned int pages; +- /* Note : for security reasons, bpf code will follow a randomly +- * sized amount of int3 instructions +- */ +- u8 image[]; +-}; +- +-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, ++/* Note : for security reasons, bpf code 
will follow a randomly ++ * sized amount of int3 instructions ++ */ ++static u8 *bpf_alloc_binary(unsigned int proglen, + u8 **image_ptr) + { + unsigned int sz, hole; +- struct bpf_binary_header *header; ++ u8 *header; + + /* Most of BPF filters are really small, + * but if some of them fill a page, allow at least + * 128 extra bytes to insert a random section of int3 + */ +- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE); +- header = module_alloc(sz); ++ sz = round_up(proglen + 128, PAGE_SIZE); ++ header = module_alloc_exec(sz); + if (!header) + return NULL; + ++ pax_open_kernel(); + memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ ++ pax_close_kernel(); + +- header->pages = sz / PAGE_SIZE; +- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); ++ hole = PAGE_SIZE - (proglen & ~PAGE_MASK); + + /* insert a random number of int3 instructions before BPF code */ +- *image_ptr = &header->image[prandom_u32() % hole]; ++ *image_ptr = &header[prandom_u32() % hole]; + return header; + } + + void bpf_jit_compile(struct sk_filter *fp) + { +- u8 temp[64]; ++ u8 temp[MAX_INSTR_CODE_SIZE]; + u8 *prog; + unsigned int proglen, oldproglen = 0; + int ilen, i; + int t_offset, f_offset; + u8 t_op, f_op, seen = 0, pass; + u8 *image = NULL; +- struct bpf_binary_header *header = NULL; ++ u8 *header = NULL; + u8 *func; + int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ + unsigned int cleanup_addr; /* epilogue code offset */ + unsigned int *addrs; + const struct sock_filter *filter = fp->insns; + int flen = fp->len; ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++ unsigned int randkey; ++#endif + + if (!bpf_jit_enable) + return; +@@ -203,10 +298,10 @@ void bpf_jit_compile(struct sk_filter *fp) + return; + + /* Before first pass, make a rough estimation of addrs[] +- * each bpf instruction is translated to less than 64 bytes ++ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes + */ + for (proglen = 0, i = 0; i < flen; i++) { +- proglen += 64; ++ proglen += MAX_INSTR_CODE_SIZE; + addrs[i] = proglen; + } + cleanup_addr = proglen; /* epilogue address */ +@@ -285,6 +380,10 @@ void bpf_jit_compile(struct sk_filter *fp) + for (i = 0; i < flen; i++) { + unsigned int K = filter[i].k; + ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++ randkey = prandom_u32(); ++#endif ++ + switch (filter[i].code) { + case BPF_S_ALU_ADD_X: /* A += X; */ + seen |= SEEN_XREG; +@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp) + case BPF_S_ALU_MUL_K: /* A *= K */ + if (is_imm8(K)) + EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ +- else { +- EMIT2(0x69, 0xc0); /* imul imm32,%eax */ +- EMIT(K, 4); +- } ++ else ++ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */ + break; + case BPF_S_ALU_DIV_X: /* A /= X; */ + seen |= SEEN_XREG; +@@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *fp) + break; + } + EMIT2(0x31, 0xd2); /* xor %edx,%edx */ ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++ DILUTE_CONST_SEQUENCE(K, randkey); ++#else + EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ ++#endif + EMIT2(0xf7, 0xf1); /* div %ecx */ + EMIT2(0x89, 0xd0); /* mov %edx,%eax */ + break; +@@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *fp) + if (K == 1) + break; + EMIT2(0x31, 0xd2); /* xor %edx,%edx */ ++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN ++ DILUTE_CONST_SEQUENCE(K, randkey); ++#else + EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ ++#endif + EMIT2(0xf7, 0xf1); /* div %ecx */ + break; + case BPF_S_ALU_AND_X: +@@ -643,8 +748,7 @@ common_load_ind: seen |= 
SEEN_DATAREF | SEEN_XREG; + if (is_imm8(K)) { + EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ + } else { +- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ +- EMIT(K, 4); ++ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */ + } + } else { + EMIT2(0x89,0xde); /* mov %ebx,%esi */ +@@ -734,10 +838,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; + if (unlikely(proglen + ilen > oldproglen)) { + pr_err("bpb_jit_compile fatal error\n"); + kfree(addrs); +- module_free(NULL, header); ++ module_free_exec(NULL, image); + return; + } ++ pax_open_kernel(); + memcpy(image + proglen, temp, ilen); ++ pax_close_kernel(); + } + proglen += ilen; + addrs[i] = proglen; +@@ -770,7 +876,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; + + if (image) { + bpf_flush_icache(header, image + proglen); +- set_memory_ro((unsigned long)header, header->pages); + fp->bpf_func = (void *)image; + } + out: +@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work) + { + struct sk_filter *fp = container_of(work, struct sk_filter, work); + unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; +- struct bpf_binary_header *header = (void *)addr; + +- set_memory_rw(addr, header->pages); +- module_free(NULL, header); ++ set_memory_rw(addr, 1); ++ module_free_exec(NULL, (void *)addr); + kfree(fp); + } + +diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c +index 5d04be5..2beeaa2 100644 +--- a/arch/x86/oprofile/backtrace.c ++++ b/arch/x86/oprofile/backtrace.c +@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head) + struct stack_frame_ia32 *fp; + unsigned long bytes; + +- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); ++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); + if (bytes != 0) + return NULL; + +- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); ++ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame); + + oprofile_add_trace(bufhead[0].return_address); + +@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head) + struct stack_frame bufhead[2]; + unsigned long bytes; + +- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); ++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead)); + if (bytes != 0) + return NULL; + +@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth) + { + struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); + +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + unsigned long stack = kernel_stack_pointer(regs); + if (depth) + dump_trace(NULL, regs, (unsigned long *)stack, 0, +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c +index 6890d84..1dad1f1 100644 +--- a/arch/x86/oprofile/nmi_int.c ++++ b/arch/x86/oprofile/nmi_int.c +@@ -23,6 +23,7 @@ + #include <asm/nmi.h> + #include <asm/msr.h> + #include <asm/apic.h> ++#include <asm/pgtable.h> + + #include "op_counter.h" + #include "op_x86_model.h" +@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops) + if (ret) + return ret; + +- if (!model->num_virt_counters) +- model->num_virt_counters = model->num_counters; ++ if (!model->num_virt_counters) { ++ pax_open_kernel(); ++ *(unsigned int *)&model->num_virt_counters = model->num_counters; ++ pax_close_kernel(); ++ } + + mux_init(ops); + +diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c +index 50d86c0..7985318 
100644 +--- a/arch/x86/oprofile/op_model_amd.c ++++ b/arch/x86/oprofile/op_model_amd.c +@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops) + num_counters = AMD64_NUM_COUNTERS; + } + +- op_amd_spec.num_counters = num_counters; +- op_amd_spec.num_controls = num_counters; +- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS); ++ pax_open_kernel(); ++ *(unsigned int *)&op_amd_spec.num_counters = num_counters; ++ *(unsigned int *)&op_amd_spec.num_controls = num_counters; ++ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS); ++ pax_close_kernel(); + + return 0; + } +diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c +index d90528e..0127e2b 100644 +--- a/arch/x86/oprofile/op_model_ppro.c ++++ b/arch/x86/oprofile/op_model_ppro.c +@@ -19,6 +19,7 @@ + #include <asm/msr.h> + #include <asm/apic.h> + #include <asm/nmi.h> ++#include <asm/pgtable.h> + + #include "op_x86_model.h" + #include "op_counter.h" +@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void) + + num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER); + +- op_arch_perfmon_spec.num_counters = num_counters; +- op_arch_perfmon_spec.num_controls = num_counters; ++ pax_open_kernel(); ++ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters; ++ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters; ++ pax_close_kernel(); + } + + static int arch_perfmon_init(struct oprofile_operations *ignore) +diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h +index 71e8a67..6a313bb 100644 +--- a/arch/x86/oprofile/op_x86_model.h ++++ b/arch/x86/oprofile/op_x86_model.h +@@ -52,7 +52,7 @@ struct op_x86_model_spec { + void (*switch_ctrl)(struct op_x86_model_spec const *model, + struct op_msrs const * const msrs); + #endif +-}; ++} __do_const; + + struct op_counter_config; + +diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c +index 84b9d67..260e5ff 100644 +--- a/arch/x86/pci/intel_mid_pci.c ++++ b/arch/x86/pci/intel_mid_pci.c +@@ -245,7 +245,7 @@ int __init intel_mid_pci_init(void) + pr_info("Intel MID platform detected, using MID PCI ops\n"); + pci_mmcfg_late_init(); + pcibios_enable_irq = intel_mid_pci_irq_enable; +- pci_root_ops = intel_mid_pci_ops; ++ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops); + pci_soc_mode = 1; + /* Continue with standard init */ + return 1; +diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c +index 372e9b8..e775a6c 100644 +--- a/arch/x86/pci/irq.c ++++ b/arch/x86/pci/irq.c +@@ -50,7 +50,7 @@ struct irq_router { + struct irq_router_handler { + u16 vendor; + int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); +-}; ++} __do_const; + + int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq; + void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; +@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router + return 0; + } + +-static __initdata struct irq_router_handler pirq_routers[] = { ++static __initconst const struct irq_router_handler pirq_routers[] = { + { PCI_VENDOR_ID_INTEL, intel_router_probe }, + { PCI_VENDOR_ID_AL, ali_router_probe }, + { PCI_VENDOR_ID_ITE, ite_router_probe }, +@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev; + static void __init pirq_find_router(struct irq_router *r) + { + struct irq_routing_table *rt = pirq_table; +- struct irq_router_handler *h; ++ const struct irq_router_handler *h; + + 
#ifdef CONFIG_PCI_BIOS + if (!rt->signature) { +@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) + return 0; + } + +-static struct dmi_system_id __initdata pciirq_dmi_table[] = { ++static const struct dmi_system_id __initconst pciirq_dmi_table[] = { + { + .callback = fix_broken_hp_bios_irq9, + .ident = "HP Pavilion N5400 Series Laptop", +diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c +index c77b24a..c979855 100644 +--- a/arch/x86/pci/pcbios.c ++++ b/arch/x86/pci/pcbios.c +@@ -79,7 +79,7 @@ union bios32 { + static struct { + unsigned long address; + unsigned short segment; +-} bios32_indirect = { 0, __KERNEL_CS }; ++} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; + + /* + * Returns the entry point for the given service, NULL on error +@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service) + unsigned long length; /* %ecx */ + unsigned long entry; /* %edx */ + unsigned long flags; ++ struct desc_struct d, *gdt; + + local_irq_save(flags); +- __asm__("lcall *(%%edi); cld" ++ ++ gdt = get_cpu_gdt_table(smp_processor_id()); ++ ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); ++ ++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" + : "=a" (return_code), + "=b" (address), + "=c" (length), + "=d" (entry) + : "0" (service), + "1" (0), +- "D" (&bios32_indirect)); ++ "D" (&bios32_indirect), ++ "r"(__PCIBIOS_DS) ++ : "memory"); ++ ++ pax_open_kernel(); ++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; ++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; ++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; ++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; ++ pax_close_kernel(); ++ + local_irq_restore(flags); + + switch (return_code) { +- case 0: +- return address + entry; +- case 0x80: /* Not present */ +- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); +- return 0; +- default: /* Shouldn't happen */ +- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", +- service, return_code); ++ case 0: { ++ int cpu; ++ unsigned char flags; ++ ++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); ++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { ++ printk(KERN_WARNING "bios32_service: not valid\n"); + return 0; ++ } ++ address = address + PAGE_OFFSET; ++ length += 16UL; /* some BIOSs underreport this... 
*/ ++ flags = 4; ++ if (length >= 64*1024*1024) { ++ length >>= PAGE_SHIFT; ++ flags |= 8; ++ } ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ gdt = get_cpu_gdt_table(cpu); ++ pack_descriptor(&d, address, length, 0x9b, flags); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, address, length, 0x93, flags); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); ++ } ++ return entry; ++ } ++ case 0x80: /* Not present */ ++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); ++ return 0; ++ default: /* Shouldn't happen */ ++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", ++ service, return_code); ++ return 0; + } + } + + static struct { + unsigned long address; + unsigned short segment; +-} pci_indirect = { 0, __KERNEL_CS }; ++} pci_indirect __read_only = { 0, __PCIBIOS_CS }; + +-static int pci_bios_present; ++static int pci_bios_present __read_only; + + static int check_pcibios(void) + { +@@ -131,11 +174,13 @@ static int check_pcibios(void) + unsigned long flags, pcibios_entry; + + if ((pcibios_entry = bios32_service(PCI_SERVICE))) { +- pci_indirect.address = pcibios_entry + PAGE_OFFSET; ++ pci_indirect.address = pcibios_entry; + + local_irq_save(flags); +- __asm__( +- "lcall *(%%edi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%edi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -144,7 +189,8 @@ static int check_pcibios(void) + "=b" (ebx), + "=c" (ecx) + : "1" (PCIBIOS_PCI_BIOS_PRESENT), +- "D" (&pci_indirect) ++ "D" (&pci_indirect), ++ "r" (__PCIBIOS_DS) + : "memory"); + local_irq_restore(flags); + +@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + + switch (len) { + case 1: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + : "1" (PCIBIOS_READ_CONFIG_BYTE), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + /* + * Zero-extend the result beyond 8 bits, do not trust the + * BIOS having done it: +@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + *value &= 0xff; + break; + case 2: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + : "1" (PCIBIOS_READ_CONFIG_WORD), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + /* + * Zero-extend the result beyond 16 bits, do not trust the + * BIOS having done it: +@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + *value &= 0xffff; + break; + case 4: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, + : "1" (PCIBIOS_READ_CONFIG_DWORD), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + } + +@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + + switch (len) { + case 1: 
+- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + case 2: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + case 4: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + } + +@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) + + DBG("PCI: Fetching IRQ routing table... "); + __asm__("push %%es\n\t" ++ "movw %w8, %%ds\n\t" + "push %%ds\n\t" + "pop %%es\n\t" +- "lcall *(%%esi); cld\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" + "pop %%es\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) + "1" (0), + "D" ((long) &opt), + "S" (&pci_indirect), +- "m" (opt) ++ "m" (opt), ++ "r" (__PCIBIOS_DS) + : "memory"); + DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); + if (ret & 0xff00) +@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) + { + int ret; + +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w5, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) + : "0" (PCIBIOS_SET_PCI_HW_INT), + "b" ((dev->bus->number << 8) | dev->devfn), + "c" ((irq << 8) | (pin + 10)), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + return !(ret & 0xff00); + } + EXPORT_SYMBOL(pcibios_set_irq_routing); +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c +index 9ee3491..872192f 100644 +--- a/arch/x86/platform/efi/efi_32.c ++++ b/arch/x86/platform/efi/efi_32.c +@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void) + { + struct desc_ptr gdt_descr; + ++#ifdef CONFIG_PAX_KERNEXEC ++ struct desc_struct d; ++#endif ++ + local_irq_save(efi_rt_eflags); + + load_cr3(initial_page_table); + __flush_tlb_all(); + ++#ifdef CONFIG_PAX_KERNEXEC ++ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); ++#endif ++ + gdt_descr.address = __pa(get_cpu_gdt_table(0)); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); +@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void) + { + struct desc_ptr gdt_descr; + ++#ifdef CONFIG_PAX_KERNEXEC ++ struct desc_struct d; ++ ++ memset(&d, 0, sizeof d); ++ write_gdt_entry(get_cpu_gdt_table(0), 
GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S); ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S); ++#endif ++ + gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id(), kernel)); ++#else + load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + + local_irq_restore(efi_rt_eflags); +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c +index 666b74a..673d88f 100644 +--- a/arch/x86/platform/efi/efi_64.c ++++ b/arch/x86/platform/efi/efi_64.c +@@ -97,6 +97,11 @@ void __init efi_call_phys_prelog(void) + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); + } ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(swapper_pg_dir); ++#endif ++ + __flush_tlb_all(); + } + +@@ -114,6 +119,11 @@ void __init efi_call_phys_epilog(void) + for (pgd = 0; pgd < n_pgds; pgd++) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ load_cr3(get_cpu_pgd(smp_processor_id(), kernel)); ++#endif ++ + __flush_tlb_all(); + local_irq_restore(efi_flags); + early_code_mapping_set_exec(0); +diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S +index fbe66e6..eae5e38 100644 +--- a/arch/x86/platform/efi/efi_stub_32.S ++++ b/arch/x86/platform/efi/efi_stub_32.S +@@ -6,7 +6,9 @@ + */ + + #include <linux/linkage.h> ++#include <linux/init.h> + #include <asm/page_types.h> ++#include <asm/segment.h> + + /* + * efi_call_phys(void *, ...) is a function with variable parameters. +@@ -20,7 +22,7 @@ + * service functions will comply with gcc calling convention, too. + */ + +-.text ++__INIT + ENTRY(efi_call_phys) + /* + * 0. The function can only be called in Linux kernel. So CS has been +@@ -36,10 +38,24 @@ ENTRY(efi_call_phys) + * The mapping of lower virtual memory has been created in prelog and + * epilog. + */ +- movl $1f, %edx +- subl $__PAGE_OFFSET, %edx +- jmp *%edx ++#ifdef CONFIG_PAX_KERNEXEC ++ movl $(__KERNEXEC_EFI_DS), %edx ++ mov %edx, %ds ++ mov %edx, %es ++ mov %edx, %ss ++ addl $2f,(1f) ++ ljmp *(1f) ++ ++__INITDATA ++1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS ++.previous ++ ++2: ++ subl $2b,(1b) ++#else ++ jmp 1f-__PAGE_OFFSET + 1: ++#endif + + /* + * 2. Now on the top of stack is the return +@@ -47,14 +63,8 @@ ENTRY(efi_call_phys) + * parameter 2, ..., param n. To make things easy, we save the return + * address of efi_call_phys in a global variable. + */ +- popl %edx +- movl %edx, saved_return_addr +- /* get the function pointer into ECX*/ +- popl %ecx +- movl %ecx, efi_rt_function_ptr +- movl $2f, %edx +- subl $__PAGE_OFFSET, %edx +- pushl %edx ++ popl (saved_return_addr) ++ popl (efi_rt_function_ptr) + + /* + * 3. Clear PG bit in %CR0. +@@ -73,9 +83,8 @@ ENTRY(efi_call_phys) + /* + * 5. Call the physical function. + */ +- jmp *%ecx ++ call *(efi_rt_function_ptr-__PAGE_OFFSET) + +-2: + /* + * 6. After EFI runtime service returns, control will return to + * following instruction. We'd better readjust stack pointer first. +@@ -88,35 +97,36 @@ ENTRY(efi_call_phys) + movl %cr0, %edx + orl $0x80000000, %edx + movl %edx, %cr0 +- jmp 1f +-1: ++ + /* + * 8. Now restore the virtual mode from flat mode by + * adding EIP with PAGE_OFFSET. 
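Both halves of the stub rely on the same arithmetic: subtracting __PAGE_OFFSET from a label yields the physical address to jump to while paging is off, and adding it back restores the kernel's virtual view. A toy calculation with the usual 32-bit 3G/1G split, purely illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xc0000000u         /* typical 32-bit 3G/1G split */

    int main(void)
    {
        uint32_t virt = 0xc01f2340u;        /* hypothetical "1f" label address */
        uint32_t phys = virt - PAGE_OFFSET; /* movl $1f,%edx; subl $__PAGE_OFFSET,%edx */
        uint32_t back = phys + PAGE_OFFSET; /* return to the virtual mapping */

        printf("virt %#x -> phys %#x -> virt %#x\n", virt, phys, back);
        return back == virt ? 0 : 1;
    }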
+ */ +- movl $1f, %edx +- jmp *%edx ++#ifdef CONFIG_PAX_KERNEXEC ++ movl $(__KERNEL_DS), %edx ++ mov %edx, %ds ++ mov %edx, %es ++ mov %edx, %ss ++ ljmp $(__KERNEL_CS),$1f ++#else ++ jmp 1f+__PAGE_OFFSET ++#endif + 1: + + /* + * 9. Balance the stack. And because EAX contain the return value, + * we'd better not clobber it. + */ +- leal efi_rt_function_ptr, %edx +- movl (%edx), %ecx +- pushl %ecx ++ pushl (efi_rt_function_ptr) + + /* +- * 10. Push the saved return address onto the stack and return. ++ * 10. Return to the saved return address. + */ +- leal saved_return_addr, %edx +- movl (%edx), %ecx +- pushl %ecx +- ret ++ jmpl *(saved_return_addr) + ENDPROC(efi_call_phys) + .previous + +-.data ++__INITDATA + saved_return_addr: + .long 0 + efi_rt_function_ptr: +diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S +index 88073b1..1cc2f53 100644 +--- a/arch/x86/platform/efi/efi_stub_64.S ++++ b/arch/x86/platform/efi/efi_stub_64.S +@@ -7,6 +7,7 @@ + */ + + #include <linux/linkage.h> ++#include <asm/alternative-asm.h> + + #define SAVE_XMM \ + mov %rsp, %rax; \ +@@ -77,6 +78,7 @@ ENTRY(efi_call0) + RESTORE_PGT + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call0) + +@@ -89,6 +91,7 @@ ENTRY(efi_call1) + RESTORE_PGT + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call1) + +@@ -101,6 +104,7 @@ ENTRY(efi_call2) + RESTORE_PGT + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call2) + +@@ -114,6 +118,7 @@ ENTRY(efi_call3) + RESTORE_PGT + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call3) + +@@ -128,6 +133,7 @@ ENTRY(efi_call4) + RESTORE_PGT + addq $32, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call4) + +@@ -143,6 +149,7 @@ ENTRY(efi_call5) + RESTORE_PGT + addq $48, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call5) + +@@ -161,6 +168,7 @@ ENTRY(efi_call6) + RESTORE_PGT + addq $48, %rsp + RESTORE_XMM ++ pax_force_retaddr 0, 1 + ret + ENDPROC(efi_call6) + +diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c +index 1bbedc4..eb795b5 100644 +--- a/arch/x86/platform/intel-mid/intel-mid.c ++++ b/arch/x86/platform/intel-mid/intel-mid.c +@@ -71,9 +71,10 @@ static void intel_mid_power_off(void) + { + }; + +-static void intel_mid_reboot(void) ++static void __noreturn intel_mid_reboot(void) + { + intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); ++ BUG(); + } + + static unsigned long __init intel_mid_calibrate_tsc(void) +diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c +index d6ee929..3637cb5 100644 +--- a/arch/x86/platform/olpc/olpc_dt.c ++++ b/arch/x86/platform/olpc/olpc_dt.c +@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size) + return res; + } + +-static struct of_pdt_ops prom_olpc_ops __initdata = { ++static struct of_pdt_ops prom_olpc_ops __initconst = { + .nextprop = olpc_dt_nextprop, + .getproplen = olpc_dt_getproplen, + .getproperty = olpc_dt_getproperty, +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index 424f4c9..f2a2988 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -137,11 +137,8 @@ static void do_fpu_end(void) + static void fix_processor_context(void) + { + int cpu = smp_processor_id(); +- struct tss_struct *t = &per_cpu(init_tss, cpu); +-#ifdef CONFIG_X86_64 +- struct desc_struct *desc = get_cpu_gdt_table(cpu); +- tss_desc tss; +-#endif ++ struct tss_struct *t = init_tss + cpu; ++ + 
set_tss_desc(cpu, t); /* + * This just modifies memory; should not be + * necessary. But... This is necessary, because +@@ -150,10 +147,6 @@ static void fix_processor_context(void) + */ + + #ifdef CONFIG_X86_64 +- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); +- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */ +- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); +- + syscall_init(); /* This sets MSR_*STAR and related */ + #endif + load_TR_desc(); /* This does ltr */ +diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c +index bad628a..a102610 100644 +--- a/arch/x86/realmode/init.c ++++ b/arch/x86/realmode/init.c +@@ -68,7 +68,13 @@ void __init setup_real_mode(void) + __va(real_mode_header->trampoline_header); + + #ifdef CONFIG_X86_32 +- trampoline_header->start = __pa_symbol(startup_32_smp); ++ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp)); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ trampoline_header->start -= LOAD_PHYSICAL_ADDR; ++#endif ++ ++ trampoline_header->boot_cs = __BOOT_CS; + trampoline_header->gdt_limit = __BOOT_DS + 7; + trampoline_header->gdt_base = __pa_symbol(boot_gdt); + #else +@@ -84,7 +90,7 @@ void __init setup_real_mode(void) + *trampoline_cr4_features = read_cr4(); + + trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); +- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd; ++ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX; + trampoline_pgd[511] = init_level4_pgt[511].pgd; + #endif + } +diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile +index 3497f14..cc73b92 100644 +--- a/arch/x86/realmode/rm/Makefile ++++ b/arch/x86/realmode/rm/Makefile +@@ -66,5 +66,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE + + KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \ + -I$(srctree)/arch/x86/boot ++ifdef CONSTIFY_PLUGIN ++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify ++endif + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S +index a28221d..93c40f1 100644 +--- a/arch/x86/realmode/rm/header.S ++++ b/arch/x86/realmode/rm/header.S +@@ -30,7 +30,9 @@ GLOBAL(real_mode_header) + #endif + /* APM/BIOS reboot */ + .long pa_machine_real_restart_asm +-#ifdef CONFIG_X86_64 ++#ifdef CONFIG_X86_32 ++ .long __KERNEL_CS ++#else + .long __KERNEL32_CS + #endif + END(real_mode_header) +diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S +index 48ddd76..c26749f 100644 +--- a/arch/x86/realmode/rm/trampoline_32.S ++++ b/arch/x86/realmode/rm/trampoline_32.S +@@ -24,6 +24,12 @@ + #include <asm/page_types.h> + #include "realmode.h" + ++#ifdef CONFIG_PAX_KERNEXEC ++#define ta(X) (X) ++#else ++#define ta(X) (pa_ ## X) ++#endif ++ + .text + .code16 + +@@ -38,8 +44,6 @@ ENTRY(trampoline_start) + + cli # We should be safe anyway + +- movl tr_start, %eax # where we need to go +- + movl $0xA5A5A5A5, trampoline_status + # write marker for master knows we're running + +@@ -55,7 +59,7 @@ ENTRY(trampoline_start) + movw $1, %dx # protected mode (PE) bit + lmsw %dx # into protected mode + +- ljmpl $__BOOT_CS, $pa_startup_32 ++ ljmpl *(trampoline_header) + + .section ".text32","ax" + .code32 +@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S + .balign 8 + GLOBAL(trampoline_header) + tr_start: .space 4 +- tr_gdt_pad: .space 2 ++ tr_boot_cs: .space 2 + tr_gdt: .space 6 + END(trampoline_header) + 
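The switch from tr_gdt_pad to tr_boot_cs in the trampoline above is what makes the indirect ljmpl *(trampoline_header) work: a far jump through memory reads a six-byte m16:32 operand, a 32-bit offset followed by a 16-bit selector, so storing the boot code segment directly after tr_start turns the header itself into the jump target. A sketch of that layout, with made-up entry and selector values:

    #include <stdio.h>
    #include <stdint.h>

    #pragma pack(push, 1)
    struct far_ptr {                        /* first six bytes of the header */
        uint32_t offset;                    /* tr_start */
        uint16_t selector;                  /* tr_boot_cs */
    };
    #pragma pack(pop)

    int main(void)
    {
        struct far_ptr fp = { 0x00100000u, 0x10 };  /* made-up entry and CS */

        printf("operand is %zu bytes; ljmp would load CS=%#x EIP=%#x\n",
               sizeof(fp), fp.selector, fp.offset);
        return sizeof(fp) == 6 ? 0 : 1;
    }

Parameterizing the selector this way lets KERNEXEC substitute its own boot code segment without patching the jump instruction.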
+diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S +index dac7b20..72dbaca 100644 +--- a/arch/x86/realmode/rm/trampoline_64.S ++++ b/arch/x86/realmode/rm/trampoline_64.S +@@ -93,6 +93,7 @@ ENTRY(startup_32) + movl %edx, %gs + + movl pa_tr_cr4, %eax ++ andl $~X86_CR4_PCIDE, %eax + movl %eax, %cr4 # Enable PAE mode + + # Setup trampoline 4 level pagetables +@@ -106,7 +107,7 @@ ENTRY(startup_32) + wrmsr + + # Enable paging and in turn activate Long Mode +- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax ++ movl $(X86_CR0_PG | X86_CR0_PE), %eax + movl %eax, %cr0 + + /* +diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S +index 9e7e147..25a4158 100644 +--- a/arch/x86/realmode/rm/wakeup_asm.S ++++ b/arch/x86/realmode/rm/wakeup_asm.S +@@ -126,11 +126,10 @@ ENTRY(wakeup_start) + lgdtl pmode_gdt + + /* This really couldn't... */ +- movl pmode_entry, %eax + movl pmode_cr0, %ecx + movl %ecx, %cr0 +- ljmpl $__KERNEL_CS, $pa_startup_32 +- /* -> jmp *%eax in trampoline_32.S */ ++ ++ ljmpl *pmode_entry + #else + jmp trampoline_start + #endif +diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile +index e812034..c747134 100644 +--- a/arch/x86/tools/Makefile ++++ b/arch/x86/tools/Makefile +@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in + + $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c + +-HOST_EXTRACFLAGS += -I$(srctree)/tools/include ++HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb + hostprogs-y += relocs + relocs-objs := relocs_32.o relocs_64.o relocs_common.o + relocs: $(obj)/relocs +diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c +index cfbdbdb..1aa763c 100644 +--- a/arch/x86/tools/relocs.c ++++ b/arch/x86/tools/relocs.c +@@ -1,5 +1,7 @@ + /* This is included from relocs_32/64.c */ + ++#include "../../../include/generated/autoconf.h" ++ + #define ElfW(type) _ElfW(ELF_BITS, type) + #define _ElfW(bits, type) __ElfW(bits, type) + #define __ElfW(bits, type) Elf##bits##_##type +@@ -11,6 +13,7 @@ + #define Elf_Sym ElfW(Sym) + + static Elf_Ehdr ehdr; ++static Elf_Phdr *phdr; + + struct relocs { + uint32_t *offset; +@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp) + } + } + ++static void read_phdrs(FILE *fp) ++{ ++ unsigned int i; ++ ++ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr)); ++ if (!phdr) { ++ die("Unable to allocate %d program headers\n", ++ ehdr.e_phnum); ++ } ++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { ++ die("Seek to %d failed: %s\n", ++ ehdr.e_phoff, strerror(errno)); ++ } ++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { ++ die("Cannot read ELF program headers: %s\n", ++ strerror(errno)); ++ } ++ for(i = 0; i < ehdr.e_phnum; i++) { ++ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type); ++ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset); ++ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr); ++ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr); ++ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz); ++ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz); ++ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags); ++ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align); ++ } ++ ++} ++ + static void read_shdrs(FILE *fp) + { +- int i; ++ unsigned int i; + Elf_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); 
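The read_phdrs() helper added above follows the file's existing reader pattern: allocate e_phnum headers, seek to e_phoff, read them raw, then convert every field to host byte order. A freestanding 32-bit variant of the same pattern, with the die()-on-failure style kept and the byte-swapping step elided:

    #include <stdio.h>
    #include <stdlib.h>
    #include <elf.h>

    static Elf32_Phdr *load_phdrs(FILE *fp, const Elf32_Ehdr *eh)
    {
        Elf32_Phdr *ph = calloc(eh->e_phnum, sizeof(*ph));

        if (!ph) {
            fprintf(stderr, "unable to allocate %u program headers\n",
                    (unsigned)eh->e_phnum);
            exit(1);
        }
        if (fseek(fp, eh->e_phoff, SEEK_SET) < 0 ||
            fread(ph, sizeof(*ph), eh->e_phnum, fp) != eh->e_phnum) {
            fprintf(stderr, "cannot read ELF program headers\n");
            exit(1);
        }
        /* a full reader would run each p_* field through an
         * elf_*_to_cpu()-style byte-order conversion here */
        return ph;
    }

    int main(int argc, char **argv)
    {
        Elf32_Ehdr eh;
        FILE *fp;

        if (argc < 2 || !(fp = fopen(argv[1], "rb")))
            return 1;
        if (fread(&eh, sizeof(eh), 1, fp) != 1 || eh.e_phnum == 0)
            return 1;
        free(load_phdrs(fp, &eh));
        printf("read %u program headers\n", (unsigned)eh.e_phnum);
        fclose(fp);
        return 0;
    }

The relocs tool needs the program headers so it can rebase relocation offsets against the PT_LOAD segment that actually contains each section.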
+@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp) + + static void read_strtabs(FILE *fp) + { +- int i; ++ unsigned int i; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_STRTAB) { +@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp) + + static void read_symtabs(FILE *fp) + { +- int i,j; ++ unsigned int i,j; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_SYMTAB) { +@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp) + } + + +-static void read_relocs(FILE *fp) ++static void read_relocs(FILE *fp, int use_real_mode) + { +- int i,j; ++ unsigned int i,j; ++ uint32_t base; ++ + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_REL_TYPE) { +@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp) + die("Cannot read symbol table: %s\n", + strerror(errno)); + } ++ base = 0; ++ ++#ifdef CONFIG_X86_32 ++ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) { ++ if (phdr[j].p_type != PT_LOAD ) ++ continue; ++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) ++ continue; ++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; ++ break; ++ } ++#endif ++ + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; +- rel->r_offset = elf_addr_to_cpu(rel->r_offset); ++ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base; + rel->r_info = elf_xword_to_cpu(rel->r_info); + #if (SHT_REL_TYPE == SHT_RELA) + rel->r_addend = elf_xword_to_cpu(rel->r_addend); +@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp) + + static void print_absolute_symbols(void) + { +- int i; ++ unsigned int i; + const char *format; + + if (ELF_BITS == 64) +@@ -525,7 +573,7 @@ static void print_absolute_symbols(void) + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + char *sym_strtab; +- int j; ++ unsigned int j; + + if (sec->shdr.sh_type != SHT_SYMTAB) { + continue; +@@ -552,7 +600,7 @@ static void print_absolute_symbols(void) + + static void print_absolute_relocs(void) + { +- int i, printed = 0; ++ unsigned int i, printed = 0; + const char *format; + + if (ELF_BITS == 64) +@@ -565,7 +613,7 @@ static void print_absolute_relocs(void) + struct section *sec_applies, *sec_symtab; + char *sym_strtab; + Elf_Sym *sh_symtab; +- int j; ++ unsigned int j; + if (sec->shdr.sh_type != SHT_REL_TYPE) { + continue; + } +@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset) + static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, + Elf_Sym *sym, const char *symname)) + { +- int i; ++ unsigned int i; + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; +- int j; ++ unsigned int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) { +@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + { + unsigned r_type = ELF32_R_TYPE(rel->r_info); + int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname); ++ char *sym_strtab = sec->link->link->strtab; ++ ++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */ ++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) ++ return 0; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ /* 
Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ ++ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext")) ++ return 0; ++ if (!strcmp(sec_name(sym->st_shndx), ".init.text")) ++ return 0; ++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) ++ return 0; ++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) ++ return 0; ++#endif + + switch (r_type) { + case R_386_NONE: +@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f) + + static void emit_relocs(int as_text, int use_real_mode) + { +- int i; ++ unsigned int i; + int (*write_reloc)(uint32_t, FILE *) = write32; + int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname); +@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text, + { + regex_init(use_real_mode); + read_ehdr(fp); ++ read_phdrs(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); +- read_relocs(fp); ++ read_relocs(fp, use_real_mode); + if (ELF_BITS == 64) + percpu_init(); + if (show_absolute_syms) { +diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c +index 80ffa5b..a33bd15 100644 +--- a/arch/x86/um/tls_32.c ++++ b/arch/x86/um/tls_32.c +@@ -260,7 +260,7 @@ out: + if (unlikely(task == current && + !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { + printk(KERN_ERR "get_tls_entry: task with pid %d got here " +- "without flushed TLS.", current->pid); ++ "without flushed TLS.", task_pid_nr(current)); + } + + return 0; +diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile +index fd14be1..e3c79c0 100644 +--- a/arch/x86/vdso/Makefile ++++ b/arch/x86/vdso/Makefile +@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@ + -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ + sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' + +-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) ++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + GCOV_PROFILE := n + + # +diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c +index d6bfb87..876ee18 100644 +--- a/arch/x86/vdso/vdso32-setup.c ++++ b/arch/x86/vdso/vdso32-setup.c +@@ -25,6 +25,7 @@ + #include <asm/tlbflush.h> + #include <asm/vdso.h> + #include <asm/proto.h> ++#include <asm/mman.h> + + enum { + VDSO_DISABLED = 0, +@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map) + void enable_sep_cpu(void) + { + int cpu = get_cpu(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + + if (!boot_cpu_has(X86_FEATURE_SEP)) { + put_cpu(); +@@ -249,7 +250,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + + return 0; + } +@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + if (compat) + addr = VDSO_HIGH_BASE; + else { +- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); ++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + } + +- current->mm->context.vdso = (void *)addr; ++ current->mm->context.vdso = addr; + + if (compat_uses_vma || !compat) { + /* +@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct 
linux_binprm *bprm, int uses_interp) + } + + current_thread_info()->sysenter_return = +- VDSO32_SYMBOL(addr, SYSENTER_RETURN); ++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN); + + up_fail: + if (ret) +- current->mm->context.vdso = NULL; ++ current->mm->context.vdso = 0; + + up_write(&mm->mmap_sem); + +@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init); + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) ++ return "[vdso]"; ++#endif ++ + return NULL; + } + +@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm) + * Check to see if the corresponding task was created in compat vdso + * mode. + */ +- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) ++ if (mm && mm->context.vdso == VDSO_HIGH_BASE) + return &gate_vma; + return NULL; + } +diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c +index 431e875..cbb23f3 100644 +--- a/arch/x86/vdso/vma.c ++++ b/arch/x86/vdso/vma.c +@@ -16,8 +16,6 @@ + #include <asm/vdso.h> + #include <asm/page.h> + +-unsigned int __read_mostly vdso_enabled = 1; +- + extern char vdso_start[], vdso_end[]; + extern unsigned short vdso_sync_cpuid; + +@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) + * unaligned here as a result of stack start randomization. + */ + addr = PAGE_ALIGN(addr); +- addr = align_vdso_addr(addr); + + return addr; + } +@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm, + unsigned size) + { + struct mm_struct *mm = current->mm; +- unsigned long addr; ++ unsigned long addr = 0; + int ret; + +- if (!vdso_enabled) +- return 0; +- + down_write(&mm->mmap_sem); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + addr = vdso_addr(mm->start_stack, size); ++ addr = align_vdso_addr(addr); + addr = get_unmapped_area(NULL, addr, size, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + +- current->mm->context.vdso = (void *)addr; ++ mm->context.vdso = addr; + + ret = install_special_mapping(mm, addr, size, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + pages); +- if (ret) { +- current->mm->context.vdso = NULL; +- goto up_fail; +- } ++ if (ret) ++ mm->context.vdso = 0; + + up_fail: + up_write(&mm->mmap_sem); +@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) + vdsox32_size); + } + #endif +- +-static __init int vdso_setup(char *s) +-{ +- vdso_enabled = simple_strtoul(s, NULL, 0); +- return 0; +-} +-__setup("vdso=", vdso_setup); +diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig +index 01b9026..1e476df 100644 +--- a/arch/x86/xen/Kconfig ++++ b/arch/x86/xen/Kconfig +@@ -9,6 +9,7 @@ config XEN + select XEN_HAVE_PVMMU + depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) + depends on X86_TSC ++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN + help + This is the Linux Xen port. 
Enabling this will allow the + kernel to boot in a paravirtualized environment under the +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 201d09a..e4723e5 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); + + struct shared_info xen_dummy_shared_info; + +-void *xen_initial_gdt; +- + RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); + __read_mostly int xen_have_vector_callback; + EXPORT_SYMBOL_GPL(xen_have_vector_callback); +@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr) + { + unsigned long va = dtr->address; + unsigned int size = dtr->size + 1; +- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; +- unsigned long frames[pages]; ++ unsigned long frames[65536 / PAGE_SIZE]; + int f; + + /* +@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) + { + unsigned long va = dtr->address; + unsigned int size = dtr->size + 1; +- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; +- unsigned long frames[pages]; ++ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE]; + int f; + + /* +@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) + * 8-byte entries, or 16 4k pages.. + */ + +- BUG_ON(size > 65536); ++ BUG_ON(size > GDT_SIZE); + BUG_ON(va & ~PAGE_MASK); + + for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { +@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void) + return 0; + } + +-static void set_xen_basic_apic_ops(void) ++static void __init set_xen_basic_apic_ops(void) + { + apic->read = xen_apic_read; + apic->write = xen_apic_write; +@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = { + #endif + }; + +-static void xen_reboot(int reason) ++static __noreturn void xen_reboot(int reason) + { + struct sched_shutdown r = { .reason = reason }; + +- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) +- BUG(); ++ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); ++ BUG(); + } + +-static void xen_restart(char *msg) ++static __noreturn void xen_restart(char *msg) + { + xen_reboot(SHUTDOWN_reboot); + } + +-static void xen_emergency_restart(void) ++static __noreturn void xen_emergency_restart(void) + { + xen_reboot(SHUTDOWN_reboot); + } + +-static void xen_machine_halt(void) ++static __noreturn void xen_machine_halt(void) + { + xen_reboot(SHUTDOWN_poweroff); + } + +-static void xen_machine_power_off(void) ++static __noreturn void xen_machine_power_off(void) + { + if (pm_power_off) + pm_power_off(); +@@ -1564,7 +1560,17 @@ asmlinkage void __init xen_start_kernel(void) + __userpte_alloc_gfp &= ~__GFP_HIGHMEM; + + /* Work out if we support NX */ +- x86_configure_nx(); ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) ++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && ++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { ++ unsigned l, h; ++ ++ __supported_pte_mask |= _PAGE_NX; ++ rdmsr(MSR_EFER, l, h); ++ l |= EFER_NX; ++ wrmsr(MSR_EFER, l, h); ++ } ++#endif + + /* Get mfn list */ + xen_build_dynamic_phys_to_machine(); +@@ -1592,13 +1598,6 @@ asmlinkage void __init xen_start_kernel(void) + + machine_ops = xen_machine_ops; + +- /* +- * The only reliable way to retain the initial address of the +- * percpu gdt_page is to remember it here, so we can go and +- * mark it RW later, when the initial percpu area is freed. 
+- */ +- xen_initial_gdt = &per_cpu(gdt_page, 0); +- + xen_smp_init(); + + #ifdef CONFIG_ACPI_NUMA +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index 2423ef0..4f6fb5b 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) + return val; + } + +-static pteval_t pte_pfn_to_mfn(pteval_t val) ++static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val) + { + if (val & _PAGE_PRESENT) { + unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; +@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) + /* L3_k[510] -> level2_kernel_pgt + * L3_i[511] -> level2_fixmap_pgt */ + convert_pfn_mfn(level3_kernel_pgt); ++ convert_pfn_mfn(level3_vmalloc_start_pgt); ++ convert_pfn_mfn(level3_vmalloc_end_pgt); ++ convert_pfn_mfn(level3_vmemmap_pgt); + } + /* We get [511][511] and have Xen's version of level2_kernel_pgt */ + l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); +@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) + set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); + set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + +@@ -2123,6 +2130,7 @@ static void __init xen_post_allocator_init(void) + pv_mmu_ops.set_pud = xen_set_pud; + #if PAGETABLE_LEVELS == 4 + pv_mmu_ops.set_pgd = xen_set_pgd; ++ pv_mmu_ops.set_pgd_batched = xen_set_pgd; + #endif + + /* This will work as long as patching hasn't happened yet +@@ -2201,6 +2209,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { + .pud_val = PV_CALLEE_SAVE(xen_pud_val), + .make_pud = PV_CALLEE_SAVE(xen_make_pud), + .set_pgd = xen_set_pgd_hyper, ++ .set_pgd_batched = xen_set_pgd_hyper, + + .alloc_pud = xen_alloc_pmd_init, + .release_pud = xen_release_pmd_init, +diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c +index a18eadd..2e2f10e 100644 +--- a/arch/x86/xen/smp.c ++++ b/arch/x86/xen/smp.c +@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void) + + if (xen_pv_domain()) { + if (!xen_feature(XENFEAT_writable_page_tables)) +- /* We've switched to the "real" per-cpu gdt, so make +- * sure the old memory can be recycled. */ +- make_lowmem_page_readwrite(xen_initial_gdt); +- + #ifdef CONFIG_X86_32 + /* + * Xen starts us with XEN_FLAT_RING1_DS, but linux code + * expects __USER_DS + */ +- loadsegment(ds, __USER_DS); +- loadsegment(es, __USER_DS); ++ loadsegment(ds, __KERNEL_DS); ++ loadsegment(es, __KERNEL_DS); + #endif + + xen_filter_cpu_maps(); +@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) + #ifdef CONFIG_X86_32 + /* Note: PVH is not yet supported on x86_32. 
*/ + ctxt->user_regs.fs = __KERNEL_PERCPU; +- ctxt->user_regs.gs = __KERNEL_STACK_CANARY; ++ savesegment(gs, ctxt->user_regs.gs); + #endif + ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; + +@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + ctxt->flags = VGCF_IN_KERNEL; + ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ +- ctxt->user_regs.ds = __USER_DS; +- ctxt->user_regs.es = __USER_DS; ++ ctxt->user_regs.ds = __KERNEL_DS; ++ ctxt->user_regs.es = __KERNEL_DS; + ctxt->user_regs.ss = __KERNEL_DS; + + xen_copy_trap_info(ctxt->trap_ctxt); +@@ -437,13 +433,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle) + int rc; + + per_cpu(current_task, cpu) = idle; ++ per_cpu(current_tinfo, cpu) = &idle->tinfo; + #ifdef CONFIG_X86_32 + irq_ctx_init(cpu); + #else + clear_tsk_thread_flag(idle, TIF_FORK); +- per_cpu(kernel_stack, cpu) = +- (unsigned long)task_stack_page(idle) - +- KERNEL_STACK_OFFSET + THREAD_SIZE; ++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE; + #endif + xen_setup_runstate_info(cpu); + xen_setup_timer(cpu); +@@ -719,7 +714,7 @@ static const struct smp_ops xen_smp_ops __initconst = { + + void __init xen_smp_init(void) + { +- smp_ops = xen_smp_ops; ++ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops); + xen_fill_possible_map(); + } + +diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S +index 33ca6e4..0ded929 100644 +--- a/arch/x86/xen/xen-asm_32.S ++++ b/arch/x86/xen/xen-asm_32.S +@@ -84,14 +84,14 @@ ENTRY(xen_iret) + ESP_OFFSET=4 # bytes pushed onto stack + + /* +- * Store vcpu_info pointer for easy access. Do it this way to +- * avoid having to reload %fs ++ * Store vcpu_info pointer for easy access. 
+ */ + #ifdef CONFIG_SMP +- GET_THREAD_INFO(%eax) +- movl %ss:TI_cpu(%eax), %eax +- movl %ss:__per_cpu_offset(,%eax,4), %eax +- mov %ss:xen_vcpu(%eax), %eax ++ push %fs ++ mov $(__KERNEL_PERCPU), %eax ++ mov %eax, %fs ++ mov PER_CPU_VAR(xen_vcpu), %eax ++ pop %fs + #else + movl %ss:xen_vcpu, %eax + #endif +diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S +index 485b695..fda3e7c 100644 +--- a/arch/x86/xen/xen-head.S ++++ b/arch/x86/xen/xen-head.S +@@ -39,6 +39,17 @@ ENTRY(startup_xen) + #ifdef CONFIG_X86_32 + mov %esi,xen_start_info + mov $init_thread_union+THREAD_SIZE,%esp ++#ifdef CONFIG_SMP ++ movl $cpu_gdt_table,%edi ++ movl $__per_cpu_load,%eax ++ movw %ax,__KERNEL_PERCPU + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_PERCPU + 4(%edi) ++ movb %ah,__KERNEL_PERCPU + 7(%edi) ++ movl $__per_cpu_end - 1,%eax ++ subl $__per_cpu_start,%eax ++ movw %ax,__KERNEL_PERCPU + 0(%edi) ++#endif + #else + mov %rsi,xen_start_info + mov $init_thread_union+THREAD_SIZE,%rsp +diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h +index 1cb6f4c..9981524 100644 +--- a/arch/x86/xen/xen-ops.h ++++ b/arch/x86/xen/xen-ops.h +@@ -10,8 +10,6 @@ + extern const char xen_hypervisor_callback[]; + extern const char xen_failsafe_callback[]; + +-extern void *xen_initial_gdt; +- + struct trap_info; + void xen_copy_trap_info(struct trap_info *traps); + +diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h +index 525bd3d..ef888b1 100644 +--- a/arch/xtensa/variants/dc232b/include/variant/core.h ++++ b/arch/xtensa/variants/dc232b/include/variant/core.h +@@ -119,9 +119,9 @@ + ----------------------------------------------------------------------*/ + + #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */ +-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */ + #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */ + #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */ ++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ + + #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */ + #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */ +diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h +index 2f33760..835e50a 100644 +--- a/arch/xtensa/variants/fsf/include/variant/core.h ++++ b/arch/xtensa/variants/fsf/include/variant/core.h +@@ -11,6 +11,7 @@ + #ifndef _XTENSA_CORE_H + #define _XTENSA_CORE_H + ++#include <linux/const.h> + + /**************************************************************************** + Parameters Useful for Any Code, USER or PRIVILEGED +@@ -112,9 +113,9 @@ + ----------------------------------------------------------------------*/ + + #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */ +-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */ + #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */ + #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */ ++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ + + #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */ + #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */ +diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h +index af00795..2bb8105 100644 +--- a/arch/xtensa/variants/s6000/include/variant/core.h ++++ 
b/arch/xtensa/variants/s6000/include/variant/core.h +@@ -11,6 +11,7 @@ + #ifndef _XTENSA_CORE_CONFIGURATION_H + #define _XTENSA_CORE_CONFIGURATION_H + ++#include <linux/const.h> + + /**************************************************************************** + Parameters Useful for Any Code, USER or PRIVILEGED +@@ -118,9 +119,9 @@ + ----------------------------------------------------------------------*/ + + #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */ +-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */ + #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */ + #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */ ++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */ + + #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */ + #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */ +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index d8f80e7..5f41702 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -809,7 +809,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css) + static struct cgroup_subsys_state * + blkcg_css_alloc(struct cgroup_subsys_state *parent_css) + { +- static atomic64_t id_seq = ATOMIC64_INIT(0); ++ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0); + struct blkcg *blkcg; + + if (!parent_css) { +@@ -823,7 +823,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) + + blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT; + blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT; +- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */ ++ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */ + done: + spin_lock_init(&blkcg->lock); + INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC); +diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c +index 1855bf5..af12b06 100644 +--- a/block/blk-iopoll.c ++++ b/block/blk-iopoll.c +@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll) + } + EXPORT_SYMBOL(blk_iopoll_complete); + +-static void blk_iopoll_softirq(struct softirq_action *h) ++static __latent_entropy void blk_iopoll_softirq(void) + { + struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); + int rearm = 0, budget = blk_iopoll_budget; +diff --git a/block/blk-map.c b/block/blk-map.c +index ae4ae10..c470b8d 100644 +--- a/block/blk-map.c ++++ b/block/blk-map.c +@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, + if (!len || !kbuf) + return -EINVAL; + +- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); ++ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf); + if (do_copy) + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + else +diff --git a/block/blk-softirq.c b/block/blk-softirq.c +index 57790c1..5e988dd 100644 +--- a/block/blk-softirq.c ++++ b/block/blk-softirq.c +@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. 
+ */ +-static void blk_done_softirq(struct softirq_action *h) ++static __latent_entropy void blk_done_softirq(void) + { + struct list_head *cpu_list, local_list; + +diff --git a/block/bsg.c b/block/bsg.c +index 420a5a9..23834aa 100644 +--- a/block/bsg.c ++++ b/block/bsg.c +@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_v4 *hdr, struct bsg_device *bd, + fmode_t has_write_perm) + { ++ unsigned char tmpcmd[sizeof(rq->__cmd)]; ++ unsigned char *cmdptr; ++ + if (hdr->request_len > BLK_MAX_CDB) { + rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); + if (!rq->cmd) + return -ENOMEM; +- } ++ cmdptr = rq->cmd; ++ } else ++ cmdptr = tmpcmd; + +- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, ++ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request, + hdr->request_len)) + return -EFAULT; + ++ if (cmdptr != rq->cmd) ++ memcpy(rq->cmd, cmdptr, hdr->request_len); ++ + if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { + if (blk_verify_command(rq->cmd, has_write_perm)) + return -EPERM; +diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c +index a0926a6..b2b14b2 100644 +--- a/block/compat_ioctl.c ++++ b/block/compat_ioctl.c +@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode, + cgc = compat_alloc_user_space(sizeof(*cgc)); + cgc32 = compat_ptr(arg); + +- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) || ++ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) || + get_user(data, &cgc32->buffer) || + put_user(compat_ptr(data), &cgc->buffer) || + copy_in_user(&cgc->buflen, &cgc32->buflen, +@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, + err |= __get_user(f->spec1, &uf->spec1); + err |= __get_user(f->fmt_gap, &uf->fmt_gap); + err |= __get_user(name, &uf->name); +- f->name = compat_ptr(name); ++ f->name = (void __force_kernel *)compat_ptr(name); + if (err) { + err = -EFAULT; + goto out; +diff --git a/block/genhd.c b/block/genhd.c +index 791f419..89f21c4 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf) + + /* + * Register device numbers dev..(dev+range-1) +- * range must be nonzero ++ * Noop if @range is zero. + * The hash chain is sorted on range, so that subranges can override. 
+ */ + void blk_register_region(dev_t devt, unsigned long range, struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), void *data) + { +- kobj_map(bdev_map, devt, range, module, probe, lock, data); ++ if (range) ++ kobj_map(bdev_map, devt, range, module, probe, lock, data); + } + + EXPORT_SYMBOL(blk_register_region); + ++/* undo blk_register_region(), noop if @range is zero */ + void blk_unregister_region(dev_t devt, unsigned long range) + { +- kobj_unmap(bdev_map, devt, range); ++ if (range) ++ kobj_unmap(bdev_map, devt, range); + } + + EXPORT_SYMBOL(blk_unregister_region); +diff --git a/block/partitions/efi.c b/block/partitions/efi.c +index dc51f46..d5446a8 100644 +--- a/block/partitions/efi.c ++++ b/block/partitions/efi.c +@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, + if (!gpt) + return NULL; + ++ if (!le32_to_cpu(gpt->num_partition_entries)) ++ return NULL; ++ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL); ++ if (!pte) ++ return NULL; ++ + count = le32_to_cpu(gpt->num_partition_entries) * + le32_to_cpu(gpt->sizeof_partition_entry); +- if (!count) +- return NULL; +- pte = kmalloc(count, GFP_KERNEL); +- if (!pte) +- return NULL; +- + if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), + (u8 *) pte, count) < count) { + kfree(pte); +diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c +index 2648797..92ed21f 100644 +--- a/block/scsi_ioctl.c ++++ b/block/scsi_ioctl.c +@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p) + return put_user(0, p); + } + +-static int sg_get_timeout(struct request_queue *q) ++static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q) + { + return jiffies_to_clock_t(q->sg_timeout); + } +@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command); + static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_hdr *hdr, fmode_t mode) + { +- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) ++ unsigned char tmpcmd[sizeof(rq->__cmd)]; ++ unsigned char *cmdptr; ++ ++ if (rq->cmd != rq->__cmd) ++ cmdptr = rq->cmd; ++ else ++ cmdptr = tmpcmd; ++ ++ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; ++ ++ if (cmdptr != rq->cmd) ++ memcpy(rq->cmd, cmdptr, hdr->cmd_len); ++ + if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) + return -EPERM; + +@@ -417,6 +429,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + int err; + unsigned int in_len, out_len, bytes, opcode, cmdlen; + char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; ++ unsigned char tmpcmd[sizeof(rq->__cmd)]; ++ unsigned char *cmdptr; + + if (!sic) + return -EINVAL; +@@ -450,9 +464,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + */ + err = -EFAULT; + rq->cmd_len = cmdlen; +- if (copy_from_user(rq->cmd, sic->data, cmdlen)) ++ ++ if (rq->cmd != rq->__cmd) ++ cmdptr = rq->cmd; ++ else ++ cmdptr = tmpcmd; ++ ++ if (copy_from_user(cmdptr, sic->data, cmdlen)) + goto error; + ++ if (rq->cmd != cmdptr) ++ memcpy(rq->cmd, cmdptr, cmdlen); ++ + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) + goto error; + +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index 7bdd61b..afec999 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx { + + struct cryptd_blkcipher_request_ctx { + crypto_completion_t complete; +-}; ++} __no_const; 
+ + struct cryptd_hash_ctx { + struct crypto_shash *child; +@@ -80,7 +80,7 @@ struct cryptd_aead_ctx { + + struct cryptd_aead_request_ctx { + crypto_completion_t complete; +-}; ++} __no_const; + + static void cryptd_queue_worker(struct work_struct *work); + +diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c +index 309d345..1632720 100644 +--- a/crypto/pcrypt.c ++++ b/crypto/pcrypt.c +@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) + int ret; + + pinst->kobj.kset = pcrypt_kset; +- ret = kobject_add(&pinst->kobj, NULL, name); ++ ret = kobject_add(&pinst->kobj, NULL, "%s", name); + if (!ret) + kobject_uevent(&pinst->kobj, KOBJ_ADD); + +diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c +index 15dddc1..b61cf0c 100644 +--- a/drivers/acpi/acpica/hwxfsleep.c ++++ b/drivers/acpi/acpica/hwxfsleep.c +@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); + /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */ + + static struct acpi_sleep_functions acpi_sleep_dispatch[] = { +- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep), +- acpi_hw_extended_sleep}, +- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep), +- acpi_hw_extended_wake_prep}, +- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake} ++ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep), ++ .extended_function = acpi_hw_extended_sleep}, ++ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep), ++ .extended_function = acpi_hw_extended_wake_prep}, ++ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), ++ .extended_function = acpi_hw_extended_wake} + }; + + /* +diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h +index e5bcd91..74f050d 100644 +--- a/drivers/acpi/apei/apei-internal.h ++++ b/drivers/acpi/apei/apei-internal.h +@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx, + struct apei_exec_ins_type { + u32 flags; + apei_exec_ins_func_t run; +-}; ++} __do_const; + + struct apei_exec_context { + u32 ip; +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index dab7cb7..f0d2994 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -500,7 +500,7 @@ static void __ghes_print_estatus(const char *pfx, + const struct acpi_hest_generic *generic, + const struct acpi_generic_status *estatus) + { +- static atomic_t seqno; ++ static atomic_unchecked_t seqno; + unsigned int curr_seqno; + char pfx_seq[64]; + +@@ -511,7 +511,7 @@ static void __ghes_print_estatus(const char *pfx, + else + pfx = KERN_ERR; + } +- curr_seqno = atomic_inc_return(&seqno); ++ curr_seqno = atomic_inc_return_unchecked(&seqno); + snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno); + printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n", + pfx_seq, generic->header.source_id); +diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c +index a83e3c6..c3d617f 100644 +--- a/drivers/acpi/bgrt.c ++++ b/drivers/acpi/bgrt.c +@@ -86,8 +86,10 @@ static int __init bgrt_init(void) + if (!bgrt_image) + return -ENODEV; + +- bin_attr_image.private = bgrt_image; +- bin_attr_image.size = bgrt_image_size; ++ pax_open_kernel(); ++ *(void **)&bin_attr_image.private = bgrt_image; ++ *(size_t *)&bin_attr_image.size = bgrt_image_size; ++ pax_close_kernel(); + + bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj); + if (!bgrt_kobj) +diff --git a/drivers/acpi/blacklist.c 
b/drivers/acpi/blacklist.c +index 3d8413d..95f638c 100644 +--- a/drivers/acpi/blacklist.c ++++ b/drivers/acpi/blacklist.c +@@ -51,7 +51,7 @@ struct acpi_blacklist_item { + u32 is_critical_error; + }; + +-static struct dmi_system_id acpi_osi_dmi_table[] __initdata; ++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst; + + /* + * POLICY: If *anything* doesn't work, put it on the blacklist. +@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d) + return 0; + } + +-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { ++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = { + { + .callback = dmi_disable_osi_vista, + .ident = "Fujitsu Siemens", +diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c +index c68e724..e863008 100644 +--- a/drivers/acpi/custom_method.c ++++ b/drivers/acpi/custom_method.c +@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, + struct acpi_table_header table; + acpi_status status; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ return -EPERM; ++#endif ++ + if (!(*ppos)) { + /* parse the table header to get the table length */ + if (count <= sizeof(struct acpi_table_header)) +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c +index 3dca36d..abaf070 100644 +--- a/drivers/acpi/processor_idle.c ++++ b/drivers/acpi/processor_idle.c +@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) + { + int i, count = CPUIDLE_DRIVER_STATE_START; + struct acpi_processor_cx *cx; +- struct cpuidle_state *state; ++ cpuidle_state_no_const *state; + struct cpuidle_driver *drv = &acpi_idle_driver; + + if (!pr->flags.power_setup_done) +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index 91a32ce..d77fcaf 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -425,11 +425,11 @@ static u32 num_counters; + static struct attribute **all_attrs; + static u32 acpi_gpe_count; + +-static struct attribute_group interrupt_stats_attr_group = { ++static attribute_group_no_const interrupt_stats_attr_group = { + .name = "interrupts", + }; + +-static struct kobj_attribute *counter_attrs; ++static kobj_attribute_no_const *counter_attrs; + + static void delete_gpe_attr_array(void) + { +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c +index 36605ab..6ef6d4b 100644 +--- a/drivers/ata/libahci.c ++++ b/drivers/ata/libahci.c +@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap) + } + EXPORT_SYMBOL_GPL(ahci_kick_engine); + +-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, ++static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp, + struct ata_taskfile *tf, int is_cmd, u16 flags, + unsigned long timeout_msec) + { +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index f761603..3042d5c 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev); + static void ata_dev_xfermask(struct ata_device *dev); + static unsigned long ata_dev_blacklisted(const struct ata_device *dev); + +-atomic_t ata_print_id = ATOMIC_INIT(0); ++atomic_unchecked_t ata_print_id = ATOMIC_INIT(0); + + struct ata_force_param { + const char *name; +@@ -4863,7 +4863,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) + struct ata_port *ap; + unsigned int tag; + +- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ++ BUG_ON(qc == NULL); /* 
ata_qc_from_tag _might_ return NULL */ + ap = qc->ap; + + qc->flags = 0; +@@ -4879,7 +4879,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) + struct ata_port *ap; + struct ata_link *link; + +- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); + ap = qc->ap; + link = qc->dev->link; +@@ -5998,6 +5998,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) + return; + + spin_lock(&lock); ++ pax_open_kernel(); + + for (cur = ops->inherits; cur; cur = cur->inherits) { + void **inherit = (void **)cur; +@@ -6011,8 +6012,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) + if (IS_ERR(*pp)) + *pp = NULL; + +- ops->inherits = NULL; ++ *(struct ata_port_operations **)&ops->inherits = NULL; + ++ pax_close_kernel(); + spin_unlock(&lock); + } + +@@ -6208,7 +6210,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) + + /* give ports names and add SCSI hosts */ + for (i = 0; i < host->n_ports; i++) { +- host->ports[i]->print_id = atomic_inc_return(&ata_print_id); ++ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id); + host->ports[i]->local_port_no = i + 1; + } + +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index ef8567d..8bdbd03 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *ap) + + if (rc) + return rc; +- ap->print_id = atomic_inc_return(&ata_print_id); ++ ap->print_id = atomic_inc_return_unchecked(&ata_print_id); + return 0; + } + EXPORT_SYMBOL_GPL(ata_sas_port_init); +diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h +index 45b5ab3..98446b8 100644 +--- a/drivers/ata/libata.h ++++ b/drivers/ata/libata.h +@@ -53,7 +53,7 @@ enum { + ATA_DNXFER_QUIET = (1 << 31), + }; + +-extern atomic_t ata_print_id; ++extern atomic_unchecked_t ata_print_id; + extern int atapi_passthru16; + extern int libata_fua; + extern int libata_noacpi; +diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c +index 73492dd..ca2bff5 100644 +--- a/drivers/ata/pata_arasan_cf.c ++++ b/drivers/ata/pata_arasan_cf.c +@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev) + /* Handle platform specific quirks */ + if (quirk) { + if (quirk & CF_BROKEN_PIO) { +- ap->ops->set_piomode = NULL; ++ pax_open_kernel(); ++ *(void **)&ap->ops->set_piomode = NULL; ++ pax_close_kernel(); + ap->pio_mask = 0; + } + if (quirk & CF_BROKEN_MWDMA) +diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c +index f9b983a..887b9d8 100644 +--- a/drivers/atm/adummy.c ++++ b/drivers/atm/adummy.c +@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c +index 62a7607..cc4be104 100644 +--- a/drivers/atm/ambassador.c ++++ b/drivers/atm/ambassador.c +@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) { + PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); + + // VC layer stats +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + // free the descriptor + kfree (tx_descr); +@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { + dump_skb ("<<<", vc, skb); + + // VC layer 
stats +- atomic_inc(&atm_vcc->stats->rx); ++ atomic_inc_unchecked(&atm_vcc->stats->rx); + __net_timestamp(skb); + // end of our responsibility + atm_vcc->push (atm_vcc, skb); +@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { + } else { + PRINTK (KERN_INFO, "dropped over-size frame"); + // should we count this? +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + } + + } else { +@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { + } + + if (check_area (skb->data, skb->len)) { +- atomic_inc(&atm_vcc->stats->tx_err); ++ atomic_inc_unchecked(&atm_vcc->stats->tx_err); + return -ENOMEM; // ? + } + +diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c +index 0e3f8f9..765a7a5 100644 +--- a/drivers/atm/atmtcp.c ++++ b/drivers/atm/atmtcp.c +@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); + if (dev_data) return 0; +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOLINK; + } + size = skb->len+sizeof(struct atmtcp_hdr); +@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) + if (!new_skb) { + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOBUFS; + } + hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); +@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); + out_vcc->push(out_vcc,new_skb); +- atomic_inc(&vcc->stats->tx); +- atomic_inc(&out_vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->tx); ++ atomic_inc_unchecked(&out_vcc->stats->rx); + return 0; + } + +@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) + out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); + read_unlock(&vcc_sklist_lock); + if (!out_vcc) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + goto done; + } + skb_pull(skb,sizeof(struct atmtcp_hdr)); +@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) + __net_timestamp(new_skb); + skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); + out_vcc->push(out_vcc,new_skb); +- atomic_inc(&vcc->stats->tx); +- atomic_inc(&out_vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->tx); ++ atomic_inc_unchecked(&out_vcc->stats->rx); + done: + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); +diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c +index b1955ba..b179940 100644 +--- a/drivers/atm/eni.c ++++ b/drivers/atm/eni.c +@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc) + DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", + vcc->dev->number); + length = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + else { + length = ATM_CELL_SIZE-1; /* no HEC */ +@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc) + size); + } + eff = length = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + else { + size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); +@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc) + "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", + vcc->dev->number,vcc->vci,length,size << 2,descr); + length = eff = 0; +- atomic_inc(&vcc->stats->rx_err); ++ 
atomic_inc_unchecked(&vcc->stats->rx_err); + } + } + skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; +@@ -767,7 +767,7 @@ rx_dequeued++; + vcc->push(vcc,skb); + pushed++; + } +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + wake_up(&eni_dev->rx_wait); + } +@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev) + PCI_DMA_TODEVICE); + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb_irq(skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + wake_up(&eni_dev->tx_wait); + dma_complete++; + } +diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c +index b41c948..a002b17 100644 +--- a/drivers/atm/firestream.c ++++ b/drivers/atm/firestream.c +@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) + } + } + +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + fs_dprintk (FS_DEBUG_TXMEM, "i"); + fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); +@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) + #endif + skb_put (skb, qe->p1 & 0xffff); + ATM_SKB(skb)->vcc = atm_vcc; +- atomic_inc(&atm_vcc->stats->rx); ++ atomic_inc_unchecked(&atm_vcc->stats->rx); + __net_timestamp(skb); + fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); + atm_vcc->push (atm_vcc, skb); +@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) + kfree (pe); + } + if (atm_vcc) +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + break; + case 0x1f: /* Reassembly abort: no buffers. */ + /* Silently increment error counter. */ + if (atm_vcc) +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + break; + default: /* Hmm. Haven't written the code to handle the others yet... 
-- REW */ + printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", +diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c +index 204814e..cede831 100644 +--- a/drivers/atm/fore200e.c ++++ b/drivers/atm/fore200e.c +@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e) + #endif + /* check error condition */ + if (*entry->status & STATUS_ERROR) +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + else +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + } + } + +@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp + if (skb == NULL) { + DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); + +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return -ENOMEM; + } + +@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp + + dev_kfree_skb_any(skb); + +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return -ENOMEM; + } + + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); + +@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e) + DPRINTK(2, "damaged PDU on %d.%d.%d\n", + fore200e->atm_dev->number, + entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + } + +@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) + goto retry_here; + } + +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + + fore200e->tx_sat++; + DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", +diff --git a/drivers/atm/he.c b/drivers/atm/he.c +index aa6be26..f70a785 100644 +--- a/drivers/atm/he.c ++++ b/drivers/atm/he.c +@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) + + if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { + hprintk("HBUF_ERR! (cid 0x%x)\n", cid); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto return_host_buffers; + } + +@@ -1717,7 +1717,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) + RBRQ_LEN_ERR(he_dev->rbrq_head) + ? 
"LEN_ERR" : "", + vcc->vpi, vcc->vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto return_host_buffers; + } + +@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) + vcc->push(vcc, skb); + spin_lock(&he_dev->global_lock); + +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + return_host_buffers: + ++pdus_assembled; +@@ -2095,7 +2095,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) + tpd->vcc->pop(tpd->vcc, tpd->skb); + else + dev_kfree_skb_any(tpd->skb); +- atomic_inc(&tpd->vcc->stats->tx_err); ++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err); + } + pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); + return; +@@ -2507,7 +2507,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -EINVAL; + } + +@@ -2518,7 +2518,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -EINVAL; + } + #endif +@@ -2530,7 +2530,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + return -ENOMEM; + } +@@ -2572,7 +2572,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + return -ENOMEM; + } +@@ -2603,7 +2603,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) + __enqueue_tpd(he_dev, tpd, cid); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c +index 1dc0519..1aadaf7 100644 +--- a/drivers/atm/horizon.c ++++ b/drivers/atm/horizon.c +@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) { + { + struct atm_vcc * vcc = ATM_SKB(skb)->vcc; + // VC layer stats +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + __net_timestamp(skb); + // end of our responsibility + vcc->push (vcc, skb); +@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) { + dev->tx_iovec = NULL; + + // VC layer stats +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + // free the skb + hrz_kfree_skb (skb); +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c +index 1bdf104..9dc44b1 100644 +--- a/drivers/atm/idt77252.c ++++ b/drivers/atm/idt77252.c +@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc) + else + dev_kfree_skb(skb); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + } + + atomic_dec(&scq->used); +@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + if ((sb = dev_alloc_skb(64)) == NULL) { + printk("%s: Can't allocate buffers for aal0.\n", + card->name); +- atomic_add(i, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i, &vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) { + RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", + card->name); +- 
atomic_add(i - 1, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); + dev_kfree_skb(sb); + break; + } +@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + cell += ATM_CELL_PAYLOAD; + } +@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + "(CDC: %08x)\n", + card->name, len, rpp->len, readl(SAR_REG_CDC)); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (stat & SAR_RSQE_CRC) { + RXPRINTK("%s: AAL5 CRC error.\n", card->name); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (skb_queue_len(&rpp->queue) > 1) { +@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + RXPRINTK("%s: Can't alloc RX skb.\n", + card->name); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (!atm_charge(vcc, skb->truesize)) { +@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + __net_timestamp(skb); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + return; + } +@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) + __net_timestamp(skb); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + if (skb->truesize > SAR_FB_SIZE_3) + add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); +@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card) + if (vcc->qos.aal != ATM_AAL0) { + RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", + card->name, vpi, vci); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto drop; + } + + if ((sb = dev_alloc_skb(64)) == NULL) { + printk("%s: Can't allocate buffers for AAL0.\n", + card->name); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto drop; + } + +@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + drop: + skb_pull(queue, 64); +@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) + + if (vc == NULL) { + printk("%s: NULL connection in send().\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } + if (!test_bit(VCF_TX, &vc->flags)) { + printk("%s: Trying to transmit on a non-tx VC.\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } +@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) + break; + default: + printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } + + if (skb_shinfo(skb)->nr_frags != 0) { + printk("%s: No scatter-gather yet.\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } +@@ -1988,7 
+1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) + + err = queue_skb(card, vc, skb, oam); + if (err) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return err; + } +@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags) + skb = dev_alloc_skb(64); + if (!skb) { + printk("%s: Out of memory in send_oam().\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOMEM; + } + atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); +diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c +index 4217f29..88f547a 100644 +--- a/drivers/atm/iphase.c ++++ b/drivers/atm/iphase.c +@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev) + status = (u_short) (buf_desc_ptr->desc_mode); + if (status & (RX_CER | RX_PTE | RX_OFL)) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + IF_ERR(printk("IA: bad packet, dropping it");) + if (status & RX_CER) { + IF_ERR(printk(" cause: packet CRC error\n");) +@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev) + len = dma_addr - buf_addr; + if (len > iadev->rx_buf_sz) { + printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out_free_desc; + } + +@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev) + ia_vcc = INPH_IA_VCC(vcc); + if (ia_vcc == NULL) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + atm_return(vcc, skb->truesize); + dev_kfree_skb_any(skb); + goto INCR_DLE; +@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev) + if ((length > iadev->rx_buf_sz) || (length > + (skb->len - sizeof(struct cpcs_trailer)))) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", + length, skb->len);) + atm_return(vcc, skb->truesize); +@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev) + + IF_RX(printk("rx_dle_intr: skb push");) + vcc->push(vcc,skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + iadev->rx_pkt_cnt++; + } + INCR_DLE: +@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) + { + struct k_sonet_stats *stats; + stats = &PRIV(_ia_dev[board])->sonet_stats; +- printk("section_bip: %d\n", atomic_read(&stats->section_bip)); +- printk("line_bip : %d\n", atomic_read(&stats->line_bip)); +- printk("path_bip : %d\n", atomic_read(&stats->path_bip)); +- printk("line_febe : %d\n", atomic_read(&stats->line_febe)); +- printk("path_febe : %d\n", atomic_read(&stats->path_febe)); +- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); +- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); +- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); +- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); ++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); ++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); ++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); ++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); ++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); ++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); ++ printk("uncorr_hcs : %d\n", 
atomic_read_unchecked(&stats->uncorr_hcs)); ++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); ++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); + } + ia_cmds.status = 0; + break; +@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { + if ((desc == 0) || (desc > iadev->num_tx_desc)) + { + IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + if (vcc->pop) + vcc->pop(vcc, skb); + else +@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { + ATM_DESC(skb) = vcc->vci; + skb_queue_tail(&iadev->tx_dma_q, skb); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + iadev->tx_pkt_cnt++; + /* Increment transaction counter */ + writel(2, iadev->dma+IPHASE5575_TX_COUNTER); + + #if 0 + /* add flow control logic */ +- if (atomic_read(&vcc->stats->tx) % 20 == 0) { ++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { + if (iavcc->vc_desc_cnt > 10) { + vcc->tx_quota = vcc->tx_quota * 3 / 4; + printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); +diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c +index fa7d7019..1e404c7 100644 +--- a/drivers/atm/lanai.c ++++ b/drivers/atm/lanai.c +@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai, + vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); + lanai_endtx(lanai, lvcc); + lanai_free_skb(lvcc->tx.atmvcc, skb); +- atomic_inc(&lvcc->tx.atmvcc->stats->tx); ++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); + } + + /* Try to fill the buffer - don't call unless there is backlog */ +@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr) + ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; + __net_timestamp(skb); + lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); + out: + lvcc->rx.buf.ptr = end; + cardvcc_write(lvcc, endptr, vcc_rxreadptr); +@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " + "vcc %d\n", lanai->number, (unsigned int) s, vci); + lanai->stats.service_rxnotaal5++; +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + return 0; + } + if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { +@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + int bytes; + read_unlock(&vcc_sklist_lock); + DPRINTK("got trashed rx pdu on vci %d\n", vci); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_trash++; + bytes = (SERVICE_GET_END(s) * 16) - + (((unsigned long) lvcc->rx.buf.ptr) - +@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + } + if (s & SERVICE_STREAM) { + read_unlock(&vcc_sklist_lock); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_stream++; + printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " + "PDU on VCI %d!\n", lanai->number, vci); +@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) + return 0; + } + DPRINTK("got rx crc error on vci %d\n", vci); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + 
lvcc->stats.x.aal5.service_rxcrc++; + lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; + cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); +diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c +index 9587e95..b45c5cb 100644 +--- a/drivers/atm/nicstar.c ++++ b/drivers/atm/nicstar.c +@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + if ((vc = (vc_map *) vcc->dev_data) == NULL) { + printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", + card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + if (!vc->tx) { + printk("nicstar%d: Trying to transmit on a non-tx VC.\n", + card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { + printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", + card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (skb_shinfo(skb)->nr_frags != 0) { + printk("nicstar%d: No scatter-gather yet.\n", card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + } + + if (push_scqe(card, vc, scq, &scqe, skb) != 0) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EIO; + } +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + printk + ("nicstar%d: Can't allocate buffers for aal0.\n", + card->index); +- atomic_add(i, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i, &vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) { + RXPRINTK + ("nicstar%d: atm_charge() dropped aal0 packets.\n", + card->index); +- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ + dev_kfree_skb_any(sb); + break; + } +@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + cell += ATM_CELL_PAYLOAD; + } + +@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + if (iovb == NULL) { + printk("nicstar%d: Out of iovec buffers.\n", + card->index); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + recycle_rx_buf(card, skb); + return; + } +@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + small or large buffer itself. 
*/ + } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { + printk("nicstar%d: received too big AAL5 SDU.\n", card->index); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_MAX_IOVECS); + NS_PRV_IOVCNT(iovb) = 0; +@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ("nicstar%d: Expected a small buffer, and this is not one.\n", + card->index); + which_list(card, skb); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_rx_buf(card, skb); + vc->rx_iov = NULL; + recycle_iov_buf(card, iovb); +@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ("nicstar%d: Expected a large buffer, and this is not one.\n", + card->index); + which_list(card, skb); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; +@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + printk(" - PDU size mismatch.\n"); + else + printk(".\n"); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, + NS_PRV_IOVCNT(iovb)); + vc->rx_iov = NULL; +@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + /* skb points to a small buffer */ + if (!atm_charge(vcc, skb->truesize)) { + push_rxbufs(card, skb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + skb_put(skb, len); + dequeue_sm_buf(card, skb); +@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ + struct sk_buff *sb; +@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + if (len <= NS_SMBUFSIZE) { + if (!atm_charge(vcc, sb->truesize)) { + push_rxbufs(card, sb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + skb_put(sb, len); + dequeue_sm_buf(card, sb); +@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + + push_rxbufs(card, skb); +@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + + if (!atm_charge(vcc, skb->truesize)) { + push_rxbufs(card, skb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + dequeue_lg_buf(card, skb); + #ifdef NS_USE_DESTRUCTORS +@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + + push_rxbufs(card, sb); +@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + printk + ("nicstar%d: Out of huge buffers.\n", + card->index); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + recycle_iovec_rx_bufs(card, + (struct iovec *) + iovb->data, +@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + card->hbpool.count++; + } else + dev_kfree_skb_any(hb); +- 
atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } else { + /* Copy the small buffer to the huge buffer */ + sb = (struct sk_buff *)iov->iov_base; +@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) + #endif /* NS_USE_DESTRUCTORS */ + __net_timestamp(hb); + vcc->push(vcc, hb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + } + +diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c +index e3fb496..d9646bf 100644 +--- a/drivers/atm/solos-pci.c ++++ b/drivers/atm/solos-pci.c +@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg) + } + atm_charge(vcc, skb->truesize); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + break; + + case PKT_STATUS: +@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card) + vcc = SKB_CB(oldskb)->vcc; + + if (vcc) { +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + solos_pop(vcc, oldskb); + } else { + dev_kfree_skb_irq(oldskb); +diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c +index 0215934..ce9f5b1 100644 +--- a/drivers/atm/suni.c ++++ b/drivers/atm/suni.c +@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock); + + + #define ADD_LIMITED(s,v) \ +- atomic_add((v),&stats->s); \ +- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); ++ atomic_add_unchecked((v),&stats->s); \ ++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); + + + static void suni_hz(unsigned long from_timer) +diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c +index 5120a96..e2572bd 100644 +--- a/drivers/atm/uPD98402.c ++++ b/drivers/atm/uPD98402.c +@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze + struct sonet_stats tmp; + int error = 0; + +- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); ++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); + if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); + if (zero && !error) { +@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) + + + #define ADD_LIMITED(s,v) \ +- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ +- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ +- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } ++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ ++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } + + + static void stat_event(struct atm_dev *dev) +@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev) + if (reason & uPD98402_INT_PFM) stat_event(dev); + if (reason & uPD98402_INT_PCO) { + (void) GET(PCOCR); /* clear interrupt cause */ +- atomic_add(GET(HECCT), ++ atomic_add_unchecked(GET(HECCT), + &PRIV(dev)->sonet_stats.uncorr_hcs); + } + if ((reason & uPD98402_INT_RFO) && +@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev) + PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | + uPD98402_INT_LOS),PIMR); /* enable them */ + (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ +- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); +- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); +- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); ++ 
atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); + return 0; + } + +diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c +index 969c3c2..9b72956 100644 +--- a/drivers/atm/zatm.c ++++ b/drivers/atm/zatm.c +@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); + } + if (!size) { + dev_kfree_skb_irq(skb); +- if (vcc) atomic_inc(&vcc->stats->rx_err); ++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); + continue; + } + if (!atm_charge(vcc,skb->truesize)) { +@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); + skb->len = size; + ATM_SKB(skb)->vcc = vcc; + vcc->push(vcc,skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + zout(pos & 0xffff,MTA(mbx)); + #if 0 /* probably a stupid idea */ +@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | + skb_queue_head(&zatm_vcc->backlog,skb); + break; + } +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + wake_up(&zatm_vcc->tx_wait); + } + +diff --git a/drivers/base/bus.c b/drivers/base/bus.c +index 59dc808..f10c74e 100644 +--- a/drivers/base/bus.c ++++ b/drivers/base/bus.c +@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif) + return -EINVAL; + + mutex_lock(&subsys->p->mutex); +- list_add_tail(&sif->node, &subsys->p->interfaces); ++ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces); + if (sif->add_dev) { + subsys_dev_iter_init(&iter, subsys, NULL, NULL); + while ((dev = subsys_dev_iter_next(&iter))) +@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif) + subsys = sif->subsys; + + mutex_lock(&subsys->p->mutex); +- list_del_init(&sif->node); ++ pax_list_del_init((struct list_head *)&sif->node); + if (sif->remove_dev) { + subsys_dev_iter_init(&iter, subsys, NULL, NULL); + while ((dev = subsys_dev_iter_next(&iter))) +diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c +index 25798db..15f130e 100644 +--- a/drivers/base/devtmpfs.c ++++ b/drivers/base/devtmpfs.c +@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir) + if (!thread) + return 0; + +- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); ++ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL); + if (err) + printk(KERN_INFO "devtmpfs: error mounting %i\n", err); + else +@@ -380,11 +380,11 @@ static int devtmpfsd(void *p) + *err = sys_unshare(CLONE_NEWNS); + if (*err) + goto out; +- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options); ++ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options); + if (*err) + goto out; +- sys_chdir("/.."); /* will traverse into overmounted root */ +- sys_chroot("."); ++ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */ ++ sys_chroot((char __force_user *)"."); + complete(&setup_done); + while (1) { + spin_lock(&req_lock); +diff --git a/drivers/base/node.c b/drivers/base/node.c +index bc9f43b..29703b8 100644 +--- a/drivers/base/node.c ++++ b/drivers/base/node.c +@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf) + struct node_attr { + struct device_attribute attr; + enum node_states state; +-}; ++} __do_const; + + static ssize_t show_node_state(struct device *dev, + struct device_attribute *attr, char *buf) +diff --git a/drivers/base/power/domain.c 
b/drivers/base/power/domain.c +index bfb8955..4ebff34 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -1809,9 +1809,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) + + if (dev->power.subsys_data->domain_data) { + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); +- gpd_data->ops = (struct gpd_dev_ops){ NULL }; ++ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops)); + if (clear_td) +- gpd_data->td = (struct gpd_timing_data){ 0 }; ++ memset(&gpd_data->td, 0, sizeof(gpd_data->td)); + + if (--gpd_data->refcount == 0) { + dev->power.subsys_data->domain_data = NULL; +@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) + { + struct cpuidle_driver *cpuidle_drv; + struct gpd_cpu_data *cpu_data; +- struct cpuidle_state *idle_state; ++ cpuidle_state_no_const *idle_state; + int ret = 0; + + if (IS_ERR_OR_NULL(genpd) || state < 0) +@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state) + int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) + { + struct gpd_cpu_data *cpu_data; +- struct cpuidle_state *idle_state; ++ cpuidle_state_no_const *idle_state; + int ret = 0; + + if (IS_ERR_OR_NULL(genpd)) +diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c +index 03e089a..0e9560c 100644 +--- a/drivers/base/power/sysfs.c ++++ b/drivers/base/power/sysfs.c +@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev, + return -EIO; + } + } +- return sprintf(buf, p); ++ return sprintf(buf, "%s", p); + } + + static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); +diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c +index 2d56f41..8830f19 100644 +--- a/drivers/base/power/wakeup.c ++++ b/drivers/base/power/wakeup.c +@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly; + * They need to be modified together atomically, so it's better to use one + * atomic variable to hold them both. + */ +-static atomic_t combined_event_count = ATOMIC_INIT(0); ++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0); + + #define IN_PROGRESS_BITS (sizeof(int) * 4) + #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) + + static void split_counters(unsigned int *cnt, unsigned int *inpr) + { +- unsigned int comb = atomic_read(&combined_event_count); ++ unsigned int comb = atomic_read_unchecked(&combined_event_count); + + *cnt = (comb >> IN_PROGRESS_BITS); + *inpr = comb & MAX_IN_PROGRESS; +@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws) + ws->start_prevent_time = ws->last_time; + + /* Increment the counter of events in progress. */ +- cec = atomic_inc_return(&combined_event_count); ++ cec = atomic_inc_return_unchecked(&combined_event_count); + + trace_wakeup_source_activate(ws->name, cec); + } +@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) + * Increment the counter of registered wakeup events and decrement the + * couter of wakeup events in progress simultaneously. 
+ */ +- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); ++ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count); + trace_wakeup_source_deactivate(ws->name, cec); + + split_counters(&cnt, &inpr); +diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c +index e8d11b6..7b1b36f 100644 +--- a/drivers/base/syscore.c ++++ b/drivers/base/syscore.c +@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock); + void register_syscore_ops(struct syscore_ops *ops) + { + mutex_lock(&syscore_ops_lock); +- list_add_tail(&ops->node, &syscore_ops_list); ++ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list); + mutex_unlock(&syscore_ops_lock); + } + EXPORT_SYMBOL_GPL(register_syscore_ops); +@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops); + void unregister_syscore_ops(struct syscore_ops *ops) + { + mutex_lock(&syscore_ops_lock); +- list_del(&ops->node); ++ pax_list_del((struct list_head *)&ops->node); + mutex_unlock(&syscore_ops_lock); + } + EXPORT_SYMBOL_GPL(unregister_syscore_ops); +diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c +index 036e8ab..6221dec 100644 +--- a/drivers/block/cciss.c ++++ b/drivers/block/cciss.c +@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h) + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, CommandList_struct, list); + /* can't do anything if fifo is full */ +- if ((h->access.fifo_full(h))) { ++ if ((h->access->fifo_full(h))) { + dev_warn(&h->pdev->dev, "fifo full\n"); + break; + } +@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h) + h->Qdepth--; + + /* Tell the controller execute command */ +- h->access.submit_command(h, c); ++ h->access->submit_command(h, c); + + /* Put job onto the completed Q */ + addQ(&h->cmpQ, c); +@@ -3447,17 +3447,17 @@ startio: + + static inline unsigned long get_next_completion(ctlr_info_t *h) + { +- return h->access.command_completed(h); ++ return h->access->command_completed(h); + } + + static inline int interrupt_pending(ctlr_info_t *h) + { +- return h->access.intr_pending(h); ++ return h->access->intr_pending(h); + } + + static inline long interrupt_not_for_us(ctlr_info_t *h) + { +- return ((h->access.intr_pending(h) == 0) || ++ return ((h->access->intr_pending(h) == 0) || + (h->interrupts_enabled == 0)); + } + +@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h) + u32 a; + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) +- return h->access.command_completed(h); ++ return h->access->command_completed(h); + + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ +@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h) + trans_support & CFGTBL_Trans_use_short_tags); + + /* Change the access methods to the performant access methods */ +- h->access = SA5_performant_access; ++ h->access = &SA5_performant_access; + h->transMethod = CFGTBL_Trans_Performant; + + return; +@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h) + if (prod_index < 0) + return -ENODEV; + h->product_name = products[prod_index].product_name; +- h->access = *(products[prod_index].access); ++ h->access = products[prod_index].access; + + if (cciss_board_disabled(h)) { + dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); +@@ -5059,7 +5059,7 @@ reinit_after_soft_reset: + } + + /* make sure the board interrupts are off */ +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + rc = 
cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); + if (rc) + goto clean2; +@@ -5109,7 +5109,7 @@ reinit_after_soft_reset: + * fake ones to scoop up any residual completions. + */ + spin_lock_irqsave(&h->lock, flags); +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + free_irq(h->intr[h->intr_mode], h); + rc = cciss_request_irq(h, cciss_msix_discard_completions, +@@ -5129,9 +5129,9 @@ reinit_after_soft_reset: + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); +- h->access.set_intr_mask(h, CCISS_INTR_ON); ++ h->access->set_intr_mask(h, CCISS_INTR_ON); + msleep(10000); +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) +@@ -5154,7 +5154,7 @@ reinit_after_soft_reset: + cciss_scsi_setup(h); + + /* Turn the interrupts on so we can service requests */ +- h->access.set_intr_mask(h, CCISS_INTR_ON); ++ h->access->set_intr_mask(h, CCISS_INTR_ON); + + /* Get the firmware version */ + inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); +@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev) + kfree(flush_buf); + if (return_code != IO_OK) + dev_warn(&h->pdev->dev, "Error flushing cache\n"); +- h->access.set_intr_mask(h, CCISS_INTR_OFF); ++ h->access->set_intr_mask(h, CCISS_INTR_OFF); + free_irq(h->intr[h->intr_mode], h); + } + +diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h +index 7fda30e..2f27946 100644 +--- a/drivers/block/cciss.h ++++ b/drivers/block/cciss.h +@@ -101,7 +101,7 @@ struct ctlr_info + /* information about each logical volume */ + drive_info_struct *drv[CISS_MAX_LUN]; + +- struct access_method access; ++ struct access_method *access; + + /* queue and queue Info */ + struct list_head reqQ; +@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h) + } + + static struct access_method SA5_access = { +- SA5_submit_command, +- SA5_intr_mask, +- SA5_fifo_full, +- SA5_intr_pending, +- SA5_completed, ++ .submit_command = SA5_submit_command, ++ .set_intr_mask = SA5_intr_mask, ++ .fifo_full = SA5_fifo_full, ++ .intr_pending = SA5_intr_pending, ++ .command_completed = SA5_completed, + }; + + static struct access_method SA5B_access = { +- SA5_submit_command, +- SA5B_intr_mask, +- SA5_fifo_full, +- SA5B_intr_pending, +- SA5_completed, ++ .submit_command = SA5_submit_command, ++ .set_intr_mask = SA5B_intr_mask, ++ .fifo_full = SA5_fifo_full, ++ .intr_pending = SA5B_intr_pending, ++ .command_completed = SA5_completed, + }; + + static struct access_method SA5_performant_access = { +- SA5_submit_command, +- SA5_performant_intr_mask, +- SA5_fifo_full, +- SA5_performant_intr_pending, +- SA5_performant_completed, ++ .submit_command = SA5_submit_command, ++ .set_intr_mask = SA5_performant_intr_mask, ++ .fifo_full = SA5_fifo_full, ++ .intr_pending = SA5_performant_intr_pending, ++ .command_completed = SA5_performant_completed, + }; + + struct board_type { +diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c +index 2b94403..fd6ad1f 100644 +--- a/drivers/block/cpqarray.c ++++ b/drivers/block/cpqarray.c +@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev) + if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) { + goto Enomem4; + } +- hba[i]->access.set_intr_mask(hba[i], 0); ++ hba[i]->access->set_intr_mask(hba[i], 0); + if 
(request_irq(hba[i]->intr, do_ida_intr, + IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i])) + { +@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev) + add_timer(&hba[i]->timer); + + /* Enable IRQ now that spinlock and rate limit timer are set up */ +- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY); ++ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY); + + for(j=0; j<NWD; j++) { + struct gendisk *disk = ida_gendisk[i][j]; +@@ -694,7 +694,7 @@ DBGINFO( + for(i=0; i<NR_PRODUCTS; i++) { + if (board_id == products[i].board_id) { + c->product_name = products[i].product_name; +- c->access = *(products[i].access); ++ c->access = products[i].access; + break; + } + } +@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void) + hba[ctlr]->intr = intr; + sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr); + hba[ctlr]->product_name = products[j].product_name; +- hba[ctlr]->access = *(products[j].access); ++ hba[ctlr]->access = products[j].access; + hba[ctlr]->ctlr = ctlr; + hba[ctlr]->board_id = board_id; + hba[ctlr]->pci_dev = NULL; /* not PCI */ +@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h) + + while((c = h->reqQ) != NULL) { + /* Can't do anything if we're busy */ +- if (h->access.fifo_full(h) == 0) ++ if (h->access->fifo_full(h) == 0) + return; + + /* Get the first entry from the request Q */ +@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h) + h->Qdepth--; + + /* Tell the controller to do our bidding */ +- h->access.submit_command(h, c); ++ h->access->submit_command(h, c); + + /* Get onto the completion Q */ + addQ(&h->cmpQ, c); +@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id) + unsigned long flags; + __u32 a,a1; + +- istat = h->access.intr_pending(h); ++ istat = h->access->intr_pending(h); + /* Is this interrupt for us? */ + if (istat == 0) + return IRQ_NONE; +@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id) + */ + spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); + if (istat & FIFO_NOT_EMPTY) { +- while((a = h->access.command_completed(h))) { ++ while((a = h->access->command_completed(h))) { + a1 = a; a &= ~3; + if ((c = h->cmpQ) == NULL) + { +@@ -1448,11 +1448,11 @@ static int sendcmd( + /* + * Disable interrupt + */ +- info_p->access.set_intr_mask(info_p, 0); ++ info_p->access->set_intr_mask(info_p, 0); + /* Make sure there is room in the command FIFO */ + /* Actually it should be completely empty at this time. */ + for (i = 200000; i > 0; i--) { +- temp = info_p->access.fifo_full(info_p); ++ temp = info_p->access->fifo_full(info_p); + if (temp != 0) { + break; + } +@@ -1465,7 +1465,7 @@ DBG( + /* + * Send the cmd + */ +- info_p->access.submit_command(info_p, c); ++ info_p->access->submit_command(info_p, c); + complete = pollcomplete(ctlr); + + pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, +@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host) + * we check the new geometry. Then turn interrupts back on when + * we're done. 
+ */ +- host->access.set_intr_mask(host, 0); ++ host->access->set_intr_mask(host, 0); + getgeometry(ctlr); +- host->access.set_intr_mask(host, FIFO_NOT_EMPTY); ++ host->access->set_intr_mask(host, FIFO_NOT_EMPTY); + + for(i=0; i<NWD; i++) { + struct gendisk *disk = ida_gendisk[ctlr][i]; +@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr) + /* Wait (up to 2 seconds) for a command to complete */ + + for (i = 200000; i > 0; i--) { +- done = hba[ctlr]->access.command_completed(hba[ctlr]); ++ done = hba[ctlr]->access->command_completed(hba[ctlr]); + if (done == 0) { + udelay(10); /* a short fixed delay */ + } else +diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h +index be73e9d..7fbf140 100644 +--- a/drivers/block/cpqarray.h ++++ b/drivers/block/cpqarray.h +@@ -99,7 +99,7 @@ struct ctlr_info { + drv_info_t drv[NWD]; + struct proc_dir_entry *proc; + +- struct access_method access; ++ struct access_method *access; + + cmdlist_t *reqQ; + cmdlist_t *cmpQ; +diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h +index 0e06f0c..c47b81d 100644 +--- a/drivers/block/drbd/drbd_int.h ++++ b/drivers/block/drbd/drbd_int.h +@@ -582,7 +582,7 @@ struct drbd_epoch { + struct drbd_tconn *tconn; + struct list_head list; + unsigned int barrier_nr; +- atomic_t epoch_size; /* increased on every request added. */ ++ atomic_unchecked_t epoch_size; /* increased on every request added. */ + atomic_t active; /* increased on every req. added, and dec on every finished. */ + unsigned long flags; + }; +@@ -1022,7 +1022,7 @@ struct drbd_conf { + unsigned int al_tr_number; + int al_tr_cycle; + wait_queue_head_t seq_wait; +- atomic_t packet_seq; ++ atomic_unchecked_t packet_seq; + unsigned int peer_seq; + spinlock_t peer_seq_lock; + unsigned int minor; +@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname, + char __user *uoptval; + int err; + +- uoptval = (char __user __force *)optval; ++ uoptval = (char __force_user *)optval; + + set_fs(KERNEL_DS); + if (level == SOL_SOCKET) +diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c +index 89c497c..9c736ae 100644 +--- a/drivers/block/drbd/drbd_interval.c ++++ b/drivers/block/drbd/drbd_interval.c +@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new) + } + + static const struct rb_augment_callbacks augment_callbacks = { +- augment_propagate, +- augment_copy, +- augment_rotate, ++ .propagate = augment_propagate, ++ .copy = augment_copy, ++ .rotate = augment_rotate, + }; + + /** +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c +index 929468e..7d934eb 100644 +--- a/drivers/block/drbd/drbd_main.c ++++ b/drivers/block/drbd/drbd_main.c +@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd, + p->sector = sector; + p->block_id = block_id; + p->blksize = blksize; +- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq)); ++ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq)); + return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0); + } + +@@ -1622,7 +1622,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) + return -EIO; + p->sector = cpu_to_be64(req->i.sector); + p->block_id = (unsigned long)req; +- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq)); ++ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq)); + dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); + if 
(mdev->state.conn >= C_SYNC_SOURCE && + mdev->state.conn <= C_PAUSED_SYNC_T) +@@ -2577,8 +2577,8 @@ void conn_destroy(struct kref *kref) + { + struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref); + +- if (atomic_read(&tconn->current_epoch->epoch_size) != 0) +- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size)); ++ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0) ++ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size)); + kfree(tconn->current_epoch); + + idr_destroy(&tconn->volumes); +diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c +index c706d50..5e1b472 100644 +--- a/drivers/block/drbd/drbd_nl.c ++++ b/drivers/block/drbd/drbd_nl.c +@@ -3440,7 +3440,7 @@ out: + + void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib) + { +- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ ++ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ + struct sk_buff *msg; + struct drbd_genlmsghdr *d_out; + unsigned seq; +@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib) + return; + } + +- seq = atomic_inc_return(&drbd_genl_seq); ++ seq = atomic_inc_return_unchecked(&drbd_genl_seq); + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); + if (!msg) + goto failed; +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c +index d073305..4998fea 100644 +--- a/drivers/block/drbd/drbd_receiver.c ++++ b/drivers/block/drbd/drbd_receiver.c +@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev) + { + int err; + +- atomic_set(&mdev->packet_seq, 0); ++ atomic_set_unchecked(&mdev->packet_seq, 0); + mdev->peer_seq = 0; + + mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ? 
+@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn, + do { + next_epoch = NULL; + +- epoch_size = atomic_read(&epoch->epoch_size); ++ epoch_size = atomic_read_unchecked(&epoch->epoch_size); + + switch (ev & ~EV_CLEANUP) { + case EV_PUT: +@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn, + rv = FE_DESTROYED; + } else { + epoch->flags = 0; +- atomic_set(&epoch->epoch_size, 0); ++ atomic_set_unchecked(&epoch->epoch_size, 0); + /* atomic_set(&epoch->active, 0); is already zero */ + if (rv == FE_STILL_LIVE) + rv = FE_RECYCLED; +@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi) + conn_wait_active_ee_empty(tconn); + drbd_flush(tconn); + +- if (atomic_read(&tconn->current_epoch->epoch_size)) { ++ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) { + epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); + if (epoch) + break; +@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi) + } + + epoch->flags = 0; +- atomic_set(&epoch->epoch_size, 0); ++ atomic_set_unchecked(&epoch->epoch_size, 0); + atomic_set(&epoch->active, 0); + + spin_lock(&tconn->epoch_lock); +- if (atomic_read(&tconn->current_epoch->epoch_size)) { ++ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) { + list_add(&epoch->list, &tconn->current_epoch->list); + tconn->current_epoch = epoch; + tconn->epochs++; +@@ -2164,7 +2164,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) + + err = wait_for_and_update_peer_seq(mdev, peer_seq); + drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size); +- atomic_inc(&tconn->current_epoch->epoch_size); ++ atomic_inc_unchecked(&tconn->current_epoch->epoch_size); + err2 = drbd_drain_block(mdev, pi->size); + if (!err) + err = err2; +@@ -2198,7 +2198,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) + + spin_lock(&tconn->epoch_lock); + peer_req->epoch = tconn->current_epoch; +- atomic_inc(&peer_req->epoch->epoch_size); ++ atomic_inc_unchecked(&peer_req->epoch->epoch_size); + atomic_inc(&peer_req->epoch->active); + spin_unlock(&tconn->epoch_lock); + +@@ -4345,7 +4345,7 @@ struct data_cmd { + int expect_payload; + size_t pkt_size; + int (*fn)(struct drbd_tconn *, struct packet_info *); +-}; ++} __do_const; + + static struct data_cmd drbd_cmd_handler[] = { + [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, +@@ -4465,7 +4465,7 @@ static void conn_disconnect(struct drbd_tconn *tconn) + if (!list_empty(&tconn->current_epoch->list)) + conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n"); + /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ +- atomic_set(&tconn->current_epoch->epoch_size, 0); ++ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0); + tconn->send.seen_any_write_yet = false; + + conn_info(tconn, "Connection closed\n"); +@@ -5221,7 +5221,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn) + struct asender_cmd { + size_t pkt_size; + int (*fn)(struct drbd_tconn *tconn, struct packet_info *); +-}; ++} __do_const; + + static struct asender_cmd asender_tbl[] = { + [P_PING] = { 0, got_Ping }, +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index 66e8c3b..9b68dd9 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file, + + file_start_write(file); + set_fs(get_ds()); +- bw = file->f_op->write(file, buf, 
len, &pos); ++ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos); + set_fs(old_fs); + file_end_write(file); + if (likely(bw == len)) +diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c +index 091b9ea..f5428f8 100644 +--- a/drivers/block/null_blk.c ++++ b/drivers/block/null_blk.c +@@ -382,15 +382,25 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + return 0; + } + +-static struct blk_mq_ops null_mq_ops = { +- .queue_rq = null_queue_rq, +- .map_queue = blk_mq_map_queue, ++static struct blk_mq_ops null_mq_single_ops = { ++ .queue_rq = null_queue_rq, ++ .map_queue = blk_mq_map_queue, + .init_hctx = null_init_hctx, + .complete = null_softirq_done_fn, ++ .alloc_hctx = blk_mq_alloc_single_hw_queue, ++ .free_hctx = blk_mq_free_single_hw_queue, ++}; ++ ++static struct blk_mq_ops null_mq_per_node_ops = { ++ .queue_rq = null_queue_rq, ++ .map_queue = blk_mq_map_queue, ++ .init_hctx = null_init_hctx, ++ .alloc_hctx = null_alloc_hctx, ++ .free_hctx = null_free_hctx, + }; + + static struct blk_mq_reg null_mq_reg = { +- .ops = &null_mq_ops, ++ .ops = &null_mq_single_ops, + .queue_depth = 64, + .cmd_size = sizeof(struct nullb_cmd), + .flags = BLK_MQ_F_SHOULD_MERGE, +@@ -521,13 +531,8 @@ static int null_add_dev(void) + null_mq_reg.queue_depth = hw_queue_depth; + null_mq_reg.nr_hw_queues = submit_queues; + +- if (use_per_node_hctx) { +- null_mq_reg.ops->alloc_hctx = null_alloc_hctx; +- null_mq_reg.ops->free_hctx = null_free_hctx; +- } else { +- null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; +- null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; +- } ++ if (use_per_node_hctx) ++ null_mq_reg.ops = &null_mq_per_node_ops; + + nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); + } else if (queue_mode == NULL_Q_BIO) { +diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c +index a2af73d..c0b8f61 100644 +--- a/drivers/block/pktcdvd.c ++++ b/drivers/block/pktcdvd.c +@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p); + + static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) + { +- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); ++ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL); + } + + /* +@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) + return -EROFS; + } + pd->settings.fp = ti.fp; +- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); ++ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL); + + if (ti.nwa_v) { + pd->nwa = be32_to_cpu(ti.next_writable); +diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h +index e5565fb..71be10b4 100644 +--- a/drivers/block/smart1,2.h ++++ b/drivers/block/smart1,2.h +@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h) + } + + static struct access_method smart4_access = { +- smart4_submit_command, +- smart4_intr_mask, +- smart4_fifo_full, +- smart4_intr_pending, +- smart4_completed, ++ .submit_command = smart4_submit_command, ++ .set_intr_mask = smart4_intr_mask, ++ .fifo_full = smart4_fifo_full, ++ .intr_pending = smart4_intr_pending, ++ .command_completed = smart4_completed, + }; + + /* +@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h) + } + + static struct access_method smart2_access = { +- smart2_submit_command, +- smart2_intr_mask, +- smart2_fifo_full, +- smart2_intr_pending, +- smart2_completed, ++ .submit_command = smart2_submit_command, ++ 
.set_intr_mask = smart2_intr_mask, ++ .fifo_full = smart2_fifo_full, ++ .intr_pending = smart2_intr_pending, ++ .command_completed = smart2_completed, + }; + + /* +@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h) + } + + static struct access_method smart2e_access = { +- smart2e_submit_command, +- smart2e_intr_mask, +- smart2e_fifo_full, +- smart2e_intr_pending, +- smart2e_completed, ++ .submit_command = smart2e_submit_command, ++ .set_intr_mask = smart2e_intr_mask, ++ .fifo_full = smart2e_fifo_full, ++ .intr_pending = smart2e_intr_pending, ++ .command_completed = smart2e_completed, + }; + + /* +@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h) + } + + static struct access_method smart1_access = { +- smart1_submit_command, +- smart1_intr_mask, +- smart1_fifo_full, +- smart1_intr_pending, +- smart1_completed, ++ .submit_command = smart1_submit_command, ++ .set_intr_mask = smart1_intr_mask, ++ .fifo_full = smart1_fifo_full, ++ .intr_pending = smart1_intr_pending, ++ .command_completed = smart1_completed, + }; +diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c +index f038dba..bb74c08 100644 +--- a/drivers/bluetooth/btwilink.c ++++ b/drivers/bluetooth/btwilink.c +@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) + + static int bt_ti_probe(struct platform_device *pdev) + { +- static struct ti_st *hst; ++ struct ti_st *hst; + struct hci_dev *hdev; + int err; + +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index 8a3aff7..d7538c2 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi) + ENSURE(reset, CDC_RESET); + ENSURE(generic_packet, CDC_GENERIC_PACKET); + cdi->mc_flags = 0; +- cdo->n_minors = 0; + cdi->options = CDO_USE_FFLAGS; + + if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY)) +@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi) + else + cdi->cdda_method = CDDA_OLD; + +- if (!cdo->generic_packet) +- cdo->generic_packet = cdrom_dummy_generic_packet; ++ if (!cdo->generic_packet) { ++ pax_open_kernel(); ++ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet; ++ pax_close_kernel(); ++ } + + cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); + mutex_lock(&cdrom_mutex); +@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi) + if (cdi->exit) + cdi->exit(cdi); + +- cdi->ops->n_minors--; + cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); + } + +@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, + */ + nr = nframes; + do { +- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL); ++ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL); + if (cgc.buffer) + break; + +@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info, + struct cdrom_device_info *cdi; + int ret; + +- ret = scnprintf(info + *pos, max_size - *pos, header); ++ ret = scnprintf(info + *pos, max_size - *pos, "%s", header); + if (!ret) + return 1; + +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c +index 51e75ad..39c4c76 100644 +--- a/drivers/cdrom/gdrom.c ++++ b/drivers/cdrom/gdrom.c +@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = { + .audio_ioctl = gdrom_audio_ioctl, + .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | + CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, +- .n_minors = 1, + }; + + static int gdrom_bdops_open(struct 
block_device *bdev, fmode_t mode) +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +index 1386749..5430258 100644 +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig" + + config DEVKMEM + bool "/dev/kmem virtual device support" +- default y ++ default n ++ depends on !GRKERNSEC_KMEM + help + Say Y here if you want to support the /dev/kmem device. The + /dev/kmem device is rarely used, but can be used for certain +@@ -577,6 +578,7 @@ config DEVPORT + bool + depends on !M68K + depends on ISA || PCI ++ depends on !GRKERNSEC_KMEM + default y + + source "drivers/s390/char/Kconfig" +diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c +index a48e05b..6bac831 100644 +--- a/drivers/char/agp/compat_ioctl.c ++++ b/drivers/char/agp/compat_ioctl.c +@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user + return -ENOMEM; + } + +- if (copy_from_user(usegment, (void __user *) ureserve.seg_list, ++ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list, + sizeof(*usegment) * ureserve.seg_count)) { + kfree(usegment); + kfree(ksegment); +diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c +index 1b19239..963967b 100644 +--- a/drivers/char/agp/frontend.c ++++ b/drivers/char/agp/frontend.c +@@ -731,6 +731,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) + + agp_copy_info(agp_bridge, &kerninfo); + ++ memset(&userinfo, 0, sizeof(userinfo)); + userinfo.version.major = kerninfo.version.major; + userinfo.version.minor = kerninfo.version.minor; + userinfo.bridge_id = kerninfo.device->vendor | +@@ -819,7 +820,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) + if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) + return -EFAULT; + +- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) ++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) + return -EFAULT; + + client = agp_find_client_by_pid(reserve.pid); +@@ -849,7 +850,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) + if (segment == NULL) + return -ENOMEM; + +- if (copy_from_user(segment, (void __user *) reserve.seg_list, ++ if (copy_from_user(segment, (void __force_user *) reserve.seg_list, + sizeof(struct agp_segment) * reserve.seg_count)) { + kfree(segment); + return -EFAULT; +diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c +index 4f94375..413694e 100644 +--- a/drivers/char/genrtc.c ++++ b/drivers/char/genrtc.c +@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file, + switch (cmd) { + + case RTC_PLL_GET: ++ memset(&pll, 0, sizeof(pll)); + if (get_rtc_pll(&pll)) + return -EINVAL; + else +diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c +index d5d4cd8..22d561d 100644 +--- a/drivers/char/hpet.c ++++ b/drivers/char/hpet.c +@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, + } + + static int +-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, ++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, + struct hpet_info *info) + { + struct hpet_timer __iomem *timer; +diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c +index 86fe45c..c0ea948 100644 +--- a/drivers/char/hw_random/intel-rng.c ++++ b/drivers/char/hw_random/intel-rng.c +@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n"; + + if (no_fwh_detect) + 
return -ENODEV; +- printk(warning); ++ printk("%s", warning); + return -EBUSY; + } + +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index ec4e10f..f2a763b 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -420,7 +420,7 @@ struct ipmi_smi { + struct proc_dir_entry *proc_dir; + char proc_dir_name[10]; + +- atomic_t stats[IPMI_NUM_STATS]; ++ atomic_unchecked_t stats[IPMI_NUM_STATS]; + + /* + * run_to_completion duplicate of smb_info, smi_info +@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); + + + #define ipmi_inc_stat(intf, stat) \ +- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) ++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) + #define ipmi_get_stat(intf, stat) \ +- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) ++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) + + static int is_lan_addr(struct ipmi_addr *addr) + { +@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, + INIT_LIST_HEAD(&intf->cmd_rcvrs); + init_waitqueue_head(&intf->waitq); + for (i = 0; i < IPMI_NUM_STATS; i++) +- atomic_set(&intf->stats[i], 0); ++ atomic_set_unchecked(&intf->stats[i], 0); + + intf->proc_dir = NULL; + +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 8b4fa2c..5f81848 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -283,7 +283,7 @@ struct smi_info { + unsigned char slave_addr; + + /* Counters and things for the proc filesystem. */ +- atomic_t stats[SI_NUM_STATS]; ++ atomic_unchecked_t stats[SI_NUM_STATS]; + + struct task_struct *thread; + +@@ -292,9 +292,9 @@ struct smi_info { + }; + + #define smi_inc_stat(smi, stat) \ +- atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) ++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) + #define smi_get_stat(smi, stat) \ +- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) ++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) + + #define SI_MAX_PARMS 4 + +@@ -3349,7 +3349,7 @@ static int try_smi_init(struct smi_info *new_smi) + atomic_set(&new_smi->req_events, 0); + new_smi->run_to_completion = 0; + for (i = 0; i < SI_NUM_STATS; i++) +- atomic_set(&new_smi->stats[i], 0); ++ atomic_set_unchecked(&new_smi->stats[i], 0); + + new_smi->interrupt_disabled = 1; + atomic_set(&new_smi->stop_operation, 0); +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 92c5937..1be4e4d 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -18,6 +18,7 @@ + #include <linux/raw.h> + #include <linux/tty.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/ptrace.h> + #include <linux/device.h> + #include <linux/highmem.h> +@@ -36,6 +37,10 @@ + + #define DEVPORT_MINOR 4 + ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++extern const struct file_operations grsec_fops; ++#endif ++ + static inline unsigned long size_inside_page(unsigned long start, + unsigned long size) + { +@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_mem_readwrite(from, to); ++#else + printk(KERN_INFO + "Program %s tried to access /dev/mem between %Lx->%Lx.\n", + current->comm, from, to); ++#endif + return 0; + } + cursor += PAGE_SIZE; +@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned 
long size) + } + return 1; + } ++#elif defined(CONFIG_GRKERNSEC_KMEM) ++static inline int range_is_allowed(unsigned long pfn, unsigned long size) ++{ ++ return 0; ++} + #else + static inline int range_is_allowed(unsigned long pfn, unsigned long size) + { +@@ -119,6 +133,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, + + while (count > 0) { + unsigned long remaining; ++ char *temp; + + sz = size_inside_page(p, count); + +@@ -134,7 +149,23 @@ static ssize_t read_mem(struct file *file, char __user *buf, + if (!ptr) + return -EFAULT; + +- remaining = copy_to_user(buf, ptr, sz); ++#ifdef CONFIG_PAX_USERCOPY ++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY); ++ if (!temp) { ++ unxlate_dev_mem_ptr(p, ptr); ++ return -ENOMEM; ++ } ++ memcpy(temp, ptr, sz); ++#else ++ temp = ptr; ++#endif ++ ++ remaining = copy_to_user(buf, temp, sz); ++ ++#ifdef CONFIG_PAX_USERCOPY ++ kfree(temp); ++#endif ++ + unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; +@@ -363,9 +394,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, + size_t count, loff_t *ppos) + { + unsigned long p = *ppos; +- ssize_t low_count, read, sz; ++ ssize_t low_count, read, sz, err = 0; + char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ +- int err = 0; + + read = 0; + if (p < (unsigned long) high_memory) { +@@ -387,6 +417,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, + } + #endif + while (low_count > 0) { ++ char *temp; ++ + sz = size_inside_page(p, low_count); + + /* +@@ -396,7 +428,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf, + */ + kbuf = xlate_dev_kmem_ptr((char *)p); + +- if (copy_to_user(buf, kbuf, sz)) ++#ifdef CONFIG_PAX_USERCOPY ++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY); ++ if (!temp) ++ return -ENOMEM; ++ memcpy(temp, kbuf, sz); ++#else ++ temp = kbuf; ++#endif ++ ++ err = copy_to_user(buf, temp, sz); ++ ++#ifdef CONFIG_PAX_USERCOPY ++ kfree(temp); ++#endif ++ ++ if (err) + return -EFAULT; + buf += sz; + p += sz; +@@ -821,6 +868,9 @@ static const struct memdev { + #ifdef CONFIG_PRINTK + [11] = { "kmsg", 0644, &kmsg_fops, NULL }, + #endif ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL }, ++#endif + }; + + static int memory_open(struct inode *inode, struct file *filp) +@@ -892,7 +942,7 @@ static int __init chr_dev_init(void) + continue; + + device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), +- NULL, devlist[minor].name); ++ NULL, "%s", devlist[minor].name); + } + + return tty_init(); +diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c +index 9df78e2..01ba9ae 100644 +--- a/drivers/char/nvram.c ++++ b/drivers/char/nvram.c +@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf, + + spin_unlock_irq(&rtc_lock); + +- if (copy_to_user(buf, contents, tmp - contents)) ++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) + return -EFAULT; + + *ppos = i; +diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c +index 8320abd..ec48108 100644 +--- a/drivers/char/pcmcia/synclink_cs.c ++++ b/drivers/char/pcmcia/synclink_cs.c +@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp) + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", +- __FILE__, __LINE__, info->device_name, port->count); ++ __FILE__, __LINE__, info->device_name, atomic_read(&port->count)); + +- WARN_ON(!port->count); ++ 
WARN_ON(!atomic_read(&port->count)); + + if (tty_port_close_start(port, tty, filp) == 0) + goto cleanup; +@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp) + cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__, +- tty->driver->name, port->count); ++ tty->driver->name, atomic_read(&port->count)); + } + + /* Wait until the transmitter is empty. +@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", +- __FILE__, __LINE__, tty->driver->name, port->count); ++ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count)); + + /* If port is closing, signal caller to try again */ + if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){ +@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) + goto cleanup; + } + spin_lock(&port->lock); +- port->count++; ++ atomic_inc(&port->count); + spin_unlock(&port->lock); + spin_unlock_irqrestore(&info->netlock, flags); + +- if (port->count == 1) { ++ if (atomic_read(&port->count) == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info, tty); + if (retval < 0) +@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short new_crctype; + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + switch (encoding) +@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev) + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); +- if (info->port.count != 0 || info->netcount != 0) { ++ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { + printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; +@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name); + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + if (cmd != SIOCWANDEV) +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 429b75b..de805d0 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -284,9 +284,6 @@ + /* + * To allow fractional bits to be tracked, the entropy_count field is + * denominated in units of 1/8th bits. +- * +- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in +- * credit_entropy_bits() needs to be 64 bits wide. + */ + #define ENTROPY_SHIFT 3 + #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) +@@ -433,9 +430,9 @@ struct entropy_store { + }; + + static void push_to_pool(struct work_struct *work); +-static __u32 input_pool_data[INPUT_POOL_WORDS]; +-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; +-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS]; ++static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy; ++static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; ++static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy; + + static struct entropy_store input_pool = { + .poolinfo = &poolinfo_table[0], +@@ -524,8 +521,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in, + input_rotate = (input_rotate + (i ? 
7 : 14)) & 31; + } + +- ACCESS_ONCE(r->input_rotate) = input_rotate; +- ACCESS_ONCE(r->add_ptr) = i; ++ ACCESS_ONCE_RW(r->input_rotate) = input_rotate; ++ ACCESS_ONCE_RW(r->add_ptr) = i; + smp_wmb(); + + if (out) +@@ -632,7 +629,7 @@ retry: + /* The +2 corresponds to the /4 in the denominator */ + + do { +- unsigned int anfrac = min(pnfrac, pool_size/2); ++ u64 anfrac = min(pnfrac, pool_size/2); + unsigned int add = + ((pool_size - entropy_count)*anfrac*3) >> s; + +@@ -1151,7 +1148,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, + + extract_buf(r, tmp); + i = min_t(int, nbytes, EXTRACT_SIZE); +- if (copy_to_user(buf, tmp, i)) { ++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { + ret = -EFAULT; + break; + } +@@ -1507,7 +1504,7 @@ EXPORT_SYMBOL(generate_random_uuid); + #include <linux/sysctl.h> + + static int min_read_thresh = 8, min_write_thresh; +-static int max_read_thresh = INPUT_POOL_WORDS * 32; ++static int max_read_thresh = OUTPUT_POOL_WORDS * 32; + static int max_write_thresh = INPUT_POOL_WORDS * 32; + static char sysctl_bootid[16]; + +@@ -1523,7 +1520,7 @@ static char sysctl_bootid[16]; + static int proc_do_uuid(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table fake_table; ++ ctl_table_no_const fake_table; + unsigned char buf[64], tmp_uuid[16], *uuid; + + uuid = table->data; +@@ -1553,7 +1550,7 @@ static int proc_do_uuid(struct ctl_table *table, int write, + static int proc_do_entropy(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- ctl_table fake_table; ++ ctl_table_no_const fake_table; + int entropy_count; + + entropy_count = *(int *)table->data >> ENTROPY_SHIFT; +diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c +index 7cc1fe22..b602d6b 100644 +--- a/drivers/char/sonypi.c ++++ b/drivers/char/sonypi.c +@@ -54,6 +54,7 @@ + + #include <asm/uaccess.h> + #include <asm/io.h> ++#include <asm/local.h> + + #include <linux/sonypi.h> + +@@ -490,7 +491,7 @@ static struct sonypi_device { + spinlock_t fifo_lock; + wait_queue_head_t fifo_proc_list; + struct fasync_struct *fifo_async; +- int open_count; ++ local_t open_count; + int model; + struct input_dev *input_jog_dev; + struct input_dev *input_key_dev; +@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on) + static int sonypi_misc_release(struct inode *inode, struct file *file) + { + mutex_lock(&sonypi_device.lock); +- sonypi_device.open_count--; ++ local_dec(&sonypi_device.open_count); + mutex_unlock(&sonypi_device.lock); + return 0; + } +@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) + { + mutex_lock(&sonypi_device.lock); + /* Flush input queue on first open */ +- if (!sonypi_device.open_count) ++ if (!local_read(&sonypi_device.open_count)) + kfifo_reset(&sonypi_device.fifo); +- sonypi_device.open_count++; ++ local_inc(&sonypi_device.open_count); + mutex_unlock(&sonypi_device.lock); + + return 0; +diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c +index b9a57fa..5bb9e38 100644 +--- a/drivers/char/tpm/tpm_acpi.c ++++ b/drivers/char/tpm/tpm_acpi.c +@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log) + virt = acpi_os_map_memory(start, len); + if (!virt) { + kfree(log->bios_event_log); ++ log->bios_event_log = NULL; + printk("%s: ERROR - Unable to map memory\n", __func__); + return -EIO; + } + +- memcpy_fromio(log->bios_event_log, virt, len); ++ memcpy_fromio(log->bios_event_log, (const char 
__force_kernel *)virt, len); + + acpi_os_unmap_memory(virt, len); + return 0; +diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c +index 59f7cb2..bac8b6d 100644 +--- a/drivers/char/tpm/tpm_eventlog.c ++++ b/drivers/char/tpm/tpm_eventlog.c +@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) + event = addr; + + if ((event->event_type == 0 && event->event_size == 0) || +- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) ++ (event->event_size >= limit - addr - sizeof(struct tcpa_event))) + return NULL; + + return addr; +@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v, + return NULL; + + if ((event->event_type == 0 && event->event_size == 0) || +- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) ++ (event->event_size >= limit - v - sizeof(struct tcpa_event))) + return NULL; + + (*pos)++; +@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) + int i; + + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) +- seq_putc(m, data[i]); ++ if (!seq_putc(m, data[i])) ++ return -EFAULT; + + return 0; + } +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 6928d09..ff6abe8 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, + if (to_user) { + ssize_t ret; + +- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); ++ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count); + if (ret) + return -EFAULT; + } else { +@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, + if (!port_has_data(port) && !port->host_connected) + return 0; + +- return fill_readbuf(port, ubuf, count, true); ++ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true); + } + + static int wait_port_writable(struct port *port, bool nonblock) +diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c +index 57a078e..c17cde8 100644 +--- a/drivers/clk/clk-composite.c ++++ b/drivers/clk/clk-composite.c +@@ -146,7 +146,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name, + struct clk *clk; + struct clk_init_data init; + struct clk_composite *composite; +- struct clk_ops *clk_composite_ops; ++ clk_ops_no_const *clk_composite_ops; + + composite = kzalloc(sizeof(*composite), GFP_KERNEL); + if (!composite) { +diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c +index 5983a26..65d5f46 100644 +--- a/drivers/clk/socfpga/clk.c ++++ b/drivers/clk/socfpga/clk.c +@@ -22,6 +22,7 @@ + #include <linux/clk-provider.h> + #include <linux/io.h> + #include <linux/of.h> ++#include <asm/pgtable.h> + + /* Clock Manager offsets */ + #define CLKMGR_CTRL 0x0 +@@ -150,8 +151,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node, + streq(clk_name, "periph_pll") || + streq(clk_name, "sdram_pll")) { + socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; +- clk_pll_ops.enable = clk_gate_ops.enable; +- clk_pll_ops.disable = clk_gate_ops.disable; ++ pax_open_kernel(); ++ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable; ++ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable; ++ pax_close_kernel(); + } + + clk = clk_register(NULL, &socfpga_clk->hw.hw); +@@ -242,7 +245,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, + return parent_rate / div; + } 
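/*
 * Illustrative aside, not part of the patch itself: the socfpga hunk
 * above shows the constification idiom used throughout this patch.
 * Function-pointer tables such as struct clk_ops are moved into
 * read-only data, and the few legitimate boot-time writes are
 * bracketed with pax_open_kernel()/pax_close_kernel(), which briefly
 * lift kernel write protection (on x86 by toggling CR0.WP) while a
 * cast through a writable lvalue sidesteps the added const. A minimal
 * sketch, assuming the PaX helpers and the __read_only attribute
 * defined elsewhere in this patch; all other names are hypothetical.
 */
struct example_ops {
	int (*enable)(void);			/* hypothetical callbacks */
	int (*disable)(void);
};

static struct example_ops example_ops __read_only;	/* placed in r/o data */

static void example_patch_ops(int (*en)(void), int (*dis)(void))
{
	pax_open_kernel();			/* briefly allow writes to r/o kernel data */
	*(void **)&example_ops.enable = en;	/* cast defeats constification */
	*(void **)&example_ops.disable = dis;
	pax_close_kernel();			/* re-arm write protection */
}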
+ +-static struct clk_ops gateclk_ops = { ++static clk_ops_no_const gateclk_ops __read_only = { + .recalc_rate = socfpga_clk_recalc_rate, + .get_parent = socfpga_clk_get_parent, + .set_parent = socfpga_clk_set_parent, +diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c +index 18448a7..d5fad43 100644 +--- a/drivers/cpufreq/acpi-cpufreq.c ++++ b/drivers/cpufreq/acpi-cpufreq.c +@@ -676,8 +676,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) + data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); + per_cpu(acfreq_data, cpu) = data; + +- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) +- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; ++ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { ++ pax_open_kernel(); ++ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_close_kernel(); ++ } + + result = acpi_processor_register_performance(data->acpi_data, cpu); + if (result) +@@ -810,7 +813,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) + policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); + break; + case ACPI_ADR_SPACE_FIXED_HARDWARE: +- acpi_cpufreq_driver.get = get_cur_freq_on_cpu; ++ pax_open_kernel(); ++ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu; ++ pax_close_kernel(); + break; + default: + break; +@@ -905,8 +910,10 @@ static void __init acpi_cpufreq_boost_init(void) + if (!msrs) + return; + +- acpi_cpufreq_driver.boost_supported = true; +- acpi_cpufreq_driver.boost_enabled = boost_state(0); ++ pax_open_kernel(); ++ *(bool *)&acpi_cpufreq_driver.boost_supported = true; ++ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0); ++ pax_close_kernel(); + get_online_cpus(); + + /* Force all MSRs to the same value */ +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 153f4b9..d47054a 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1972,7 +1972,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) + #endif + + mutex_lock(&cpufreq_governor_mutex); +- list_del(&governor->governor_list); ++ pax_list_del(&governor->governor_list); + mutex_unlock(&cpufreq_governor_mutex); + return; + } +@@ -2202,7 +2202,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata cpufreq_cpu_notifier = { ++static struct notifier_block cpufreq_cpu_notifier = { + .notifier_call = cpufreq_cpu_callback, + }; + +@@ -2242,13 +2242,17 @@ int cpufreq_boost_trigger_state(int state) + return 0; + + write_lock_irqsave(&cpufreq_driver_lock, flags); +- cpufreq_driver->boost_enabled = state; ++ pax_open_kernel(); ++ *(bool *)&cpufreq_driver->boost_enabled = state; ++ pax_close_kernel(); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + ret = cpufreq_driver->set_boost(state); + if (ret) { + write_lock_irqsave(&cpufreq_driver_lock, flags); +- cpufreq_driver->boost_enabled = !state; ++ pax_open_kernel(); ++ *(bool *)&cpufreq_driver->boost_enabled = !state; ++ pax_close_kernel(); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + + pr_err("%s: Cannot %s BOOST\n", __func__, +@@ -2302,8 +2306,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) + + pr_debug("trying to register driver %s\n", driver_data->name); + +- if (driver_data->setpolicy) +- driver_data->flags |= CPUFREQ_CONST_LOOPS; ++ if (driver_data->setpolicy) { ++ pax_open_kernel(); ++ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS; ++ pax_close_kernel(); ++ } + + write_lock_irqsave(&cpufreq_driver_lock, flags); 
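/*
 * Illustrative aside, not part of the patch itself: once an object such
 * as a cpufreq governor is constified, plain list_add()/list_del() can
 * no longer write to its embedded list_head, which is why the hunks
 * above switch to pax_list_add()/pax_list_del(). The sketch below only
 * conveys the idea; the real wrappers are defined elsewhere in this
 * patch and additionally sanity-check the list pointers.
 */
static void pax_list_add_sketch(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();		/* the nodes may live in r/o memory */
	new->next = head->next;		/* same splice as list_add() */
	new->prev = head;
	head->next->prev = new;
	head->next = new;
	pax_close_kernel();
}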
+ if (cpufreq_driver) { +@@ -2318,8 +2325,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) + * Check if driver provides function to enable boost - + * if not, use cpufreq_boost_set_sw as default + */ +- if (!cpufreq_driver->set_boost) +- cpufreq_driver->set_boost = cpufreq_boost_set_sw; ++ if (!cpufreq_driver->set_boost) { ++ pax_open_kernel(); ++ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw; ++ pax_close_kernel(); ++ } + + ret = cpufreq_sysfs_create_file(&boost.attr); + if (ret) { +diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c +index e1c6433..31203ae 100644 +--- a/drivers/cpufreq/cpufreq_governor.c ++++ b/drivers/cpufreq/cpufreq_governor.c +@@ -191,7 +191,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, + struct dbs_data *dbs_data; + struct od_cpu_dbs_info_s *od_dbs_info = NULL; + struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; +- struct od_ops *od_ops = NULL; ++ const struct od_ops *od_ops = NULL; + struct od_dbs_tuners *od_tuners = NULL; + struct cs_dbs_tuners *cs_tuners = NULL; + struct cpu_dbs_common_info *cpu_cdbs; +@@ -257,7 +257,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, + + if ((cdata->governor == GOV_CONSERVATIVE) && + (!policy->governor->initialized)) { +- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; ++ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; + + cpufreq_register_notifier(cs_ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); +@@ -277,7 +277,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, + + if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && + (policy->governor->initialized == 1)) { +- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; ++ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; + + cpufreq_unregister_notifier(cs_ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); +diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h +index bfb9ae1..e1d3a7e 100644 +--- a/drivers/cpufreq/cpufreq_governor.h ++++ b/drivers/cpufreq/cpufreq_governor.h +@@ -205,7 +205,7 @@ struct common_dbs_data { + void (*exit)(struct dbs_data *dbs_data); + + /* Governor specific ops, see below */ +- void *gov_ops; ++ const void *gov_ops; + }; + + /* Governor Per policy data */ +@@ -225,7 +225,7 @@ struct od_ops { + unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, + unsigned int freq_next, unsigned int relation); + void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq); +-}; ++} __no_const; + + struct cs_ops { + struct notifier_block *notifier_block; +diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c +index 18d4091..434be15 100644 +--- a/drivers/cpufreq/cpufreq_ondemand.c ++++ b/drivers/cpufreq/cpufreq_ondemand.c +@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data) + + define_get_cpu_dbs_routines(od_cpu_dbs_info); + +-static struct od_ops od_ops = { ++static struct od_ops od_ops __read_only = { + .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, + .powersave_bias_target = generic_powersave_bias_target, + .freq_increase = dbs_freq_increase, +@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f) + (struct cpufreq_policy *, unsigned int, unsigned int), + unsigned int powersave_bias) + { +- od_ops.powersave_bias_target = f; ++ pax_open_kernel(); ++ *(void **)&od_ops.powersave_bias_target = f; ++ pax_close_kernel(); + od_set_powersave_bias(powersave_bias); + } + 
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler); + + void od_unregister_powersave_bias_handler(void) + { +- od_ops.powersave_bias_target = generic_powersave_bias_target; ++ pax_open_kernel(); ++ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target; ++ pax_close_kernel(); + od_set_powersave_bias(0); + } + EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler); +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index ae52c77..3d8f69b 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -125,10 +125,10 @@ struct pstate_funcs { + struct cpu_defaults { + struct pstate_adjust_policy pid_policy; + struct pstate_funcs funcs; +-}; ++} __do_const; + + static struct pstate_adjust_policy pid_params; +-static struct pstate_funcs pstate_funcs; ++static struct pstate_funcs *pstate_funcs; + + struct perf_limits { + int no_turbo; +@@ -530,7 +530,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) + + cpu->pstate.current_pstate = pstate; + +- pstate_funcs.set(cpu, pstate); ++ pstate_funcs->set(cpu, pstate); + } + + static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) +@@ -552,12 +552,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) + { + sprintf(cpu->name, "Intel 2nd generation core"); + +- cpu->pstate.min_pstate = pstate_funcs.get_min(); +- cpu->pstate.max_pstate = pstate_funcs.get_max(); +- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); ++ cpu->pstate.min_pstate = pstate_funcs->get_min(); ++ cpu->pstate.max_pstate = pstate_funcs->get_max(); ++ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo(); + +- if (pstate_funcs.get_vid) +- pstate_funcs.get_vid(cpu); ++ if (pstate_funcs->get_vid) ++ pstate_funcs->get_vid(cpu); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); + } + +@@ -844,9 +844,9 @@ static int intel_pstate_msrs_not_valid(void) + rdmsrl(MSR_IA32_APERF, aperf); + rdmsrl(MSR_IA32_MPERF, mperf); + +- if (!pstate_funcs.get_max() || +- !pstate_funcs.get_min() || +- !pstate_funcs.get_turbo()) ++ if (!pstate_funcs->get_max() || ++ !pstate_funcs->get_min() || ++ !pstate_funcs->get_turbo()) + return -ENODEV; + + rdmsrl(MSR_IA32_APERF, tmp); +@@ -860,7 +860,7 @@ static int intel_pstate_msrs_not_valid(void) + return 0; + } + +-static void copy_pid_params(struct pstate_adjust_policy *policy) ++static void copy_pid_params(const struct pstate_adjust_policy *policy) + { + pid_params.sample_rate_ms = policy->sample_rate_ms; + pid_params.p_gain_pct = policy->p_gain_pct; +@@ -872,11 +872,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy) + + static void copy_cpu_funcs(struct pstate_funcs *funcs) + { +- pstate_funcs.get_max = funcs->get_max; +- pstate_funcs.get_min = funcs->get_min; +- pstate_funcs.get_turbo = funcs->get_turbo; +- pstate_funcs.set = funcs->set; +- pstate_funcs.get_vid = funcs->get_vid; ++ pstate_funcs = funcs; + } + + #if IS_ENABLED(CONFIG_ACPI) +diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c +index 3d1cba9..0ab21d2 100644 +--- a/drivers/cpufreq/p4-clockmod.c ++++ b/drivers/cpufreq/p4-clockmod.c +@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) + case 0x0F: /* Core Duo */ + case 0x16: /* Celeron Core */ + case 0x1C: /* Atom */ +- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_open_kernel(); ++ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_close_kernel(); + return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); + case 0x0D: /* 
Pentium M (Dothan) */ +- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_open_kernel(); ++ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_close_kernel(); + /* fall through */ + case 0x09: /* Pentium M (Banias) */ + return speedstep_get_frequency(SPEEDSTEP_CPU_PM); +@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) + + /* on P-4s, the TSC runs with constant frequency independent whether + * throttling is active or not. */ +- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_open_kernel(); ++ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_close_kernel(); + + if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { + printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " +diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c +index 724ffbd..f06aaaa 100644 +--- a/drivers/cpufreq/sparc-us3-cpufreq.c ++++ b/drivers/cpufreq/sparc-us3-cpufreq.c +@@ -18,14 +18,12 @@ + #include <asm/head.h> + #include <asm/timer.h> + +-static struct cpufreq_driver *cpufreq_us3_driver; +- + struct us3_freq_percpu_info { + struct cpufreq_frequency_table table[4]; + }; + + /* Indexed by cpu number. */ +-static struct us3_freq_percpu_info *us3_freq_table; ++static struct us3_freq_percpu_info us3_freq_table[NR_CPUS]; + + /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled + * in the Safari config register. +@@ -156,18 +154,28 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) + + static int us3_freq_cpu_exit(struct cpufreq_policy *policy) + { +- if (cpufreq_us3_driver) { +- cpufreq_frequency_table_put_attr(policy->cpu); +- us3_freq_target(policy, 0); +- } ++ cpufreq_frequency_table_put_attr(policy->cpu); ++ us3_freq_target(policy, 0); + + return 0; + } + ++static int __init us3_freq_init(void); ++static void __exit us3_freq_exit(void); ++ ++static struct cpufreq_driver cpufreq_us3_driver = { ++ .init = us3_freq_cpu_init, ++ .verify = cpufreq_generic_frequency_table_verify, ++ .target_index = us3_freq_target, ++ .get = us3_freq_get, ++ .exit = us3_freq_cpu_exit, ++ .name = "UltraSPARC-III", ++ ++}; ++ + static int __init us3_freq_init(void) + { + unsigned long manuf, impl, ver; +- int ret; + + if (tlb_type != cheetah && tlb_type != cheetah_plus) + return -ENODEV; +@@ -180,55 +188,15 @@ static int __init us3_freq_init(void) + (impl == CHEETAH_IMPL || + impl == CHEETAH_PLUS_IMPL || + impl == JAGUAR_IMPL || +- impl == PANTHER_IMPL)) { +- struct cpufreq_driver *driver; +- +- ret = -ENOMEM; +- driver = kzalloc(sizeof(*driver), GFP_KERNEL); +- if (!driver) +- goto err_out; +- +- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)), +- GFP_KERNEL); +- if (!us3_freq_table) +- goto err_out; +- +- driver->init = us3_freq_cpu_init; +- driver->verify = cpufreq_generic_frequency_table_verify; +- driver->target_index = us3_freq_target; +- driver->get = us3_freq_get; +- driver->exit = us3_freq_cpu_exit; +- strcpy(driver->name, "UltraSPARC-III"); +- +- cpufreq_us3_driver = driver; +- ret = cpufreq_register_driver(driver); +- if (ret) +- goto err_out; +- +- return 0; +- +-err_out: +- if (driver) { +- kfree(driver); +- cpufreq_us3_driver = NULL; +- } +- kfree(us3_freq_table); +- us3_freq_table = NULL; +- return ret; +- } ++ impl == PANTHER_IMPL)) ++ return cpufreq_register_driver(&cpufreq_us3_driver); + + return -ENODEV; + } + + static void __exit us3_freq_exit(void) + { +- if (cpufreq_us3_driver) { +- cpufreq_unregister_driver(cpufreq_us3_driver); +- 
kfree(cpufreq_us3_driver); +- cpufreq_us3_driver = NULL; +- kfree(us3_freq_table); +- us3_freq_table = NULL; +- } ++ cpufreq_unregister_driver(&cpufreq_us3_driver); + } + + MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); +diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c +index 4e1daca..e707b61 100644 +--- a/drivers/cpufreq/speedstep-centrino.c ++++ b/drivers/cpufreq/speedstep-centrino.c +@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) + !cpu_has(cpu, X86_FEATURE_EST)) + return -ENODEV; + +- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) +- centrino_driver.flags |= CPUFREQ_CONST_LOOPS; ++ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) { ++ pax_open_kernel(); ++ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS; ++ pax_close_kernel(); ++ } + + if (policy->cpu != 0) + return -ENODEV; +diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c +index 06dbe7c..c2c8671 100644 +--- a/drivers/cpuidle/driver.c ++++ b/drivers/cpuidle/driver.c +@@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_device *dev, + + static void poll_idle_init(struct cpuidle_driver *drv) + { +- struct cpuidle_state *state = &drv->states[0]; ++ cpuidle_state_no_const *state = &drv->states[0]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); + snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); +diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c +index ca89412..a7b9c49 100644 +--- a/drivers/cpuidle/governor.c ++++ b/drivers/cpuidle/governor.c +@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) + mutex_lock(&cpuidle_lock); + if (__cpuidle_find_governor(gov->name) == NULL) { + ret = 0; +- list_add_tail(&gov->governor_list, &cpuidle_governors); ++ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors); + if (!cpuidle_curr_governor || + cpuidle_curr_governor->rating < gov->rating) + cpuidle_switch_governor(gov); +diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c +index e918b6d..f87ea80 100644 +--- a/drivers/cpuidle/sysfs.c ++++ b/drivers/cpuidle/sysfs.c +@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = { + NULL + }; + +-static struct attribute_group cpuidle_attr_group = { ++static attribute_group_no_const cpuidle_attr_group = { + .attrs = cpuidle_default_attrs, + .name = "cpuidle", + }; +diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c +index 12fea3e..1e28f47 100644 +--- a/drivers/crypto/hifn_795x.c ++++ b/drivers/crypto/hifn_795x.c +@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); + MODULE_PARM_DESC(hifn_pll_ref, + "PLL reference clock (pci[freq] or ext[freq], default ext)"); + +-static atomic_t hifn_dev_number; ++static atomic_unchecked_t hifn_dev_number; + + #define ACRYPTO_OP_DECRYPT 0 + #define ACRYPTO_OP_ENCRYPT 1 +@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto err_out_disable_pci_device; + + snprintf(name, sizeof(name), "hifn%d", +- atomic_inc_return(&hifn_dev_number)-1); ++ atomic_inc_return_unchecked(&hifn_dev_number)-1); + + err = pci_request_regions(pdev, name); + if (err) +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index a0b2f7e..1b6f028 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor) + goto err_out; + } + +- list_add(&governor->node, &devfreq_governor_list); ++ 
pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list); + + list_for_each_entry(devfreq, &devfreq_list, node) { + int ret = 0; +@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor) + } + } + +- list_del(&governor->node); ++ pax_list_del((struct list_head *)&governor->node); + err_out: + mutex_unlock(&devfreq_list_lock); + +diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c +index 2e7b394..1371a64 100644 +--- a/drivers/dma/sh/shdma-base.c ++++ b/drivers/dma/sh/shdma-base.c +@@ -267,8 +267,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan) + schan->slave_id = -EINVAL; + } + +- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, +- sdev->desc_size, GFP_KERNEL); ++ schan->desc = kcalloc(sdev->desc_size, ++ NR_DESCS_PER_CHANNEL, GFP_KERNEL); + if (!schan->desc) { + ret = -ENOMEM; + goto edescalloc; +diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c +index 0d765c0..60b7480 100644 +--- a/drivers/dma/sh/shdmac.c ++++ b/drivers/dma/sh/shdmac.c +@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self, + return ret; + } + +-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { ++static struct notifier_block sh_dmae_nmi_notifier = { + .notifier_call = sh_dmae_nmi_handler, + + /* Run before NMI debug handler and KGDB */ +diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c +index 592af5f..bb1d583 100644 +--- a/drivers/edac/edac_device.c ++++ b/drivers/edac/edac_device.c +@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, + */ + int edac_device_alloc_index(void) + { +- static atomic_t device_indexes = ATOMIC_INIT(0); ++ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0); + +- return atomic_inc_return(&device_indexes) - 1; ++ return atomic_inc_return_unchecked(&device_indexes) - 1; + } + EXPORT_SYMBOL_GPL(edac_device_alloc_index); + +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index b335c6a..db65b44 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -152,7 +152,7 @@ static const char * const edac_caps[] = { + struct dev_ch_attribute { + struct device_attribute attr; + int channel; +-}; ++} __do_const; + + #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ + struct dev_ch_attribute dev_attr_legacy_##_name = \ +@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) + } + + if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { ++ pax_open_kernel(); + if (mci->get_sdram_scrub_rate) { +- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; +- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; ++ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; ++ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; + } + if (mci->set_sdram_scrub_rate) { +- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; +- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; ++ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; ++ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; + } ++ pax_close_kernel(); + err = device_create_file(&mci->dev, + &dev_attr_sdram_scrub_rate); + if (err) { +diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c +index 2cf44b4d..6dd2dc7 100644 +--- a/drivers/edac/edac_pci.c ++++ b/drivers/edac/edac_pci.c +@@ -29,7 +29,7 @@ + + static DEFINE_MUTEX(edac_pci_ctls_mutex); + static LIST_HEAD(edac_pci_list); +-static atomic_t pci_indexes 
= ATOMIC_INIT(0); ++static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0); + + /* + * edac_pci_alloc_ctl_info +@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period); + */ + int edac_pci_alloc_index(void) + { +- return atomic_inc_return(&pci_indexes) - 1; ++ return atomic_inc_return_unchecked(&pci_indexes) - 1; + } + EXPORT_SYMBOL_GPL(edac_pci_alloc_index); + +diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c +index e8658e4..22746d6 100644 +--- a/drivers/edac/edac_pci_sysfs.c ++++ b/drivers/edac/edac_pci_sysfs.c +@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */ + static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ + static int edac_pci_poll_msec = 1000; /* one second workq period */ + +-static atomic_t pci_parity_count = ATOMIC_INIT(0); +-static atomic_t pci_nonparity_count = ATOMIC_INIT(0); ++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0); ++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0); + + static struct kobject *edac_pci_top_main_kobj; + static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); +@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute { + void *value; + ssize_t(*show) (void *, char *); + ssize_t(*store) (void *, const char *, size_t); +-}; ++} __do_const; + + /* Set of show/store abstract level functions for PCI Parity object */ + static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, +@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + edac_printk(KERN_CRIT, EDAC_PCI, + "Signaled System Error on %s\n", + pci_name(dev)); +- atomic_inc(&pci_nonparity_count); ++ atomic_inc_unchecked(&pci_nonparity_count); + } + + if (status & (PCI_STATUS_PARITY)) { +@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Master Data Parity Error on %s\n", + pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + + if (status & (PCI_STATUS_DETECTED_PARITY)) { +@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Detected Parity Error on %s\n", + pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + } + +@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " + "Signaled System Error on %s\n", + pci_name(dev)); +- atomic_inc(&pci_nonparity_count); ++ atomic_inc_unchecked(&pci_nonparity_count); + } + + if (status & (PCI_STATUS_PARITY)) { +@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Master Data Parity Error on " + "%s\n", pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + + if (status & (PCI_STATUS_DETECTED_PARITY)) { +@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) + "Detected Parity Error on %s\n", + pci_name(dev)); + +- atomic_inc(&pci_parity_count); ++ atomic_inc_unchecked(&pci_parity_count); + } + } + } +@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void) + if (!check_pci_errors) + return; + +- before_count = atomic_read(&pci_parity_count); ++ before_count = atomic_read_unchecked(&pci_parity_count); + + /* scan all PCI devices looking for a Parity Error on devices and + * bridges. 
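/*
 * Illustrative aside, not part of the patch itself: with PaX REFCOUNT,
 * ordinary atomic_t arithmetic traps on overflow to block refcount
 * exploits. Counters such as pci_parity_count above are statistics that
 * may legitimately wrap, so the patch converts them to
 * atomic_unchecked_t with the matching *_unchecked accessors, opting
 * them out of the overflow check. A minimal sketch; the names below are
 * hypothetical.
 */
static atomic_unchecked_t example_err_count = ATOMIC_INIT(0);

static void example_note_error(void)
{
	atomic_inc_unchecked(&example_err_count);	/* wrap-tolerant increment */
}

static int example_error_total(void)
{
	return atomic_read_unchecked(&example_err_count);
}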
+@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void) + /* Only if operator has selected panic on PCI Error */ + if (edac_pci_get_panic_on_pe()) { + /* If the count is different 'after' from 'before' */ +- if (before_count != atomic_read(&pci_parity_count)) ++ if (before_count != atomic_read_unchecked(&pci_parity_count)) + panic("EDAC: PCI Parity Error"); + } + } +diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h +index 51b7e3a..aa8a3e8 100644 +--- a/drivers/edac/mce_amd.h ++++ b/drivers/edac/mce_amd.h +@@ -77,7 +77,7 @@ struct amd_decoder_ops { + bool (*mc0_mce)(u16, u8); + bool (*mc1_mce)(u16, u8); + bool (*mc2_mce)(u16, u8); +-}; ++} __no_const; + + void amd_report_gart_errors(bool); + void amd_register_ecc_decoder(void (*f)(int, struct mce *)); +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c +index 57ea7f4..af06b76 100644 +--- a/drivers/firewire/core-card.c ++++ b/drivers/firewire/core-card.c +@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, + struct device *device) + { +- static atomic_t index = ATOMIC_INIT(-1); ++ static atomic_unchecked_t index = ATOMIC_INIT(-1); + +- card->index = atomic_inc_return(&index); ++ card->index = atomic_inc_return_unchecked(&index); + card->driver = driver; + card->device = device; + card->current_tlabel = 0; +@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release); + + void fw_core_remove_card(struct fw_card *card) + { +- struct fw_card_driver dummy_driver = dummy_driver_template; ++ fw_card_driver_no_const dummy_driver = dummy_driver_template; + + card->driver->update_phy_reg(card, 4, + PHY_LINK_ACTIVE | PHY_CONTENDER, 0); +diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c +index 2c6d5e1..a2cca6b 100644 +--- a/drivers/firewire/core-device.c ++++ b/drivers/firewire/core-device.c +@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma); + struct config_rom_attribute { + struct device_attribute attr; + u32 key; +-}; ++} __do_const; + + static ssize_t show_immediate(struct device *dev, + struct device_attribute *dattr, char *buf) +diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c +index eb6935c..3cc2bfa 100644 +--- a/drivers/firewire/core-transaction.c ++++ b/drivers/firewire/core-transaction.c +@@ -38,6 +38,7 @@ + #include <linux/timer.h> + #include <linux/types.h> + #include <linux/workqueue.h> ++#include <linux/sched.h> + + #include <asm/byteorder.h> + +diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h +index f477308..2795f24 100644 +--- a/drivers/firewire/core.h ++++ b/drivers/firewire/core.h +@@ -111,6 +111,7 @@ struct fw_card_driver { + + int (*stop_iso)(struct fw_iso_context *ctx); + }; ++typedef struct fw_card_driver __no_const fw_card_driver_no_const; + + void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, struct device *device); +diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c +index 586f2f7..3545ad2 100644 +--- a/drivers/firewire/ohci.c ++++ b/drivers/firewire/ohci.c +@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work) + be32_to_cpu(ohci->next_header)); + } + ++#ifndef CONFIG_GRKERNSEC + if (param_remote_dma) { + reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); + reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); + } ++#endif + + spin_unlock_irq(&ohci->lock); + +@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card, + unsigned long flags; + int n, ret = 0; + ++#ifndef 
CONFIG_GRKERNSEC + if (param_remote_dma) + return 0; ++#endif + + /* + * FIXME: Make sure this bitmask is cleared when we clear the busReset +diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c +index 94a58a0..f5eba42 100644 +--- a/drivers/firmware/dmi-id.c ++++ b/drivers/firmware/dmi-id.c +@@ -16,7 +16,7 @@ + struct dmi_device_attribute{ + struct device_attribute dev_attr; + int field; +-}; ++} __do_const; + #define to_dmi_dev_attr(_dev_attr) \ + container_of(_dev_attr, struct dmi_device_attribute, dev_attr) + +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c +index 17afc51..0ef90cd 100644 +--- a/drivers/firmware/dmi_scan.c ++++ b/drivers/firmware/dmi_scan.c +@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *), + if (buf == NULL) + return -1; + +- dmi_table(buf, dmi_len, dmi_num, decode, private_data); ++ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data); + + dmi_unmap(buf); + return 0; +diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c +index 1491dd4..aa910db 100644 +--- a/drivers/firmware/efi/cper.c ++++ b/drivers/firmware/efi/cper.c +@@ -41,12 +41,12 @@ + */ + u64 cper_next_record_id(void) + { +- static atomic64_t seq; ++ static atomic64_unchecked_t seq; + +- if (!atomic64_read(&seq)) +- atomic64_set(&seq, ((u64)get_seconds()) << 32); ++ if (!atomic64_read_unchecked(&seq)) ++ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32); + +- return atomic64_inc_return(&seq); ++ return atomic64_inc_return_unchecked(&seq); + } + EXPORT_SYMBOL_GPL(cper_next_record_id); + +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index 4753bac..02861a2 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -120,14 +120,16 @@ static struct attribute_group efi_subsys_attr_group = { + }; + + static struct efivars generic_efivars; +-static struct efivar_operations generic_ops; ++static efivar_operations_no_const generic_ops __read_only; + + static int generic_ops_register(void) + { +- generic_ops.get_variable = efi.get_variable; +- generic_ops.set_variable = efi.set_variable; +- generic_ops.get_next_variable = efi.get_next_variable; +- generic_ops.query_variable_store = efi_query_variable_store; ++ pax_open_kernel(); ++ *(void **)&generic_ops.get_variable = efi.get_variable; ++ *(void **)&generic_ops.set_variable = efi.set_variable; ++ *(void **)&generic_ops.get_next_variable = efi.get_next_variable; ++ *(void **)&generic_ops.query_variable_store = efi_query_variable_store; ++ pax_close_kernel(); + + return efivars_register(&generic_efivars, &generic_ops, efi_kobj); + } +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 3dc2482..7bd2f61 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + static int + create_efivars_bin_attributes(void) + { +- struct bin_attribute *attr; ++ bin_attribute_no_const *attr; + int error; + + /* new_var */ +diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c +index 2a90ba6..07f3733 100644 +--- a/drivers/firmware/google/memconsole.c ++++ b/drivers/firmware/google/memconsole.c +@@ -147,7 +147,9 @@ static int __init memconsole_init(void) + if (!found_memconsole()) + return -ENODEV; + +- memconsole_bin_attr.size = memconsole_length; ++ pax_open_kernel(); ++ *(size_t *)&memconsole_bin_attr.size = memconsole_length; ++ pax_close_kernel(); + + ret = 
sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr); + +diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c +index 1e98a98..b444372 100644 +--- a/drivers/gpio/gpio-em.c ++++ b/drivers/gpio/gpio-em.c +@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev) + struct em_gio_priv *p; + struct resource *io[2], *irq[2]; + struct gpio_chip *gpio_chip; +- struct irq_chip *irq_chip; ++ irq_chip_no_const *irq_chip; + const char *name = dev_name(&pdev->dev); + int ret; + +diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c +index f5bf3c3..7baaa59 100644 +--- a/drivers/gpio/gpio-ich.c ++++ b/drivers/gpio/gpio-ich.c +@@ -71,7 +71,7 @@ struct ichx_desc { + /* Some chipsets have quirks, let these use their own request/get */ + int (*request)(struct gpio_chip *chip, unsigned offset); + int (*get)(struct gpio_chip *chip, unsigned offset); +-}; ++} __do_const; + + static struct { + spinlock_t lock; +diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c +index ca76ce7..68b384b 100644 +--- a/drivers/gpio/gpio-rcar.c ++++ b/drivers/gpio/gpio-rcar.c +@@ -355,7 +355,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) + struct gpio_rcar_priv *p; + struct resource *io, *irq; + struct gpio_chip *gpio_chip; +- struct irq_chip *irq_chip; ++ irq_chip_no_const *irq_chip; + const char *name = dev_name(&pdev->dev); + int ret; + +diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c +index 9902732..64b62dd 100644 +--- a/drivers/gpio/gpio-vr41xx.c ++++ b/drivers/gpio/gpio-vr41xx.c +@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq) + printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", + maskl, pendl, maskh, pendh); + +- atomic_inc(&irq_err_count); ++ atomic_inc_unchecked(&irq_err_count); + + return -EINVAL; + } +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index 3b7d32d..05c2f74 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -3123,7 +3123,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, + goto done; + } + +- if (copy_to_user(&enum_ptr[copied].name, ++ if (copy_to_user(enum_ptr[copied].name, + &prop_enum->name, DRM_PROP_NAME_LEN)) { + ret = -EFAULT; + goto done; +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +old mode 100644 +new mode 100755 +index 345be03..65b66c0 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -233,7 +233,7 @@ module_exit(drm_core_exit); + /** + * Copy and IOCTL return string to user space + */ +-static int drm_copy_field(char *buf, size_t *buf_len, const char *value) ++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) + { + int len; + +@@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp, + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev; + const struct drm_ioctl_desc *ioctl = NULL; +- drm_ioctl_t *func; ++ drm_ioctl_no_const_t func; + unsigned int nr = DRM_IOCTL_NR(cmd); + int retcode = -EINVAL; + char stack_kdata[128]; +@@ -380,8 +380,9 @@ long drm_ioctl(struct file *filp, + retcode = -EFAULT; + goto err_i1; + } +- } else ++ } else if (cmd & IOC_OUT) { + memset(kdata, 0, usize); ++ } + + if (ioctl->flags & DRM_UNLOCKED) + retcode = func(dev, kdata, file_priv); +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c +index 7f2af9a..1561914 100644 +--- a/drivers/gpu/drm/drm_fops.c ++++ b/drivers/gpu/drm/drm_fops.c +@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp) + if (drm_device_is_unplugged(dev)) + 
return -ENODEV; + +- if (!dev->open_count++) ++ if (local_inc_return(&dev->open_count) == 1) + need_setup = 1; + mutex_lock(&dev->struct_mutex); + old_imapping = inode->i_mapping; +@@ -127,7 +127,7 @@ err_undo: + iput(container_of(dev->dev_mapping, struct inode, i_data)); + dev->dev_mapping = old_mapping; + mutex_unlock(&dev->struct_mutex); +- dev->open_count--; ++ local_dec(&dev->open_count); + return retcode; + } + EXPORT_SYMBOL(drm_open); +@@ -463,7 +463,7 @@ int drm_release(struct inode *inode, struct file *filp) + + mutex_lock(&drm_global_mutex); + +- DRM_DEBUG("open_count = %d\n", dev->open_count); ++ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count)); + + if (dev->driver->preclose) + dev->driver->preclose(dev, file_priv); +@@ -472,10 +472,10 @@ int drm_release(struct inode *inode, struct file *filp) + * Begin inline drm_release + */ + +- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", ++ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), +- dev->open_count); ++ local_read(&dev->open_count)); + + /* Release any auth tokens that might point to this file_priv, + (do that under the drm_global_mutex) */ +@@ -573,7 +573,7 @@ int drm_release(struct inode *inode, struct file *filp) + * End inline drm_release + */ + +- if (!--dev->open_count) { ++ if (local_dec_and_test(&dev->open_count)) { + retcode = drm_lastclose(dev); + if (drm_device_is_unplugged(dev)) + drm_put_dev(dev); +diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c +index 3d2e91c..d31c4c9 100644 +--- a/drivers/gpu/drm/drm_global.c ++++ b/drivers/gpu/drm/drm_global.c +@@ -36,7 +36,7 @@ + struct drm_global_item { + struct mutex mutex; + void *object; +- int refcount; ++ atomic_t refcount; + }; + + static struct drm_global_item glob[DRM_GLOBAL_NUM]; +@@ -49,7 +49,7 @@ void drm_global_init(void) + struct drm_global_item *item = &glob[i]; + mutex_init(&item->mutex); + item->object = NULL; +- item->refcount = 0; ++ atomic_set(&item->refcount, 0); + } + } + +@@ -59,7 +59,7 @@ void drm_global_release(void) + for (i = 0; i < DRM_GLOBAL_NUM; ++i) { + struct drm_global_item *item = &glob[i]; + BUG_ON(item->object != NULL); +- BUG_ON(item->refcount != 0); ++ BUG_ON(atomic_read(&item->refcount) != 0); + } + } + +@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) + struct drm_global_item *item = &glob[ref->global_type]; + + mutex_lock(&item->mutex); +- if (item->refcount == 0) { ++ if (atomic_read(&item->refcount) == 0) { + item->object = kzalloc(ref->size, GFP_KERNEL); + if (unlikely(item->object == NULL)) { + ret = -ENOMEM; +@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref) + goto out_err; + + } +- ++item->refcount; ++ atomic_inc(&item->refcount); + ref->object = item->object; + mutex_unlock(&item->mutex); + return 0; +@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref) + struct drm_global_item *item = &glob[ref->global_type]; + + mutex_lock(&item->mutex); +- BUG_ON(item->refcount == 0); ++ BUG_ON(atomic_read(&item->refcount) == 0); + BUG_ON(ref->object != item->object); +- if (--item->refcount == 0) { ++ if (atomic_dec_and_test(&item->refcount)) { + ref->release(ref); + item->object = NULL; + } +diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c +index 7473035..a48b9c5 100644 +--- a/drivers/gpu/drm/drm_info.c ++++ b/drivers/gpu/drm/drm_info.c +@@ -75,10 +75,13 @@ int drm_vm_info(struct seq_file *m, void *data) + struct 
drm_local_map *map; + struct drm_map_list *r_list; + +- /* Hardcoded from _DRM_FRAME_BUFFER, +- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and +- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ +- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; ++ static const char * const types[] = { ++ [_DRM_FRAME_BUFFER] = "FB", ++ [_DRM_REGISTERS] = "REG", ++ [_DRM_SHM] = "SHM", ++ [_DRM_AGP] = "AGP", ++ [_DRM_SCATTER_GATHER] = "SG", ++ [_DRM_CONSISTENT] = "PCI"}; + const char *type; + int i; + +@@ -89,7 +92,7 @@ int drm_vm_info(struct seq_file *m, void *data) + map = r_list->map; + if (!map) + continue; +- if (map->type < 0 || map->type > 5) ++ if (map->type >= ARRAY_SIZE(types)) + type = "??"; + else + type = types[map->type]; +@@ -261,7 +264,11 @@ int drm_vma_info(struct seq_file *m, void *data) + vma->vm_flags & VM_MAYSHARE ? 's' : 'p', + vma->vm_flags & VM_LOCKED ? 'l' : '-', + vma->vm_flags & VM_IO ? 'i' : '-', ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ 0); ++#else + vma->vm_pgoff); ++#endif + + #if defined(__i386__) + pgprot = pgprot_val(vma->vm_page_prot); +diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c +index 2f4c4343..dd12cd2 100644 +--- a/drivers/gpu/drm/drm_ioc32.c ++++ b/drivers/gpu/drm/drm_ioc32.c +@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(nbytes); + if (!access_ok(VERIFY_WRITE, request, nbytes)) + return -EFAULT; +- list = (struct drm_buf_desc *) (request + 1); ++ list = (struct drm_buf_desc __user *) (request + 1); + + if (__put_user(count, &request->count) + || __put_user(list, &request->list)) +@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(nbytes); + if (!access_ok(VERIFY_WRITE, request, nbytes)) + return -EFAULT; +- list = (struct drm_buf_pub *) (request + 1); ++ list = (struct drm_buf_pub __user *) (request + 1); + + if (__put_user(count, &request->count) + || __put_user(list, &request->list)) +@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, + return 0; + } + +-drm_ioctl_compat_t *drm_compat_ioctls[] = { ++drm_ioctl_compat_t drm_compat_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, +@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = { + long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + unsigned int nr = DRM_IOCTL_NR(cmd); +- drm_ioctl_compat_t *fn; + int ret; + + /* Assume that ioctls without an explicit compat routine will just +@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + if (nr >= ARRAY_SIZE(drm_compat_ioctls)) + return drm_ioctl(filp, cmd, arg); + +- fn = drm_compat_ioctls[nr]; +- +- if (fn != NULL) +- ret = (*fn) (filp, cmd, arg); ++ if (drm_compat_ioctls[nr] != NULL) ++ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg); + else + ret = drm_ioctl(filp, cmd, arg); + +diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c +index 98a33c580..8fd1c2b 100644 +--- a/drivers/gpu/drm/drm_stub.c ++++ b/drivers/gpu/drm/drm_stub.c +@@ -409,7 +409,7 @@ void drm_unplug_dev(struct drm_device *dev) + + drm_device_set_unplugged(dev); + +- if (dev->open_count == 0) { ++ if (local_read(&dev->open_count) == 0) { + drm_put_dev(dev); + } + mutex_unlock(&drm_global_mutex); +diff --git a/drivers/gpu/drm/drm_sysfs.c 
b/drivers/gpu/drm/drm_sysfs.c +index c22c309..ae758c3 100644 +--- a/drivers/gpu/drm/drm_sysfs.c ++++ b/drivers/gpu/drm/drm_sysfs.c +@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev) + */ + int drm_sysfs_device_add(struct drm_minor *minor) + { +- char *minor_str; ++ const char *minor_str; + int r; + + if (minor->type == DRM_MINOR_CONTROL) +diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h +index d4d16ed..8fb0b51 100644 +--- a/drivers/gpu/drm/i810/i810_drv.h ++++ b/drivers/gpu/drm/i810/i810_drv.h +@@ -108,8 +108,8 @@ typedef struct drm_i810_private { + int page_flipping; + + wait_queue_head_t irq_queue; +- atomic_t irq_received; +- atomic_t irq_emitted; ++ atomic_unchecked_t irq_received; ++ atomic_unchecked_t irq_emitted; + + int front_offset; + } drm_i810_private_t; +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c +index b2b46c5..feb9fe7 100644 +--- a/drivers/gpu/drm/i915/i915_debugfs.c ++++ b/drivers/gpu/drm/i915/i915_debugfs.c +@@ -713,7 +713,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) + I915_READ(GTIMR)); + } + seq_printf(m, "Interrupts received: %d\n", +- atomic_read(&dev_priv->irq_received)); ++ atomic_read_unchecked(&dev_priv->irq_received)); + for_each_ring(ring, dev_priv, i) { + if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c +index 15a74f9..4278889 100644 +--- a/drivers/gpu/drm/i915/i915_dma.c ++++ b/drivers/gpu/drm/i915/i915_dma.c +@@ -1273,7 +1273,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (local_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 4677af9..cd79971 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1362,7 +1362,7 @@ typedef struct drm_i915_private { + drm_dma_handle_t *status_page_dmah; + struct resource mch_res; + +- atomic_t irq_received; ++ atomic_unchecked_t irq_received; + + /* protects the irq masks */ + spinlock_t irq_lock; +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +index 768e666..68cf44d 100644 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -860,9 +860,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) + + static int + validate_exec_list(struct drm_i915_gem_exec_object2 *exec, +- int count) ++ unsigned int count) + { +- int i; ++ unsigned int i; + unsigned relocs_total = 0; + unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry); + +diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c +index 3c59584..500f2e9 100644 +--- a/drivers/gpu/drm/i915/i915_ioc32.c ++++ b/drivers/gpu/drm/i915/i915_ioc32.c +@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd, + (unsigned long)request); + } + +-static drm_ioctl_compat_t *i915_compat_ioctls[] = { ++static drm_ioctl_compat_t i915_compat_ioctls[] = { + [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, + [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, + [DRM_I915_GETPARAM] = compat_i915_getparam, +@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = { + long i915_compat_ioctl(struct file *filp, unsigned int 
cmd, unsigned long arg) + { + unsigned int nr = DRM_IOCTL_NR(cmd); +- drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) +- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; +- +- if (fn != NULL) ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) { ++ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; + ret = (*fn) (filp, cmd, arg); +- else ++ } else + ret = drm_ioctl(filp, cmd, arg); + + return ret; +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c +index 4050450..f67c5c1 100644 +--- a/drivers/gpu/drm/i915/i915_irq.c ++++ b/drivers/gpu/drm/i915/i915_irq.c +@@ -1448,7 +1448,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) + int pipe; + u32 pipe_stats[I915_MAX_PIPES]; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + while (true) { + iir = I915_READ(VLV_IIR); +@@ -1761,7 +1761,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) + u32 de_iir, gt_iir, de_ier, sde_ier = 0; + irqreturn_t ret = IRQ_NONE; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + /* We get interrupts on unclaimed registers, so check for this before we + * do any I915_{READ,WRITE}. */ +@@ -1831,7 +1831,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) + uint32_t tmp = 0; + enum pipe pipe; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + master_ctl = I915_READ(GEN8_MASTER_IRQ); + master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; +@@ -2655,7 +2655,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev) + { + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + I915_WRITE(HWSTAM, 0xeffe); + +@@ -2673,7 +2673,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev) + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + /* VLV magic */ + I915_WRITE(VLV_IMR, 0); +@@ -2704,7 +2704,7 @@ static void gen8_irq_preinstall(struct drm_device *dev) + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + I915_WRITE(GEN8_MASTER_IRQ, 0); + POSTING_READ(GEN8_MASTER_IRQ); +@@ -3028,7 +3028,7 @@ static void gen8_irq_uninstall(struct drm_device *dev) + if (!dev_priv) + return; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + I915_WRITE(GEN8_MASTER_IRQ, 0); + +@@ -3122,7 +3122,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); +@@ -3208,7 +3208,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + iir = I915_READ16(IIR); + if (iir == 0) +@@ -3287,7 +3287,7 @@ static void i915_irq_preinstall(struct drm_device * dev) 
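/*
 * Illustrative aside, not part of the patch itself: the i915_ioc32.c
 * hunk above is one instance of a recurring change to compat-ioctl
 * dispatch. The handler table loses its extra level of writable
 * indirection so it can sit in read-only memory, and it is indexed only
 * after an explicit bounds check, leaving no writable function-pointer
 * slot to retarget. A sketch of the pattern with hypothetical names:
 */
typedef long (*example_ioctl_fn)(struct file *, unsigned int, unsigned long);

static const example_ioctl_fn example_ioctls[] = {	/* const: ends up in r/o data */
	/* [DRM_IOCTL_NR(...)] = handler, ... */
};

static long example_dispatch(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);

	if (nr >= ARRAY_SIZE(example_ioctls) || example_ioctls[nr] == NULL)
		return -ENOTTY;			/* reject out-of-range requests */
	return example_ioctls[nr](filp, cmd, arg);
}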
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); +@@ -3394,7 +3394,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + int pipe, ret = IRQ_NONE; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + iir = I915_READ(IIR); + do { +@@ -3521,7 +3521,7 @@ static void i965_irq_preinstall(struct drm_device * dev) + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + +- atomic_set(&dev_priv->irq_received, 0); ++ atomic_set_unchecked(&dev_priv->irq_received, 0); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); +@@ -3637,7 +3637,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + +- atomic_inc(&dev_priv->irq_received); ++ atomic_inc_unchecked(&dev_priv->irq_received); + + iir = I915_READ(IIR); + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index b6fb3eb..e0fa1e1 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -10798,13 +10798,13 @@ struct intel_quirk { + int subsystem_vendor; + int subsystem_device; + void (*hook)(struct drm_device *dev); +-}; ++} __do_const; + + /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ + struct intel_dmi_quirk { + void (*hook)(struct drm_device *dev); + const struct dmi_system_id (*dmi_id_list)[]; +-}; ++} __do_const; + + static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) + { +@@ -10812,18 +10812,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) + return 1; + } + +-static const struct intel_dmi_quirk intel_dmi_quirks[] = { ++static const struct dmi_system_id intel_dmi_quirks_table[] = { + { +- .dmi_id_list = &(const struct dmi_system_id[]) { +- { +- .callback = intel_dmi_reverse_brightness, +- .ident = "NCR Corporation", +- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, ""), +- }, +- }, +- { } /* terminating entry */ ++ .callback = intel_dmi_reverse_brightness, ++ .ident = "NCR Corporation", ++ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, ""), + }, ++ }, ++ { } /* terminating entry */ ++}; ++ ++static const struct intel_dmi_quirk intel_dmi_quirks[] = { ++ { ++ .dmi_id_list = &intel_dmi_quirks_table, + .hook = quirk_invert_brightness, + }, + }; +diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h +index fe45321..836fdca 100644 +--- a/drivers/gpu/drm/mga/mga_drv.h ++++ b/drivers/gpu/drm/mga/mga_drv.h +@@ -120,9 +120,9 @@ typedef struct drm_mga_private { + u32 clear_cmd; + u32 maccess; + +- atomic_t vbl_received; /**< Number of vblanks received. */ ++ atomic_unchecked_t vbl_received; /**< Number of vblanks received. 
*/ + wait_queue_head_t fence_queue; +- atomic_t last_fence_retired; ++ atomic_unchecked_t last_fence_retired; + u32 next_fence_to_post; + + unsigned int fb_cpp; +diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c +index 86b4bb8..ae237ad 100644 +--- a/drivers/gpu/drm/mga/mga_ioc32.c ++++ b/drivers/gpu/drm/mga/mga_ioc32.c +@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, + return 0; + } + +-drm_ioctl_compat_t *mga_compat_ioctls[] = { ++drm_ioctl_compat_t mga_compat_ioctls[] = { + [DRM_MGA_INIT] = compat_mga_init, + [DRM_MGA_GETPARAM] = compat_mga_getparam, + [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, +@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = { + long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + unsigned int nr = DRM_IOCTL_NR(cmd); +- drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) +- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; +- +- if (fn != NULL) ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) { ++ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; + ret = (*fn) (filp, cmd, arg); +- else ++ } else + ret = drm_ioctl(filp, cmd, arg); + + return ret; +diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c +index 1b071b8..de8601a 100644 +--- a/drivers/gpu/drm/mga/mga_irq.c ++++ b/drivers/gpu/drm/mga/mga_irq.c +@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + +@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg) + /* VBLANK interrupt */ + if (status & MGA_VLINEPEN) { + MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); +- atomic_inc(&dev_priv->vbl_received); ++ atomic_inc_unchecked(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); + handled = 1; + } +@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg) + if ((prim_start & ~0x03) != (prim_end & ~0x03)) + MGA_WRITE(MGA_PRIMEND, prim_end); + +- atomic_inc(&dev_priv->last_fence_retired); ++ atomic_inc_unchecked(&dev_priv->last_fence_retired); + wake_up(&dev_priv->fence_queue); + handled = 1; + } +@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) + * using fences. 
+ */ + DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ, +- (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) ++ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired)) + - *sequence) <= (1 << 23))); + + *sequence = cur_fence; +diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c +index 4c3feaa..26391ce 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bios.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c +@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_table { + const char id; + int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); +-}; ++} __no_const; + + #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) + +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h +index 23ca7a5..b6c955d 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.h ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.h +@@ -97,7 +97,6 @@ struct nouveau_drm { + struct drm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; +- atomic_t validate_sequence; + int (*move)(struct nouveau_channel *, + struct ttm_buffer_object *, + struct ttm_mem_reg *, struct ttm_mem_reg *); +diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c +index c1a7e5a..38b8539 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c ++++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c +@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) + { + unsigned int nr = DRM_IOCTL_NR(cmd); +- drm_ioctl_compat_t *fn = NULL; ++ drm_ioctl_compat_t fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) +diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c +index d45d50d..72a5dd2 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c +@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) + } + + const struct ttm_mem_type_manager_func nouveau_vram_manager = { +- nouveau_vram_manager_init, +- nouveau_vram_manager_fini, +- nouveau_vram_manager_new, +- nouveau_vram_manager_del, +- nouveau_vram_manager_debug ++ .init = nouveau_vram_manager_init, ++ .takedown = nouveau_vram_manager_fini, ++ .get_node = nouveau_vram_manager_new, ++ .put_node = nouveau_vram_manager_del, ++ .debug = nouveau_vram_manager_debug + }; + + static int +@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) + } + + const struct ttm_mem_type_manager_func nouveau_gart_manager = { +- nouveau_gart_manager_init, +- nouveau_gart_manager_fini, +- nouveau_gart_manager_new, +- nouveau_gart_manager_del, +- nouveau_gart_manager_debug ++ .init = nouveau_gart_manager_init, ++ .takedown = nouveau_gart_manager_fini, ++ .get_node = nouveau_gart_manager_new, ++ .put_node = nouveau_gart_manager_del, ++ .debug = nouveau_gart_manager_debug + }; + + #include <core/subdev/vm/nv04.h> +@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) + } + + const struct ttm_mem_type_manager_func nv04_gart_manager = { +- nv04_gart_manager_init, +- nv04_gart_manager_fini, +- nv04_gart_manager_new, +- nv04_gart_manager_del, +- nv04_gart_manager_debug ++ .init = nv04_gart_manager_init, ++ .takedown = nv04_gart_manager_fini, ++ .get_node = nv04_gart_manager_new, ++ .put_node = 
nv04_gart_manager_del, ++ .debug = nv04_gart_manager_debug + }; + + int +diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c +index 471347e..5adc6b9 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_vga.c ++++ b/drivers/gpu/drm/nouveau/nouveau_vga.c +@@ -67,7 +67,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev) + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (local_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c +index eb89653..613cf71 100644 +--- a/drivers/gpu/drm/qxl/qxl_cmd.c ++++ b/drivers/gpu/drm/qxl/qxl_cmd.c +@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, + int ret; + + mutex_lock(&qdev->async_io_mutex); +- irq_num = atomic_read(&qdev->irq_received_io_cmd); ++ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd); + if (qdev->last_sent_io_cmd > irq_num) { + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, +- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); ++ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, +- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); ++ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + /* 0 is timeout, just bail the "hw" has gone away */ + if (ret <= 0) + goto out; +- irq_num = atomic_read(&qdev->irq_received_io_cmd); ++ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd); + } + outb(val, addr); + qdev->last_sent_io_cmd = irq_num + 1; + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, +- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); ++ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, +- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); ++ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + out: + if (ret > 0) + ret = 0; +diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c +index c3c2bbd..bc3c0fb 100644 +--- a/drivers/gpu/drm/qxl/qxl_debugfs.c ++++ b/drivers/gpu/drm/qxl/qxl_debugfs.c +@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data) + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = node->minor->dev->dev_private; + +- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received)); +- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display)); +- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor)); +- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd)); ++ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received)); ++ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display)); ++ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor)); ++ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd)); + seq_printf(m, "%d\n", qdev->irq_received_error); + return 0; + } +diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h +index 36ed40b..0397633 100644 +--- a/drivers/gpu/drm/qxl/qxl_drv.h ++++ b/drivers/gpu/drm/qxl/qxl_drv.h +@@ -290,10 +290,10 @@ struct qxl_device { + unsigned int last_sent_io_cmd; + + /* interrupt handling */ +- atomic_t irq_received; +- atomic_t irq_received_display; +- atomic_t irq_received_cursor; +- 
atomic_t irq_received_io_cmd; ++ atomic_unchecked_t irq_received; ++ atomic_unchecked_t irq_received_display; ++ atomic_unchecked_t irq_received_cursor; ++ atomic_unchecked_t irq_received_io_cmd; + unsigned irq_received_error; + wait_queue_head_t display_event; + wait_queue_head_t cursor_event; +diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c +index 0bb86e6..d41416d 100644 +--- a/drivers/gpu/drm/qxl/qxl_ioctl.c ++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c +@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, + + /* TODO copy slow path code from i915 */ + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); +- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); ++ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size); + + { + struct qxl_drawable *draw = fb_cmd; +@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, + struct drm_qxl_reloc reloc; + + if (copy_from_user(&reloc, +- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], ++ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i], + sizeof(reloc))) { + ret = -EFAULT; + goto out_free_bos; +@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, + + for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { + +- struct drm_qxl_command *commands = +- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; ++ struct drm_qxl_command __user *commands = ++ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands; + +- if (copy_from_user(&user_cmd, &commands[cmd_num], ++ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num], + sizeof(user_cmd))) + return -EFAULT; + +diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c +index 3485bdc..20d26e3 100644 +--- a/drivers/gpu/drm/qxl/qxl_irq.c ++++ b/drivers/gpu/drm/qxl/qxl_irq.c +@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg) + if (!pending) + return IRQ_NONE; + +- atomic_inc(&qdev->irq_received); ++ atomic_inc_unchecked(&qdev->irq_received); + + if (pending & QXL_INTERRUPT_DISPLAY) { +- atomic_inc(&qdev->irq_received_display); ++ atomic_inc_unchecked(&qdev->irq_received_display); + wake_up_all(&qdev->display_event); + qxl_queue_garbage_collect(qdev, false); + } + if (pending & QXL_INTERRUPT_CURSOR) { +- atomic_inc(&qdev->irq_received_cursor); ++ atomic_inc_unchecked(&qdev->irq_received_cursor); + wake_up_all(&qdev->cursor_event); + } + if (pending & QXL_INTERRUPT_IO_CMD) { +- atomic_inc(&qdev->irq_received_io_cmd); ++ atomic_inc_unchecked(&qdev->irq_received_io_cmd); + wake_up_all(&qdev->io_cmd_event); + } + if (pending & QXL_INTERRUPT_ERROR) { +@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev) + init_waitqueue_head(&qdev->io_cmd_event); + INIT_WORK(&qdev->client_monitors_config_work, + qxl_client_monitors_config_work_func); +- atomic_set(&qdev->irq_received, 0); +- atomic_set(&qdev->irq_received_display, 0); +- atomic_set(&qdev->irq_received_cursor, 0); +- atomic_set(&qdev->irq_received_io_cmd, 0); ++ atomic_set_unchecked(&qdev->irq_received, 0); ++ atomic_set_unchecked(&qdev->irq_received_display, 0); ++ atomic_set_unchecked(&qdev->irq_received_cursor, 0); ++ 
atomic_set_unchecked(&qdev->irq_received_io_cmd, 0); + qdev->irq_received_error = 0; + ret = drm_irq_install(qdev->ddev); + qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; +diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c +index c82c1d6a9..6158c02 100644 +--- a/drivers/gpu/drm/qxl/qxl_ttm.c ++++ b/drivers/gpu/drm/qxl/qxl_ttm.c +@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev) + } + } + +-static struct vm_operations_struct qxl_ttm_vm_ops; ++static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only; + static const struct vm_operations_struct *ttm_vm_ops; + + static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma) + return r; + if (unlikely(ttm_vm_ops == NULL)) { + ttm_vm_ops = vma->vm_ops; ++ pax_open_kernel(); + qxl_ttm_vm_ops = *ttm_vm_ops; + qxl_ttm_vm_ops.fault = &qxl_ttm_fault; ++ pax_close_kernel(); + } + vma->vm_ops = &qxl_ttm_vm_ops; + return 0; +@@ -561,25 +563,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data) + static int qxl_ttm_debugfs_init(struct qxl_device *qdev) + { + #if defined(CONFIG_DEBUG_FS) +- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; +- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32]; +- unsigned i; ++ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = { ++ { ++ .name = "qxl_mem_mm", ++ .show = &qxl_mm_dump_table, ++ }, ++ { ++ .name = "qxl_surf_mm", ++ .show = &qxl_mm_dump_table, ++ } ++ }; + +- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) { +- if (i == 0) +- sprintf(qxl_mem_types_names[i], "qxl_mem_mm"); +- else +- sprintf(qxl_mem_types_names[i], "qxl_surf_mm"); +- qxl_mem_types_list[i].name = qxl_mem_types_names[i]; +- qxl_mem_types_list[i].show = &qxl_mm_dump_table; +- qxl_mem_types_list[i].driver_features = 0; +- if (i == 0) +- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv; +- else +- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv; ++ pax_open_kernel(); ++ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv; ++ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv; ++ pax_close_kernel(); + +- } +- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); ++ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES); + #else + return 0; + #endif +diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c +index 59459fe..be26b31 100644 +--- a/drivers/gpu/drm/r128/r128_cce.c ++++ b/drivers/gpu/drm/r128/r128_cce.c +@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) + + /* GH: Simple idle check. 
+ */ +- atomic_set(&dev_priv->idle_count, 0); ++ atomic_set_unchecked(&dev_priv->idle_count, 0); + + /* We don't support anything other than bus-mastering ring mode, + * but the ring can be in either AGP or PCI space for the ring +diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h +index 5bf3f5f..7000661 100644 +--- a/drivers/gpu/drm/r128/r128_drv.h ++++ b/drivers/gpu/drm/r128/r128_drv.h +@@ -90,14 +90,14 @@ typedef struct drm_r128_private { + int is_pci; + unsigned long cce_buffers_offset; + +- atomic_t idle_count; ++ atomic_unchecked_t idle_count; + + int page_flipping; + int current_page; + u32 crtc_offset; + u32 crtc_offset_cntl; + +- atomic_t vbl_received; ++ atomic_unchecked_t vbl_received; + + u32 color_fmt; + unsigned int front_offset; +diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c +index b0d0fd3..a6fbbe4 100644 +--- a/drivers/gpu/drm/r128/r128_ioc32.c ++++ b/drivers/gpu/drm/r128/r128_ioc32.c +@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd, + return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); + } + +-drm_ioctl_compat_t *r128_compat_ioctls[] = { ++drm_ioctl_compat_t r128_compat_ioctls[] = { + [DRM_R128_INIT] = compat_r128_init, + [DRM_R128_DEPTH] = compat_r128_depth, + [DRM_R128_STIPPLE] = compat_r128_stipple, +@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = { + long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + unsigned int nr = DRM_IOCTL_NR(cmd); +- drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) +- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; +- +- if (fn != NULL) ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) { ++ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; + ret = (*fn) (filp, cmd, arg); +- else ++ } else + ret = drm_ioctl(filp, cmd, arg); + + return ret; +diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c +index c2ae496..30b5993 100644 +--- a/drivers/gpu/drm/r128/r128_irq.c ++++ b/drivers/gpu/drm/r128/r128_irq.c +@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + irqreturn_t r128_driver_irq_handler(int irq, void *arg) +@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg) + /* VBLANK interrupt */ + if (status & R128_CRTC_VBLANK_INT) { + R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); +- atomic_inc(&dev_priv->vbl_received); ++ atomic_inc_unchecked(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); + return IRQ_HANDLED; + } +diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c +index e806dac..f81d32f 100644 +--- a/drivers/gpu/drm/r128/r128_state.c ++++ b/drivers/gpu/drm/r128/r128_state.c +@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv, + + static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) + { +- if (atomic_read(&dev_priv->idle_count) == 0) ++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) + r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); + else +- atomic_set(&dev_priv->idle_count, 0); ++ atomic_set_unchecked(&dev_priv->idle_count, 0); + } + + #endif +diff --git a/drivers/gpu/drm/radeon/mkregtable.c 
b/drivers/gpu/drm/radeon/mkregtable.c +index 4a85bb6..aaea819 100644 +--- a/drivers/gpu/drm/radeon/mkregtable.c ++++ b/drivers/gpu/drm/radeon/mkregtable.c +@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename) + regex_t mask_rex; + regmatch_t match[4]; + char buf[1024]; +- size_t end; ++ long end; + int len; + int done = 0; + int r; + unsigned o; + struct offset *offset; + char last_reg_s[10]; +- int last_reg; ++ unsigned long last_reg; + + if (regcomp + (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c +index 0bf6f4a..18e2437 100644 +--- a/drivers/gpu/drm/radeon/radeon_device.c ++++ b/drivers/gpu/drm/radeon/radeon_device.c +@@ -1128,7 +1128,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (local_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h +index dafd812..1bf20c7 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.h ++++ b/drivers/gpu/drm/radeon/radeon_drv.h +@@ -262,7 +262,7 @@ typedef struct drm_radeon_private { + + /* SW interrupt */ + wait_queue_head_t swi_queue; +- atomic_t swi_emitted; ++ atomic_unchecked_t swi_emitted; + int vblank_crtc; + uint32_t irq_enable_reg; + uint32_t r500_disp_irq_reg; +diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c +index bdb0f93..5ff558f 100644 +--- a/drivers/gpu/drm/radeon/radeon_ioc32.c ++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c +@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.param, &request->param) +- || __put_user((void __user *)(unsigned long)req32.value, ++ || __put_user((unsigned long)req32.value, + &request->value)) + return -EFAULT; + +@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, + #define compat_radeon_cp_setparam NULL + #endif /* X86_64 || IA64 */ + +-static drm_ioctl_compat_t *radeon_compat_ioctls[] = { ++static drm_ioctl_compat_t radeon_compat_ioctls[] = { + [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, + [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, + [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, +@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = { + long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + { + unsigned int nr = DRM_IOCTL_NR(cmd); +- drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) +- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; +- +- if (fn != NULL) ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) { ++ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; + ret = (*fn) (filp, cmd, arg); +- else ++ } else + ret = drm_ioctl(filp, cmd, arg); + + return ret; +diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c +index 244b19b..c19226d 100644 +--- a/drivers/gpu/drm/radeon/radeon_irq.c ++++ b/drivers/gpu/drm/radeon/radeon_irq.c +@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev) + unsigned int 
ret; + RING_LOCALS; + +- atomic_inc(&dev_priv->swi_emitted); +- ret = atomic_read(&dev_priv->swi_emitted); ++ atomic_inc_unchecked(&dev_priv->swi_emitted); ++ ret = atomic_read_unchecked(&dev_priv->swi_emitted); + + BEGIN_RING(4); + OUT_RING_REG(RADEON_LAST_SWI_REG, ret); +@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *) dev->dev_private; + +- atomic_set(&dev_priv->swi_emitted, 0); ++ atomic_set_unchecked(&dev_priv->swi_emitted, 0); + init_waitqueue_head(&dev_priv->swi_queue); + + dev->max_vblank_count = 0x001fffff; +diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c +index 956ab7f..fbd36d8 100644 +--- a/drivers/gpu/drm/radeon/radeon_state.c ++++ b/drivers/gpu/drm/radeon/radeon_state.c +@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + +- if (copy_from_user(&depth_boxes, clear->depth_boxes, ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes, + sarea_priv->nbox * sizeof(depth_boxes[0]))) + return -EFAULT; + +@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil + { + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t *param = data; +- int value; ++ int value = 0; + + DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); + +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index 040a2a1..eae4e54 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -790,7 +790,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) + man->size = size >> PAGE_SHIFT; + } + +-static struct vm_operations_struct radeon_ttm_vm_ops; ++static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only; + static const struct vm_operations_struct *ttm_vm_ops = NULL; + + static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +@@ -831,8 +831,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) + } + if (unlikely(ttm_vm_ops == NULL)) { + ttm_vm_ops = vma->vm_ops; ++ pax_open_kernel(); + radeon_ttm_vm_ops = *ttm_vm_ops; + radeon_ttm_vm_ops.fault = &radeon_ttm_fault; ++ pax_close_kernel(); + } + vma->vm_ops = &radeon_ttm_vm_ops; + return 0; +diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c +index 9336006..ce78aa7 100644 +--- a/drivers/gpu/drm/tegra/dc.c ++++ b/drivers/gpu/drm/tegra/dc.c +@@ -1057,7 +1057,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) + } + + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) +- dc->debugfs_files[i].data = dc; ++ *(void **)&dc->debugfs_files[i].data = dc; + + err = drm_debugfs_create_files(dc->debugfs_files, + ARRAY_SIZE(debugfs_files), +diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c +index d452faab..f8cbc6a 100644 +--- a/drivers/gpu/drm/tegra/dsi.c ++++ b/drivers/gpu/drm/tegra/dsi.c +@@ -53,7 +53,7 @@ struct tegra_dsi { + struct clk *clk_lp; + struct clk *clk; + +- struct drm_info_list *debugfs_files; ++ drm_info_list_no_const *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; + +diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c +index 6928015..c9853e7 100644 +--- a/drivers/gpu/drm/tegra/hdmi.c ++++ b/drivers/gpu/drm/tegra/hdmi.c +@@ -59,7 
+59,7 @@ struct tegra_hdmi { + bool stereo; + bool dvi; + +- struct drm_info_list *debugfs_files; ++ drm_info_list_no_const *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; + }; +diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c +index c58eba33..83c2728 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c +@@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, + } + + const struct ttm_mem_type_manager_func ttm_bo_manager_func = { +- ttm_bo_man_init, +- ttm_bo_man_takedown, +- ttm_bo_man_get_node, +- ttm_bo_man_put_node, +- ttm_bo_man_debug ++ .init = ttm_bo_man_init, ++ .takedown = ttm_bo_man_takedown, ++ .get_node = ttm_bo_man_get_node, ++ .put_node = ttm_bo_man_put_node, ++ .debug = ttm_bo_man_debug + }; + EXPORT_SYMBOL(ttm_bo_manager_func); +diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c +index dbc2def..0a9f710 100644 +--- a/drivers/gpu/drm/ttm/ttm_memory.c ++++ b/drivers/gpu/drm/ttm/ttm_memory.c +@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, + zone->glob = glob; + glob->zone_kernel = zone; + ret = kobject_init_and_add( +- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); ++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; +@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, + zone->glob = glob; + glob->zone_dma32 = zone; + ret = kobject_init_and_add( +- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); ++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 863bef9..cba15cf 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -391,9 +391,9 @@ out: + static unsigned long + ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) + { +- static atomic_t start_pool = ATOMIC_INIT(0); ++ static atomic_unchecked_t start_pool = ATOMIC_INIT(0); + unsigned i; +- unsigned pool_offset = atomic_add_return(1, &start_pool); ++ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool); + struct ttm_page_pool *pool; + int shrink_pages = sc->nr_to_scan; + unsigned long freed = 0; +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c +index dbadd49..1b7457b 100644 +--- a/drivers/gpu/drm/udl/udl_fb.c ++++ b/drivers/gpu/drm/udl/udl_fb.c +@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user) + fb_deferred_io_cleanup(info); + kfree(info->fbdefio); + info->fbdefio = NULL; +- info->fbops->fb_mmap = udl_fb_mmap; + } + + pr_warn("released /dev/fb%d user=%d count=%d\n", +diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h +index ad02732..144f5ed 100644 +--- a/drivers/gpu/drm/via/via_drv.h ++++ b/drivers/gpu/drm/via/via_drv.h +@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer { + typedef uint32_t maskarray_t[5]; + + typedef struct drm_via_irq { +- atomic_t irq_received; ++ atomic_unchecked_t irq_received; + uint32_t pending_mask; + uint32_t enable_mask; + wait_queue_head_t irq_queue; +@@ -75,7 +75,7 @@ typedef struct drm_via_private { + struct timeval last_vblank; + int last_vblank_valid; + unsigned usec_per_vblank; +- atomic_t vbl_received; ++ 
atomic_unchecked_t vbl_received; + drm_via_state_t hc_state; + char pci_buf[VIA_PCI_BUF_SIZE]; + const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; +diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c +index 1319433..a993b0c 100644 +--- a/drivers/gpu/drm/via/via_irq.c ++++ b/drivers/gpu/drm/via/via_irq.c +@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc) + if (crtc != 0) + return 0; + +- return atomic_read(&dev_priv->vbl_received); ++ return atomic_read_unchecked(&dev_priv->vbl_received); + } + + irqreturn_t via_driver_irq_handler(int irq, void *arg) +@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg) + + status = VIA_READ(VIA_REG_INTERRUPT); + if (status & VIA_IRQ_VBLANK_PENDING) { +- atomic_inc(&dev_priv->vbl_received); +- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { ++ atomic_inc_unchecked(&dev_priv->vbl_received); ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { + do_gettimeofday(&cur_vblank); + if (dev_priv->last_vblank_valid) { + dev_priv->usec_per_vblank = +@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg) + dev_priv->last_vblank = cur_vblank; + dev_priv->last_vblank_valid = 1; + } +- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { + DRM_DEBUG("US per vblank is: %u\n", + dev_priv->usec_per_vblank); + } +@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg) + + for (i = 0; i < dev_priv->num_irqs; ++i) { + if (status & cur_irq->pending_mask) { +- atomic_inc(&cur_irq->irq_received); ++ atomic_inc_unchecked(&cur_irq->irq_received); + wake_up(&cur_irq->irq_queue); + handled = 1; + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) +@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, + ((VIA_READ(masks[irq][2]) & masks[irq][3]) == + masks[irq][4])); +- cur_irq_sequence = atomic_read(&cur_irq->irq_received); ++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); + } else { + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, + (((cur_irq_sequence = +- atomic_read(&cur_irq->irq_received)) - ++ atomic_read_unchecked(&cur_irq->irq_received)) - + *sequence) <= (1 << 23))); + } + *sequence = cur_irq_sequence; +@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev) + } + + for (i = 0; i < dev_priv->num_irqs; ++i) { +- atomic_set(&cur_irq->irq_received, 0); ++ atomic_set_unchecked(&cur_irq->irq_received, 0); + cur_irq->enable_mask = dev_priv->irq_masks[i][0]; + cur_irq->pending_mask = dev_priv->irq_masks[i][1]; + init_waitqueue_head(&cur_irq->irq_queue); +@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) + switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { + case VIA_IRQ_RELATIVE: + irqwait->request.sequence += +- atomic_read(&cur_irq->irq_received); ++ atomic_read_unchecked(&cur_irq->irq_received); + irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; + case VIA_IRQ_ABSOLUTE: + break; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +index 0783155..b29e18e 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +@@ -437,7 +437,7 @@ struct vmw_private { + * Fencing and IRQs. 
+ */ + +- atomic_t marker_seq; ++ atomic_unchecked_t marker_seq; + wait_queue_head_t fence_queue; + wait_queue_head_t fifo_queue; + int fence_queue_waiters; /* Protected by hw_mutex */ +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index 6ccd993..618d592 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + (unsigned int) min, + (unsigned int) fifo->capabilities); + +- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); ++ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno); + iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); + vmw_marker_queue_init(&fifo->marker_queue); + return vmw_fifo_send_fence(dev_priv, &dummy); +@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) + if (reserveable) + iowrite32(bytes, fifo_mem + + SVGA_FIFO_RESERVED); +- return fifo_mem + (next_cmd >> 2); ++ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2); + } else { + need_bounce = true; + } +@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) + + fm = vmw_fifo_reserve(dev_priv, bytes); + if (unlikely(fm == NULL)) { +- *seqno = atomic_read(&dev_priv->marker_seq); ++ *seqno = atomic_read_unchecked(&dev_priv->marker_seq); + ret = -ENOMEM; + (void)vmw_fallback_wait(dev_priv, false, true, *seqno, + false, 3*HZ); +@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) + } + + do { +- *seqno = atomic_add_return(1, &dev_priv->marker_seq); ++ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq); + } while (*seqno == 0); + + if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +index b1273e8..9c274fd 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man, + } + + const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { +- vmw_gmrid_man_init, +- vmw_gmrid_man_takedown, +- vmw_gmrid_man_get_node, +- vmw_gmrid_man_put_node, +- vmw_gmrid_man_debug ++ .init = vmw_gmrid_man_init, ++ .takedown = vmw_gmrid_man_takedown, ++ .get_node = vmw_gmrid_man_get_node, ++ .put_node = vmw_gmrid_man_put_node, ++ .debug = vmw_gmrid_man_debug + }; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +index 47b7094..698ba09 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +@@ -236,7 +236,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, + int ret; + + num_clips = arg->num_clips; +- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; ++ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; + + if (unlikely(num_clips == 0)) + return 0; +@@ -320,7 +320,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, + int ret; + + num_clips = arg->num_clips; +- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; ++ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr; + + if (unlikely(num_clips == 0)) + return 0; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +index 0c42376..6febe77 100644 +--- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv, + * emitted. Then the fence is stale and signaled. + */ + +- ret = ((atomic_read(&dev_priv->marker_seq) - seqno) ++ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno) + > VMW_FENCE_WRAP); + + return ret; +@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, + + if (fifo_idle) + down_read(&fifo_state->rwsem); +- signal_seq = atomic_read(&dev_priv->marker_seq); ++ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq); + ret = 0; + + for (;;) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +index 8a8725c2..afed796 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv, + while (!vmw_lag_lt(queue, us)) { + spin_lock(&queue->lock); + if (list_empty(&queue->head)) +- seqno = atomic_read(&dev_priv->marker_seq); ++ seqno = atomic_read_unchecked(&dev_priv->marker_seq); + else { + marker = list_first_entry(&queue->head, + struct vmw_marker, head); +diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c +index 6866448..2ad2b34 100644 +--- a/drivers/gpu/vga/vga_switcheroo.c ++++ b/drivers/gpu/vga/vga_switcheroo.c +@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev) + + /* this version is for the case where the power switch is separate + to the device being powered down. */ +-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) ++int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) + { + /* copy over all the bus versions */ + if (dev->bus && dev->bus->pm) { +@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) + return ret; + } + +-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) ++int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) + { + /* copy over all the bus versions */ + if (dev->bus && dev->bus->pm) { +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 7cd42ea..a367c48 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -2432,7 +2432,7 @@ EXPORT_SYMBOL_GPL(hid_ignore); + + int hid_add_device(struct hid_device *hdev) + { +- static atomic_t id = ATOMIC_INIT(0); ++ static atomic_unchecked_t id = ATOMIC_INIT(0); + int ret; + + if (WARN_ON(hdev->status & HID_STAT_ADDED)) +@@ -2466,7 +2466,7 @@ int hid_add_device(struct hid_device *hdev) + /* XXX hack, any other cleaner solution after the driver core + * is converted to allow more than 20 bytes as the device name? 
*/ + dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, +- hdev->vendor, hdev->product, atomic_inc_return(&id)); ++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); + + hid_debug_register(hdev, dev_name(&hdev->dev)); + ret = device_add(&hdev->dev); +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 3b43d1c..991ba79 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, + if (size < 4 || ((size - 4) % 9) != 0) + return 0; + npoints = (size - 4) / 9; ++ if (npoints > 15) { ++ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", ++ size); ++ return 0; ++ } + msc->ntouches = 0; + for (ii = 0; ii < npoints; ii++) + magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); +@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, + if (size < 6 || ((size - 6) % 8) != 0) + return 0; + npoints = (size - 6) / 8; ++ if (npoints > 15) { ++ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", ++ size); ++ return 0; ++ } + msc->ntouches = 0; + for (ii = 0; ii < npoints; ii++) + magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); +diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c +index acbb0210..020df3c 100644 +--- a/drivers/hid/hid-picolcd_core.c ++++ b/drivers/hid/hid-picolcd_core.c +@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev, + if (!data) + return 1; + ++ if (size > 64) { ++ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n", ++ size); ++ return 0; ++ } ++ + if (report->id == REPORT_KEY_STATE) { + if (data->input_keys) + ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); +diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c +index c13fb5b..55a3802 100644 +--- a/drivers/hid/hid-wiimote-debug.c ++++ b/drivers/hid/hid-wiimote-debug.c +@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s, + else if (size == 0) + return -EIO; + +- if (copy_to_user(u, buf, size)) ++ if (size > sizeof(buf) || copy_to_user(u, buf, size)) + return -EFAULT; + + *off += size; +diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c +index cedc6da..2c3da2a 100644 +--- a/drivers/hid/uhid.c ++++ b/drivers/hid/uhid.c +@@ -47,7 +47,7 @@ struct uhid_device { + struct mutex report_lock; + wait_queue_head_t report_wait; + atomic_t report_done; +- atomic_t report_id; ++ atomic_unchecked_t report_id; + struct uhid_event report_buf; + }; + +@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum, + + spin_lock_irqsave(&uhid->qlock, flags); + ev->type = UHID_FEATURE; +- ev->u.feature.id = atomic_inc_return(&uhid->report_id); ++ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id); + ev->u.feature.rnum = rnum; + ev->u.feature.rtype = report_type; + +@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid, + spin_lock_irqsave(&uhid->qlock, flags); + + /* id for old report; drop it silently */ +- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id) ++ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id) + goto unlock; + if (atomic_read(&uhid->report_done)) + goto unlock; +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c +index 69ea36f..8dbf4bb 100644 +--- a/drivers/hv/channel.c ++++ b/drivers/hv/channel.c +@@ -364,8 +364,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, + int 
ret = 0; + int t; + +- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle); +- atomic_inc(&vmbus_connection.next_gpadl_handle); ++ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle); ++ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle); + + ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount); + if (ret) +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c +index bcb4950..61dba6c 100644 +--- a/drivers/hv/hv.c ++++ b/drivers/hv/hv.c +@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output) + u64 output_address = (output) ? virt_to_phys(output) : 0; + u32 output_address_hi = output_address >> 32; + u32 output_address_lo = output_address & 0xFFFFFFFF; +- void *hypercall_page = hv_context.hypercall_page; ++ void *hypercall_page = ktva_ktla(hv_context.hypercall_page); + + __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), + "=a"(hv_status_lo) : "d" (control_hi), +@@ -154,7 +154,7 @@ int hv_init(void) + /* See if the hypercall page is already set */ + rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + +- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC); ++ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); + + if (!virtaddr) + goto cleanup; +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +index 393fd8a..079e13f 100644 +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); + + module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); + MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); +-static atomic_t trans_id = ATOMIC_INIT(0); ++static atomic_unchecked_t trans_id = ATOMIC_INIT(0); + + static int dm_ring_size = (5 * PAGE_SIZE); + +@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy) + pr_info("Memory hot add failed\n"); + + dm->state = DM_INITIALIZED; +- resp.hdr.trans_id = atomic_inc_return(&trans_id); ++ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + vmbus_sendpacket(dm->dev->channel, &resp, + sizeof(struct dm_hot_add_response), + (unsigned long)NULL, +@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm) + memset(&status, 0, sizeof(struct dm_status)); + status.hdr.type = DM_STATUS_REPORT; + status.hdr.size = sizeof(struct dm_status); +- status.hdr.trans_id = atomic_inc_return(&trans_id); ++ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + + /* + * The host expects the guest to report free memory. +@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm) + * send the status. This can happen if we were interrupted + * after we picked our transaction ID. 
+ */ +- if (status.hdr.trans_id != atomic_read(&trans_id)) ++ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id)) + return; + + /* +@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy) + */ + + do { +- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id); ++ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + ret = vmbus_sendpacket(dm_device.dev->channel, + bl_resp, + bl_resp->hdr.size, +@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm, + + memset(&resp, 0, sizeof(struct dm_unballoon_response)); + resp.hdr.type = DM_UNBALLOON_RESPONSE; +- resp.hdr.trans_id = atomic_inc_return(&trans_id); ++ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + resp.hdr.size = sizeof(struct dm_unballoon_response); + + vmbus_sendpacket(dm_device.dev->channel, &resp, +@@ -1238,7 +1238,7 @@ static void version_resp(struct hv_dynmem_device *dm, + memset(&version_req, 0, sizeof(struct dm_version_request)); + version_req.hdr.type = DM_VERSION_REQUEST; + version_req.hdr.size = sizeof(struct dm_version_request); +- version_req.hdr.trans_id = atomic_inc_return(&trans_id); ++ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7; + version_req.is_last_attempt = 1; + +@@ -1408,7 +1408,7 @@ static int balloon_probe(struct hv_device *dev, + memset(&version_req, 0, sizeof(struct dm_version_request)); + version_req.hdr.type = DM_VERSION_REQUEST; + version_req.hdr.size = sizeof(struct dm_version_request); +- version_req.hdr.trans_id = atomic_inc_return(&trans_id); ++ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8; + version_req.is_last_attempt = 0; + +@@ -1439,7 +1439,7 @@ static int balloon_probe(struct hv_device *dev, + memset(&cap_msg, 0, sizeof(struct dm_capabilities)); + cap_msg.hdr.type = DM_CAPABILITIES_REPORT; + cap_msg.hdr.size = sizeof(struct dm_capabilities); +- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id); ++ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id); + + cap_msg.caps.cap_bits.balloon = 1; + cap_msg.caps.cap_bits.hot_add = 1; +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h +index e055176..c22ff1f 100644 +--- a/drivers/hv/hyperv_vmbus.h ++++ b/drivers/hv/hyperv_vmbus.h +@@ -602,7 +602,7 @@ enum vmbus_connect_state { + struct vmbus_connection { + enum vmbus_connect_state conn_state; + +- atomic_t next_gpadl_handle; ++ atomic_unchecked_t next_gpadl_handle; + + /* + * Represents channel interrupts. 
Each bit position represents a +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 077bb1b..d433d74 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -844,10 +844,10 @@ int vmbus_device_register(struct hv_device *child_device_obj) + { + int ret = 0; + +- static atomic_t device_num = ATOMIC_INIT(0); ++ static atomic_unchecked_t device_num = ATOMIC_INIT(0); + + dev_set_name(&child_device_obj->device, "vmbus_0_%d", +- atomic_inc_return(&device_num)); ++ atomic_inc_return_unchecked(&device_num)); + + child_device_obj->device.bus = &hv_bus; + child_device_obj->device.parent = &hv_acpi_dev->dev; +diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c +index 579bdf9..75118b5 100644 +--- a/drivers/hwmon/acpi_power_meter.c ++++ b/drivers/hwmon/acpi_power_meter.c +@@ -116,7 +116,7 @@ struct sensor_template { + struct device_attribute *devattr, + const char *buf, size_t count); + int index; +-}; ++} __do_const; + + /* Averaging interval */ + static int update_avg_interval(struct acpi_power_meter_resource *resource) +@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource, + struct sensor_template *attrs) + { + struct device *dev = &resource->acpi_dev->dev; +- struct sensor_device_attribute *sensors = ++ sensor_device_attribute_no_const *sensors = + &resource->sensors[resource->num_sensors]; + int res = 0; + +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 3288f13..71cfb4e 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) + { + struct applesmc_node_group *grp; + struct applesmc_dev_attr *node; +- struct attribute *attr; ++ attribute_no_const *attr; + int ret, i; + + for (grp = groups; grp->format; grp++) { +diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c +index ae208f6..48b6c5b 100644 +--- a/drivers/hwmon/asus_atk0110.c ++++ b/drivers/hwmon/asus_atk0110.c +@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids); + struct atk_sensor_data { + struct list_head list; + struct atk_data *data; +- struct device_attribute label_attr; +- struct device_attribute input_attr; +- struct device_attribute limit1_attr; +- struct device_attribute limit2_attr; ++ device_attribute_no_const label_attr; ++ device_attribute_no_const input_attr; ++ device_attribute_no_const limit1_attr; ++ device_attribute_no_const limit2_attr; + char label_attr_name[ATTR_NAME_SIZE]; + char input_attr_name[ATTR_NAME_SIZE]; + char limit1_attr_name[ATTR_NAME_SIZE]; +@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev, + static struct device_attribute atk_name_attr = + __ATTR(name, 0444, atk_name_show, NULL); + +-static void atk_init_attribute(struct device_attribute *attr, char *name, ++static void atk_init_attribute(device_attribute_no_const *attr, char *name, + sysfs_show_func show) + { + sysfs_attr_init(&attr->attr); +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c +index 1599310..cd9525c 100644 +--- a/drivers/hwmon/coretemp.c ++++ b/drivers/hwmon/coretemp.c +@@ -823,7 +823,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block coretemp_cpu_notifier __refdata = { ++static struct notifier_block coretemp_cpu_notifier = { + .notifier_call = coretemp_cpu_callback, + }; + +diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c +index 632f1dc..57e6a58 100644 +--- 
a/drivers/hwmon/ibmaem.c ++++ b/drivers/hwmon/ibmaem.c +@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data, + struct aem_rw_sensor_template *rw) + { + struct device *dev = &data->pdev->dev; +- struct sensor_device_attribute *sensors = data->sensors; ++ sensor_device_attribute_no_const *sensors = data->sensors; + int err; + + /* Set up read-only sensors */ +diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c +index 708081b..fe2d4ab 100644 +--- a/drivers/hwmon/iio_hwmon.c ++++ b/drivers/hwmon/iio_hwmon.c +@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + struct iio_hwmon_state *st; +- struct sensor_device_attribute *a; ++ sensor_device_attribute_no_const *a; + int ret, i; + int in_i = 1, temp_i = 1, curr_i = 1; + enum iio_chan_type type; +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c +index 38d5a63..cf2c2ea 100644 +--- a/drivers/hwmon/nct6775.c ++++ b/drivers/hwmon/nct6775.c +@@ -944,10 +944,10 @@ static struct attribute_group * + nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, + int repeat) + { +- struct attribute_group *group; ++ attribute_group_no_const *group; + struct sensor_device_attr_u *su; +- struct sensor_device_attribute *a; +- struct sensor_device_attribute_2 *a2; ++ sensor_device_attribute_no_const *a; ++ sensor_device_attribute_2_no_const *a2; + struct attribute **attrs; + struct sensor_device_template **t; + int i, count; +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c +index 291d11f..3f0dbbd 100644 +--- a/drivers/hwmon/pmbus/pmbus_core.c ++++ b/drivers/hwmon/pmbus/pmbus_core.c +@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr) + return 0; + } + +-static void pmbus_dev_attr_init(struct device_attribute *dev_attr, ++static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr, + const char *name, + umode_t mode, + ssize_t (*show)(struct device *dev, +@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr, + dev_attr->store = store; + } + +-static void pmbus_attr_init(struct sensor_device_attribute *a, ++static void pmbus_attr_init(sensor_device_attribute_no_const *a, + const char *name, + umode_t mode, + ssize_t (*show)(struct device *dev, +@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data, + u16 reg, u8 mask) + { + struct pmbus_boolean *boolean; +- struct sensor_device_attribute *a; ++ sensor_device_attribute_no_const *a; + + boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL); + if (!boolean) +@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, + bool update, bool readonly) + { + struct pmbus_sensor *sensor; +- struct device_attribute *a; ++ device_attribute_no_const *a; + + sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL); + if (!sensor) +@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data, + const char *lstring, int index) + { + struct pmbus_label *label; +- struct device_attribute *a; ++ device_attribute_no_const *a; + + label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL); + if (!label) +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c +index 97cd45a..ac54d8b 100644 +--- a/drivers/hwmon/sht15.c ++++ b/drivers/hwmon/sht15.c +@@ -169,7 +169,7 @@ struct sht15_data { + int supply_uv; + bool supply_uv_valid; + struct work_struct update_supply_work; +- atomic_t interrupt_handled; ++ 
atomic_unchecked_t interrupt_handled; + }; + + /** +@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data, + ret = gpio_direction_input(data->pdata->gpio_data); + if (ret) + return ret; +- atomic_set(&data->interrupt_handled, 0); ++ atomic_set_unchecked(&data->interrupt_handled, 0); + + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + if (gpio_get_value(data->pdata->gpio_data) == 0) { + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + /* Only relevant if the interrupt hasn't occurred. */ +- if (!atomic_read(&data->interrupt_handled)) ++ if (!atomic_read_unchecked(&data->interrupt_handled)) + schedule_work(&data->read_work); + } + ret = wait_event_timeout(data->wait_queue, +@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d) + + /* First disable the interrupt */ + disable_irq_nosync(irq); +- atomic_inc(&data->interrupt_handled); ++ atomic_inc_unchecked(&data->interrupt_handled); + /* Then schedule a reading work struct */ + if (data->state != SHT15_READING_NOTHING) + schedule_work(&data->read_work); +@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s) + * If not, then start the interrupt again - care here as could + * have gone low in meantime so verify it hasn't! + */ +- atomic_set(&data->interrupt_handled, 0); ++ atomic_set_unchecked(&data->interrupt_handled, 0); + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + /* If still not occurred or another handler was scheduled */ + if (gpio_get_value(data->pdata->gpio_data) +- || atomic_read(&data->interrupt_handled)) ++ || atomic_read_unchecked(&data->interrupt_handled)) + return; + } + +diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c +index 38944e9..ae9e5ed 100644 +--- a/drivers/hwmon/via-cputemp.c ++++ b/drivers/hwmon/via-cputemp.c +@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb, + return NOTIFY_OK; + } + +-static struct notifier_block via_cputemp_cpu_notifier __refdata = { ++static struct notifier_block via_cputemp_cpu_notifier = { + .notifier_call = via_cputemp_cpu_callback, + }; + +diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c +index 41fc683..a39cfea 100644 +--- a/drivers/i2c/busses/i2c-amd756-s4882.c ++++ b/drivers/i2c/busses/i2c-amd756-s4882.c +@@ -43,7 +43,7 @@ + extern struct i2c_adapter amd756_smbus; + + static struct i2c_adapter *s4882_adapter; +-static struct i2c_algorithm *s4882_algo; ++static i2c_algorithm_no_const *s4882_algo; + + /* Wrapper access functions for multiplexed SMBus */ + static DEFINE_MUTEX(amd756_lock); +diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c +index 721f7eb..0fd2a09 100644 +--- a/drivers/i2c/busses/i2c-diolan-u2c.c ++++ b/drivers/i2c/busses/i2c-diolan-u2c.c +@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz"); + /* usb layer */ + + /* Send command to device, and get response. 
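/*
 * Sketch of why sht15's `interrupt_handled` above can become an
 * `atomic_unchecked_t`: under PaX's REFCOUNT hardening, plain atomic_t
 * arithmetic is instrumented to detect overflow (to catch reference
 * count bugs). Counters whose wraparound is harmless -- here only
 * zero vs. non-zero is ever tested -- are switched to the _unchecked
 * variant to skip that instrumentation. Standalone illustration using
 * C11 atomics; the names are illustrative, not the kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int interrupt_handled;   /* event flag, not a refcount */

static void fake_irq(void)
{
	/* Overflow here would be benign: readers only test != 0. */
	atomic_fetch_add(&interrupt_handled, 1);
}

int main(void)
{
	atomic_store(&interrupt_handled, 0);
	fake_irq();
	if (atomic_load(&interrupt_handled))
		puts("interrupt seen at least once");
	return 0;
}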
*/ +-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) ++static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev) + { + int ret = 0; + int actual; +diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c +index b170bdf..3c76427 100644 +--- a/drivers/i2c/busses/i2c-nforce2-s4985.c ++++ b/drivers/i2c/busses/i2c-nforce2-s4985.c +@@ -41,7 +41,7 @@ + extern struct i2c_adapter *nforce2_smbus; + + static struct i2c_adapter *s4985_adapter; +-static struct i2c_algorithm *s4985_algo; ++static i2c_algorithm_no_const *s4985_algo; + + /* Wrapper access functions for multiplexed SMBus */ + static DEFINE_MUTEX(nforce2_lock); +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index 80b47e8..1a6040d9 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client, + break; + } + +- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf; ++ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf; + rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len); + if (IS_ERR(rdwr_pa[i].buf)) { + res = PTR_ERR(rdwr_pa[i].buf); +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c +index 0b510ba..4fbb5085 100644 +--- a/drivers/ide/ide-cd.c ++++ b/drivers/ide/ide-cd.c +@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) + alignment = queue_dma_alignment(q) | q->dma_pad_mask; + if ((unsigned long)buf & alignment + || blk_rq_bytes(rq) & q->dma_pad_mask +- || object_is_on_stack(buf)) ++ || object_starts_on_stack(buf)) + drive->dma = 0; + } + } +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index acc911a..8700c3c 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -527,7 +527,7 @@ static ssize_t iio_write_channel_info(struct device *dev, + } + + static +-int __iio_device_attr_init(struct device_attribute *dev_attr, ++int __iio_device_attr_init(device_attribute_no_const *dev_attr, + const char *postfix, + struct iio_chan_spec const *chan, + ssize_t (*readfunc)(struct device *dev, +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index c323917..6ddea8b 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] + + struct cm_counter_group { + struct kobject obj; +- atomic_long_t counter[CM_ATTR_COUNT]; ++ atomic_long_unchecked_t counter[CM_ATTR_COUNT]; + }; + + struct cm_counter_attribute { +@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work, + struct ib_mad_send_buf *msg = NULL; + int ret; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REQ_COUNTER]); + + /* Quick state check to discard duplicate REQs. */ +@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work) + if (!cm_id_priv) + return; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
+ counter[CM_REP_COUNTER]); + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); + if (ret) +@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work) + if (cm_id_priv->id.state != IB_CM_REP_SENT && + cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { + spin_unlock_irq(&cm_id_priv->lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_RTU_COUNTER]); + goto out; + } +@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work) + cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, + dreq_msg->local_comm_id); + if (!cm_id_priv) { +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + cm_issue_drep(work->port, work->mad_recv_wc); + return -EINVAL; +@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work) + case IB_CM_MRA_REP_RCVD: + break; + case IB_CM_TIMEWAIT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work) + cm_free_msg(msg); + goto deref; + case IB_CM_DREQ_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + goto unlock; + default: +@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work) + ib_modify_mad(cm_id_priv->av.port->mad_agent, + cm_id_priv->msg, timeout)) { + if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) +- atomic_long_inc(&work->port-> ++ atomic_long_inc_unchecked(&work->port-> + counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + goto out; +@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work) + break; + case IB_CM_MRA_REQ_RCVD: + case IB_CM_MRA_REP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + /* fall through */ + default: +@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work) + case IB_CM_LAP_IDLE: + break; + case IB_CM_MRA_LAP_SENT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work) + cm_free_msg(msg); + goto deref; + case IB_CM_LAP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + goto unlock; + default: +@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work) + cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); + if (cur_cm_id_priv) { + spin_unlock_irq(&cm.lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_SIDR_REQ_COUNTER]); + goto out; /* Duplicate message. 
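/*
 * The CM_RECV_DUPLICATES conversions above follow the same rule: these
 * are pure statistics, read back only for display (the sprintf of
 * atomic_long_read_unchecked() in cm_show_counter() below), so a wrap
 * corrupts nothing but a debug number. A toy model of the distinction,
 * assuming a hypothetical checked_add() that traps on overflow the way
 * the instrumented atomics do:
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the instrumented (checked) increment. */
static long checked_add(long v, long inc)
{
	if (v > LONG_MAX - inc) {
		fputs("refcount overflow detected\n", stderr);
		abort();
	}
	return v + inc;
}

int main(void)
{
	long refcount = 1;              /* must never wrap: keep checked */
	unsigned long stat = ULONG_MAX; /* display-only: wrap is harmless */

	refcount = checked_add(refcount, 1);
	stat++;                          /* wraps to 0, nobody cares */
	printf("refcount=%ld stat=%lu\n", refcount, stat);
	return 0;
}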
*/ + } +@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, + if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) + msg->retries = 1; + +- atomic_long_add(1 + msg->retries, ++ atomic_long_add_unchecked(1 + msg->retries, + &port->counter_group[CM_XMIT].counter[attr_index]); + if (msg->retries) +- atomic_long_add(msg->retries, ++ atomic_long_add_unchecked(msg->retries, + &port->counter_group[CM_XMIT_RETRIES]. + counter[attr_index]); + +@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, + } + + attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); +- atomic_long_inc(&port->counter_group[CM_RECV]. ++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. + counter[attr_id - CM_ATTR_ID_OFFSET]); + + work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, +@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, + cm_attr = container_of(attr, struct cm_counter_attribute, attr); + + return sprintf(buf, "%ld\n", +- atomic_long_read(&group->counter[cm_attr->index])); ++ atomic_long_read_unchecked(&group->counter[cm_attr->index])); + } + + static const struct sysfs_ops cm_counter_ops = { +diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c +index 9f5ad7c..588cd84 100644 +--- a/drivers/infiniband/core/fmr_pool.c ++++ b/drivers/infiniband/core/fmr_pool.c +@@ -98,8 +98,8 @@ struct ib_fmr_pool { + + struct task_struct *thread; + +- atomic_t req_ser; +- atomic_t flush_ser; ++ atomic_unchecked_t req_ser; ++ atomic_unchecked_t flush_ser; + + wait_queue_head_t force_wait; + }; +@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) + struct ib_fmr_pool *pool = pool_ptr; + + do { +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { + ib_fmr_batch_release(pool); + +- atomic_inc(&pool->flush_ser); ++ atomic_inc_unchecked(&pool->flush_ser); + wake_up_interruptible(&pool->force_wait); + + if (pool->flush_function) +@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr) + } + + set_current_state(TASK_INTERRUPTIBLE); +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && + !kthread_should_stop()) + schedule(); + __set_current_state(TASK_RUNNING); +@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, + pool->dirty_watermark = params->dirty_watermark; + pool->dirty_len = 0; + spin_lock_init(&pool->pool_lock); +- atomic_set(&pool->req_ser, 0); +- atomic_set(&pool->flush_ser, 0); ++ atomic_set_unchecked(&pool->req_ser, 0); ++ atomic_set_unchecked(&pool->flush_ser, 0); + init_waitqueue_head(&pool->force_wait); + + pool->thread = kthread_run(ib_fmr_cleanup_thread, +@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool) + } + spin_unlock_irq(&pool->pool_lock); + +- serial = atomic_inc_return(&pool->req_ser); ++ serial = atomic_inc_return_unchecked(&pool->req_ser); + wake_up_process(pool->thread); + + if (wait_event_interruptible(pool->force_wait, +- atomic_read(&pool->flush_ser) - serial >= 0)) ++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) + return -EINTR; + + return 0; +@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) + } else { + list_add_tail(&fmr->list, &pool->dirty_list); + if (++pool->dirty_len >= pool->dirty_watermark) { +- 
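/*
 * fmr_pool's req_ser/flush_ser above are wrap-tolerant sequence
 * numbers: progress is judged by the *signed difference* of the two
 * serials, never by their absolute values, so the _unchecked types
 * fit. The classic idiom (same shape as the kernel's time_after()) in
 * standalone form:
 */
#include <stdio.h>

/* True if serial a is behind serial b, correct across wraparound. */
static int serial_before(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

int main(void)
{
	unsigned int flush_ser = 0xfffffffeu;  /* about to wrap */
	unsigned int req_ser   = 0x00000001u;  /* already wrapped */

	/* A naive a < b comparison would get this backwards. */
	printf("flush behind req: %d\n", serial_before(flush_ser, req_ser));
	return 0;
}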
atomic_inc(&pool->req_ser); ++ atomic_inc_unchecked(&pool->req_ser); + wake_up_process(pool->thread); + } + } +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c +index 41b1195..27971a0 100644 +--- a/drivers/infiniband/hw/cxgb4/mem.c ++++ b/drivers/infiniband/hw/cxgb4/mem.c +@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, + int err; + struct fw_ri_tpte tpt; + u32 stag_idx; +- static atomic_t key; ++ static atomic_unchecked_t key; + + if (c4iw_fatal_error(rdev)) + return -EIO; +@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, + if (rdev->stats.stag.cur > rdev->stats.stag.max) + rdev->stats.stag.max = rdev->stats.stag.cur; + mutex_unlock(&rdev->stats.lock); +- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); ++ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff); + } + PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", + __func__, stag_state, type, pdid, stag_idx); +diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c +index 644c2c7..ecf0879 100644 +--- a/drivers/infiniband/hw/ipath/ipath_dma.c ++++ b/drivers/infiniband/hw/ipath/ipath_dma.c +@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size, + } + + struct ib_dma_mapping_ops ipath_dma_mapping_ops = { +- ipath_mapping_error, +- ipath_dma_map_single, +- ipath_dma_unmap_single, +- ipath_dma_map_page, +- ipath_dma_unmap_page, +- ipath_map_sg, +- ipath_unmap_sg, +- ipath_sg_dma_address, +- ipath_sg_dma_len, +- ipath_sync_single_for_cpu, +- ipath_sync_single_for_device, +- ipath_dma_alloc_coherent, +- ipath_dma_free_coherent ++ .mapping_error = ipath_mapping_error, ++ .map_single = ipath_dma_map_single, ++ .unmap_single = ipath_dma_unmap_single, ++ .map_page = ipath_dma_map_page, ++ .unmap_page = ipath_dma_unmap_page, ++ .map_sg = ipath_map_sg, ++ .unmap_sg = ipath_unmap_sg, ++ .dma_address = ipath_sg_dma_address, ++ .dma_len = ipath_sg_dma_len, ++ .sync_single_for_cpu = ipath_sync_single_for_cpu, ++ .sync_single_for_device = ipath_sync_single_for_device, ++ .alloc_coherent = ipath_dma_alloc_coherent, ++ .free_coherent = ipath_dma_free_coherent + }; +diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c +index 79b3dbc..96e5fcc 100644 +--- a/drivers/infiniband/hw/ipath/ipath_rc.c ++++ b/drivers/infiniband/hw/ipath/ipath_rc.c +@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, + struct ib_atomic_eth *ateth; + struct ipath_ack_entry *e; + u64 vaddr; +- atomic64_t *maddr; ++ atomic64_unchecked_t *maddr; + u64 sdata; + u32 rkey; + u8 next; +@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, + IB_ACCESS_REMOTE_ATOMIC))) + goto nack_acc_unlck; + /* Perform atomic OP and save result. */ +- maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; + sdata = be64_to_cpu(ateth->swap_data); + e = &qp->s_ack_queue[qp->r_head_ack_queue]; + e->atomic_data = (opcode == OP(FETCH_ADD)) ? 
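/*
 * The ipath_dma_mapping_ops rewrite above is a mechanical conversion
 * from positional to designated initializers. Besides readability,
 * designated form is what grsecurity's structure layout randomization
 * (randstruct) requires: once fields can be reordered, binding by
 * position silently mismatches. Minimal before/after illustration
 * with an invented ops struct:
 */
#include <stdio.h>

struct dma_ops {
	int  (*mapping_error)(void);
	void (*unmap_single)(void);
	void (*map_single)(void);    /* imagine this field was moved */
};

static int  my_err(void)   { return 0; }
static void my_map(void)   { }
static void my_unmap(void) { }

/* Positional form binds by order and would now be wrong:        */
/* static struct dma_ops bad = { my_err, my_map, my_unmap };     */

/* Designated form stays correct regardless of field order. */
static struct dma_ops good = {
	.mapping_error = my_err,
	.map_single    = my_map,
	.unmap_single  = my_unmap,
};

int main(void)
{
	printf("mapping_error() = %d\n", good.mapping_error());
	return 0;
}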
+- (u64) atomic64_add_return(sdata, maddr) - sdata : ++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + be64_to_cpu(ateth->compare_data), + sdata); +diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c +index 1f95bba..9530f87 100644 +--- a/drivers/infiniband/hw/ipath/ipath_ruc.c ++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c +@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp) + unsigned long flags; + struct ib_wc wc; + u64 sdata; +- atomic64_t *maddr; ++ atomic64_unchecked_t *maddr; + enum ib_wc_status send_status; + + /* +@@ -382,11 +382,11 @@ again: + IB_ACCESS_REMOTE_ATOMIC))) + goto acc_err; + /* Perform atomic OP and save result. */ +- maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; + sdata = wqe->wr.wr.atomic.compare_add; + *(u64 *) sqp->s_sge.sge.vaddr = + (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? +- (u64) atomic64_add_return(sdata, maddr) - sdata : ++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + sdata, wqe->wr.wr.atomic.swap); + goto send_comp; +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c +index f2a3f48..673ec79 100644 +--- a/drivers/infiniband/hw/mlx4/mad.c ++++ b/drivers/infiniband/hw/mlx4/mad.c +@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void) + + __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) + { +- return cpu_to_be64(atomic_inc_return(&ctx->tid)) | ++ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) | + cpu_to_be64(0xff00000000000000LL); + } + +diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c +index 25b2cdf..099ff97 100644 +--- a/drivers/infiniband/hw/mlx4/mcg.c ++++ b/drivers/infiniband/hw/mlx4/mcg.c +@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx) + { + char name[20]; + +- atomic_set(&ctx->tid, 0); ++ atomic_set_unchecked(&ctx->tid, 0); + sprintf(name, "mlx4_ib_mcg%d", ctx->port); + ctx->mcg_wq = create_singlethread_workqueue(name); + if (!ctx->mcg_wq) +diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h +index a230683..3723f2d 100644 +--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h ++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h +@@ -408,7 +408,7 @@ struct mlx4_ib_demux_ctx { + struct list_head mcg_mgid0_list; + struct workqueue_struct *mcg_wq; + struct mlx4_ib_demux_pv_ctx **tun; +- atomic_t tid; ++ atomic_unchecked_t tid; + int flushing; /* flushing the work queue */ + }; + +diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c +index 9d3e5c1..6f166df 100644 +--- a/drivers/infiniband/hw/mthca/mthca_cmd.c ++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c +@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) + mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n"); + } + +-int mthca_QUERY_FW(struct mthca_dev *dev) ++int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev) + { + struct mthca_mailbox *mailbox; + u32 *outbox; +@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + CMD_TIME_CLASS_B); + } + +-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, ++int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int num_mtt) + { + return mthca_cmd(dev, mailbox->dma, 
num_mtt, 0, CMD_WRITE_MTT, +@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, + 0, CMD_MAP_EQ, CMD_TIME_CLASS_B); + } + +-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, ++int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num) + { + return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, +@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) + CMD_TIME_CLASS_B); + } + +-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, ++int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad) + { +diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c +index 87897b9..7e79542 100644 +--- a/drivers/infiniband/hw/mthca/mthca_main.c ++++ b/drivers/infiniband/hw/mthca/mthca_main.c +@@ -692,7 +692,7 @@ err_close: + return err; + } + +-static int mthca_setup_hca(struct mthca_dev *dev) ++static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev) + { + int err; + +diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c +index ed9a989..6aa5dc2 100644 +--- a/drivers/infiniband/hw/mthca/mthca_mr.c ++++ b/drivers/infiniband/hw/mthca/mthca_mr.c +@@ -81,7 +81,7 @@ struct mthca_mpt_entry { + * through the bitmaps) + */ + +-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) ++static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order) + { + int o; + int m; +@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key) + return key; + } + +-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, ++int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) + { + struct mthca_mailbox *mailbox; +@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, + return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); + } + +-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, ++int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, + u64 *buffer_list, int buffer_size_shift, + int list_len, u64 iova, u64 total_size, + u32 access, struct mthca_mr *mr) +diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c +index 42dde06..1257310 100644 +--- a/drivers/infiniband/hw/mthca/mthca_provider.c ++++ b/drivers/infiniband/hw/mthca/mthca_provider.c +@@ -764,7 +764,7 @@ unlock: + return 0; + } + +-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) ++static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) + { + struct mthca_dev *dev = to_mdev(ibcq->device); + struct mthca_cq *cq = to_mcq(ibcq); +diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c +index 353c7b0..c6ce921 100644 +--- a/drivers/infiniband/hw/nes/nes.c ++++ b/drivers/infiniband/hw/nes/nes.c +@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); + LIST_HEAD(nes_adapter_list); + static LIST_HEAD(nes_dev_list); + +-atomic_t qps_destroyed; ++atomic_unchecked_t qps_destroyed; + + static unsigned int ee_flsh_adapter; + static unsigned 
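/*
 * The __intentional_overflow(-1) annotations on the mthca functions
 * above feed the size_overflow GCC plugin: arithmetic flowing through
 * the marked function is exempted from the plugin's runtime overflow
 * instrumentation (-1 marks the whole function/return value). Without
 * the plugin the marker must compile away, which a header can do
 * roughly like this -- illustrative, mirroring the pattern rather than
 * copied from the patch:
 */
#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)   /* no plugin: expands to nothing */
#endif

static int __intentional_overflow(-1) wrapping_sum(int a, int b)
{
	return a + b;   /* may wrap by design */
}

int main(void)
{
	printf("%d\n", wrapping_sum(2, 3));
	return 0;
}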
int sysfs_nonidx_addr; +@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r + struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; + struct nes_adapter *nesadapter = nesdev->nesadapter; + +- atomic_inc(&qps_destroyed); ++ atomic_inc_unchecked(&qps_destroyed); + + /* Free the control structures */ + +diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h +index 33cc589..3bd6538 100644 +--- a/drivers/infiniband/hw/nes/nes.h ++++ b/drivers/infiniband/hw/nes/nes.h +@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level; + extern unsigned int wqm_quanta; + extern struct list_head nes_adapter_list; + +-extern atomic_t cm_connects; +-extern atomic_t cm_accepts; +-extern atomic_t cm_disconnects; +-extern atomic_t cm_closes; +-extern atomic_t cm_connecteds; +-extern atomic_t cm_connect_reqs; +-extern atomic_t cm_rejects; +-extern atomic_t mod_qp_timouts; +-extern atomic_t qps_created; +-extern atomic_t qps_destroyed; +-extern atomic_t sw_qps_destroyed; ++extern atomic_unchecked_t cm_connects; ++extern atomic_unchecked_t cm_accepts; ++extern atomic_unchecked_t cm_disconnects; ++extern atomic_unchecked_t cm_closes; ++extern atomic_unchecked_t cm_connecteds; ++extern atomic_unchecked_t cm_connect_reqs; ++extern atomic_unchecked_t cm_rejects; ++extern atomic_unchecked_t mod_qp_timouts; ++extern atomic_unchecked_t qps_created; ++extern atomic_unchecked_t qps_destroyed; ++extern atomic_unchecked_t sw_qps_destroyed; + extern u32 mh_detected; + extern u32 mh_pauses_sent; + extern u32 cm_packets_sent; +@@ -196,16 +196,16 @@ extern u32 cm_packets_created; + extern u32 cm_packets_received; + extern u32 cm_packets_dropped; + extern u32 cm_packets_retrans; +-extern atomic_t cm_listens_created; +-extern atomic_t cm_listens_destroyed; ++extern atomic_unchecked_t cm_listens_created; ++extern atomic_unchecked_t cm_listens_destroyed; + extern u32 cm_backlog_drops; +-extern atomic_t cm_loopbacks; +-extern atomic_t cm_nodes_created; +-extern atomic_t cm_nodes_destroyed; +-extern atomic_t cm_accel_dropped_pkts; +-extern atomic_t cm_resets_recvd; +-extern atomic_t pau_qps_created; +-extern atomic_t pau_qps_destroyed; ++extern atomic_unchecked_t cm_loopbacks; ++extern atomic_unchecked_t cm_nodes_created; ++extern atomic_unchecked_t cm_nodes_destroyed; ++extern atomic_unchecked_t cm_accel_dropped_pkts; ++extern atomic_unchecked_t cm_resets_recvd; ++extern atomic_unchecked_t pau_qps_created; ++extern atomic_unchecked_t pau_qps_destroyed; + + extern u32 int_mod_timer_init; + extern u32 int_mod_cq_depth_256; +diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c +index 9c9f2f5..2559190 100644 +--- a/drivers/infiniband/hw/nes/nes_cm.c ++++ b/drivers/infiniband/hw/nes/nes_cm.c +@@ -68,14 +68,14 @@ u32 cm_packets_dropped; + u32 cm_packets_retrans; + u32 cm_packets_created; + u32 cm_packets_received; +-atomic_t cm_listens_created; +-atomic_t cm_listens_destroyed; ++atomic_unchecked_t cm_listens_created; ++atomic_unchecked_t cm_listens_destroyed; + u32 cm_backlog_drops; +-atomic_t cm_loopbacks; +-atomic_t cm_nodes_created; +-atomic_t cm_nodes_destroyed; +-atomic_t cm_accel_dropped_pkts; +-atomic_t cm_resets_recvd; ++atomic_unchecked_t cm_loopbacks; ++atomic_unchecked_t cm_nodes_created; ++atomic_unchecked_t cm_nodes_destroyed; ++atomic_unchecked_t cm_accel_dropped_pkts; ++atomic_unchecked_t cm_resets_recvd; + + static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); + static struct 
nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *); +@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core); + /* instance of function pointers for client API */ + /* set address of this instance to cm_core->cm_ops at cm_core alloc */ + static struct nes_cm_ops nes_cm_api = { +- mini_cm_accelerated, +- mini_cm_listen, +- mini_cm_del_listen, +- mini_cm_connect, +- mini_cm_close, +- mini_cm_accept, +- mini_cm_reject, +- mini_cm_recv_pkt, +- mini_cm_dealloc_core, +- mini_cm_get, +- mini_cm_set ++ .accelerated = mini_cm_accelerated, ++ .listen = mini_cm_listen, ++ .stop_listener = mini_cm_del_listen, ++ .connect = mini_cm_connect, ++ .close = mini_cm_close, ++ .accept = mini_cm_accept, ++ .reject = mini_cm_reject, ++ .recv_pkt = mini_cm_recv_pkt, ++ .destroy_cm_core = mini_cm_dealloc_core, ++ .get = mini_cm_get, ++ .set = mini_cm_set + }; + + static struct nes_cm_core *g_cm_core; + +-atomic_t cm_connects; +-atomic_t cm_accepts; +-atomic_t cm_disconnects; +-atomic_t cm_closes; +-atomic_t cm_connecteds; +-atomic_t cm_connect_reqs; +-atomic_t cm_rejects; ++atomic_unchecked_t cm_connects; ++atomic_unchecked_t cm_accepts; ++atomic_unchecked_t cm_disconnects; ++atomic_unchecked_t cm_closes; ++atomic_unchecked_t cm_connecteds; ++atomic_unchecked_t cm_connect_reqs; ++atomic_unchecked_t cm_rejects; + + int nes_add_ref_cm_node(struct nes_cm_node *cm_node) + { +@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, + kfree(listener); + listener = NULL; + ret = 0; +- atomic_inc(&cm_listens_destroyed); ++ atomic_inc_unchecked(&cm_listens_destroyed); + } else { + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + } +@@ -1465,7 +1465,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, + cm_node->rem_mac); + + add_hte_node(cm_core, cm_node); +- atomic_inc(&cm_nodes_created); ++ atomic_inc_unchecked(&cm_nodes_created); + + return cm_node; + } +@@ -1523,7 +1523,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, + } + + atomic_dec(&cm_core->node_cnt); +- atomic_inc(&cm_nodes_destroyed); ++ atomic_inc_unchecked(&cm_nodes_destroyed); + nesqp = cm_node->nesqp; + if (nesqp) { + nesqp->cm_node = NULL; +@@ -1587,7 +1587,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, + + static void drop_packet(struct sk_buff *skb) + { +- atomic_inc(&cm_accel_dropped_pkts); ++ atomic_inc_unchecked(&cm_accel_dropped_pkts); + dev_kfree_skb_any(skb); + } + +@@ -1650,7 +1650,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, + { + + int reset = 0; /* whether to send reset in case of err.. */ +- atomic_inc(&cm_resets_recvd); ++ atomic_inc_unchecked(&cm_resets_recvd); + nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." 
+ " refcnt=%d\n", cm_node, cm_node->state, + atomic_read(&cm_node->ref_count)); +@@ -2291,7 +2291,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, + rem_ref_cm_node(cm_node->cm_core, cm_node); + return NULL; + } +- atomic_inc(&cm_loopbacks); ++ atomic_inc_unchecked(&cm_loopbacks); + loopbackremotenode->loopbackpartner = cm_node; + loopbackremotenode->tcp_cntxt.rcv_wscale = + NES_CM_DEFAULT_RCV_WND_SCALE; +@@ -2566,7 +2566,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, + nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp); + else { + rem_ref_cm_node(cm_core, cm_node); +- atomic_inc(&cm_accel_dropped_pkts); ++ atomic_inc_unchecked(&cm_accel_dropped_pkts); + dev_kfree_skb_any(skb); + } + break; +@@ -2874,7 +2874,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) + + if ((cm_id) && (cm_id->event_handler)) { + if (issue_disconn) { +- atomic_inc(&cm_disconnects); ++ atomic_inc_unchecked(&cm_disconnects); + cm_event.event = IW_CM_EVENT_DISCONNECT; + cm_event.status = disconn_status; + cm_event.local_addr = cm_id->local_addr; +@@ -2896,7 +2896,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) + } + + if (issue_close) { +- atomic_inc(&cm_closes); ++ atomic_inc_unchecked(&cm_closes); + nes_disconnect(nesqp, 1); + + cm_id->provider_data = nesqp; +@@ -3034,7 +3034,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + + nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", + nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); +- atomic_inc(&cm_accepts); ++ atomic_inc_unchecked(&cm_accepts); + + nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", + netdev_refcnt_read(nesvnic->netdev)); +@@ -3223,7 +3223,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) + struct nes_cm_core *cm_core; + u8 *start_buff; + +- atomic_inc(&cm_rejects); ++ atomic_inc_unchecked(&cm_rejects); + cm_node = (struct nes_cm_node *)cm_id->provider_data; + loopback = cm_node->loopbackpartner; + cm_core = cm_node->cm_core; +@@ -3285,7 +3285,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr), + ntohs(laddr->sin_port)); + +- atomic_inc(&cm_connects); ++ atomic_inc_unchecked(&cm_connects); + nesqp->active_conn = 1; + + /* cache the cm_id in the qp */ +@@ -3397,7 +3397,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) + g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); + return err; + } +- atomic_inc(&cm_listens_created); ++ atomic_inc_unchecked(&cm_listens_created); + } + + cm_id->add_ref(cm_id); +@@ -3504,7 +3504,7 @@ static void cm_event_connected(struct nes_cm_event *event) + + if (nesqp->destroyed) + return; +- atomic_inc(&cm_connecteds); ++ atomic_inc_unchecked(&cm_connecteds); + nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" + " local port 0x%04X. 
jiffies = %lu.\n", + nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr), +@@ -3685,7 +3685,7 @@ static void cm_event_reset(struct nes_cm_event *event) + + cm_id->add_ref(cm_id); + ret = cm_id->event_handler(cm_id, &cm_event); +- atomic_inc(&cm_closes); ++ atomic_inc_unchecked(&cm_closes); + cm_event.event = IW_CM_EVENT_CLOSE; + cm_event.status = 0; + cm_event.provider_data = cm_id->provider_data; +@@ -3725,7 +3725,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) + return; + cm_id = cm_node->cm_id; + +- atomic_inc(&cm_connect_reqs); ++ atomic_inc_unchecked(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + +@@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event) + return; + cm_id = cm_node->cm_id; + +- atomic_inc(&cm_connect_reqs); ++ atomic_inc_unchecked(&cm_connect_reqs); + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", + cm_node, cm_id, jiffies); + +diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c +index 4166452..fc952c3 100644 +--- a/drivers/infiniband/hw/nes/nes_mgt.c ++++ b/drivers/infiniband/hw/nes/nes_mgt.c +@@ -40,8 +40,8 @@ + #include "nes.h" + #include "nes_mgt.h" + +-atomic_t pau_qps_created; +-atomic_t pau_qps_destroyed; ++atomic_unchecked_t pau_qps_created; ++atomic_unchecked_t pau_qps_destroyed; + + static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic) + { +@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp) + { + struct sk_buff *skb; + unsigned long flags; +- atomic_inc(&pau_qps_destroyed); ++ atomic_inc_unchecked(&pau_qps_destroyed); + + /* Free packets that have not yet been forwarded */ + /* Lock is acquired by skb_dequeue when removing the skb */ +@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq * + cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]); + skb_queue_head_init(&nesqp->pau_list); + spin_lock_init(&nesqp->pau_lock); +- atomic_inc(&pau_qps_created); ++ atomic_inc_unchecked(&pau_qps_created); + nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp); + } + +diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c +index 49eb511..a774366 100644 +--- a/drivers/infiniband/hw/nes/nes_nic.c ++++ b/drivers/infiniband/hw/nes/nes_nic.c +@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, + target_stat_values[++index] = mh_detected; + target_stat_values[++index] = mh_pauses_sent; + target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; +- target_stat_values[++index] = atomic_read(&cm_connects); +- target_stat_values[++index] = atomic_read(&cm_accepts); +- target_stat_values[++index] = atomic_read(&cm_disconnects); +- target_stat_values[++index] = atomic_read(&cm_connecteds); +- target_stat_values[++index] = atomic_read(&cm_connect_reqs); +- target_stat_values[++index] = atomic_read(&cm_rejects); +- target_stat_values[++index] = atomic_read(&mod_qp_timouts); +- target_stat_values[++index] = atomic_read(&qps_created); +- target_stat_values[++index] = atomic_read(&sw_qps_destroyed); +- target_stat_values[++index] = atomic_read(&qps_destroyed); +- target_stat_values[++index] = atomic_read(&cm_closes); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects); ++ 
target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects); ++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts); ++ target_stat_values[++index] = atomic_read_unchecked(&qps_created); ++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes); + target_stat_values[++index] = cm_packets_sent; + target_stat_values[++index] = cm_packets_bounced; + target_stat_values[++index] = cm_packets_created; + target_stat_values[++index] = cm_packets_received; + target_stat_values[++index] = cm_packets_dropped; + target_stat_values[++index] = cm_packets_retrans; +- target_stat_values[++index] = atomic_read(&cm_listens_created); +- target_stat_values[++index] = atomic_read(&cm_listens_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed); + target_stat_values[++index] = cm_backlog_drops; +- target_stat_values[++index] = atomic_read(&cm_loopbacks); +- target_stat_values[++index] = atomic_read(&cm_nodes_created); +- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); +- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); +- target_stat_values[++index] = atomic_read(&cm_resets_recvd); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts); ++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd); + target_stat_values[++index] = nesadapter->free_4kpbl; + target_stat_values[++index] = nesadapter->free_256pbl; + target_stat_values[++index] = int_mod_timer_init; + target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; + target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; + target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; +- target_stat_values[++index] = atomic_read(&pau_qps_created); +- target_stat_values[++index] = atomic_read(&pau_qps_destroyed); ++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created); ++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed); + } + + /** +diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c +index eb62461..2b7fc71 100644 +--- a/drivers/infiniband/hw/nes/nes_verbs.c ++++ b/drivers/infiniband/hw/nes/nes_verbs.c +@@ -46,9 +46,9 @@ + + #include <rdma/ib_umem.h> + +-atomic_t mod_qp_timouts; +-atomic_t qps_created; +-atomic_t sw_qps_destroyed; ++atomic_unchecked_t mod_qp_timouts; ++atomic_unchecked_t qps_created; ++atomic_unchecked_t sw_qps_destroyed; + + static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); + +@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, + if (init_attr->create_flags) + return ERR_PTR(-EINVAL); + +- atomic_inc(&qps_created); ++ atomic_inc_unchecked(&qps_created); + switch (init_attr->qp_type) { + case IB_QPT_RC: + if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { +@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) + struct iw_cm_event cm_event; + int ret = 0; + +- 
atomic_inc(&sw_qps_destroyed); ++ atomic_inc_unchecked(&sw_qps_destroyed); + nesqp->destroyed = 1; + + /* Blow away the connection if it exists. */ +diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h +index 1946101..09766d2 100644 +--- a/drivers/infiniband/hw/qib/qib.h ++++ b/drivers/infiniband/hw/qib/qib.h +@@ -52,6 +52,7 @@ + #include <linux/kref.h> + #include <linux/sched.h> + #include <linux/kthread.h> ++#include <linux/slab.h> + + #include "qib_common.h" + #include "qib_verbs.h" +diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c +index 24c41ba..102d71f 100644 +--- a/drivers/input/gameport/gameport.c ++++ b/drivers/input/gameport/gameport.c +@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys); + */ + static void gameport_init_port(struct gameport *gameport) + { +- static atomic_t gameport_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0); + + __module_get(THIS_MODULE); + + mutex_init(&gameport->drv_mutex); + device_initialize(&gameport->dev); + dev_set_name(&gameport->dev, "gameport%lu", +- (unsigned long)atomic_inc_return(&gameport_no) - 1); ++ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1); + gameport->dev.bus = &gameport_bus; + gameport->dev.release = gameport_release_port; + if (gameport->parent) +diff --git a/drivers/input/input.c b/drivers/input/input.c +index 29ca0bb..f4bc2e3 100644 +--- a/drivers/input/input.c ++++ b/drivers/input/input.c +@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class); + */ + struct input_dev *input_allocate_device(void) + { +- static atomic_t input_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t input_no = ATOMIC_INIT(0); + struct input_dev *dev; + + dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); +@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void) + INIT_LIST_HEAD(&dev->node); + + dev_set_name(&dev->dev, "input%ld", +- (unsigned long) atomic_inc_return(&input_no) - 1); ++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1); + + __module_get(THIS_MODULE); + } +diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c +index 4a95b22..874c182 100644 +--- a/drivers/input/joystick/sidewinder.c ++++ b/drivers/input/joystick/sidewinder.c +@@ -30,6 +30,7 @@ + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <linux/input.h> + #include <linux/gameport.h> + #include <linux/jiffies.h> +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 603fe0d..f63decc 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev, + + static int xpad_led_probe(struct usb_xpad *xpad) + { +- static atomic_t led_seq = ATOMIC_INIT(0); ++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0); + long led_no; + struct xpad_led *led; + struct led_classdev *led_cdev; +@@ -750,7 +750,7 @@ static int xpad_led_probe(struct usb_xpad *xpad) + if (!led) + return -ENOMEM; + +- led_no = (long)atomic_inc_return(&led_seq) - 1; ++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1; + + snprintf(led->name, sizeof(led->name), "xpad%ld", led_no); + led->xpad = xpad; +diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c +index e204f26..8459f15 100644 +--- a/drivers/input/misc/ims-pcu.c ++++ b/drivers/input/misc/ims-pcu.c +@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id) + + static 
int ims_pcu_init_application_mode(struct ims_pcu *pcu) + { +- static atomic_t device_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t device_no = ATOMIC_INIT(0); + + const struct ims_pcu_device_info *info; + u8 device_id; +@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu) + } + + /* Device appears to be operable, complete initialization */ +- pcu->device_no = atomic_inc_return(&device_no) - 1; ++ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1; + + error = ims_pcu_setup_backlight(pcu); + if (error) +diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h +index 2f0b39d..7370f13 100644 +--- a/drivers/input/mouse/psmouse.h ++++ b/drivers/input/mouse/psmouse.h +@@ -116,7 +116,7 @@ struct psmouse_attribute { + ssize_t (*set)(struct psmouse *psmouse, void *data, + const char *buf, size_t count); + bool protect; +-}; ++} __do_const; + #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr) + + ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr, +diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c +index b604564..3f14ae4 100644 +--- a/drivers/input/mousedev.c ++++ b/drivers/input/mousedev.c +@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, + + spin_unlock_irq(&client->packet_lock); + +- if (copy_to_user(buffer, data, count)) ++ if (count > sizeof(data) || copy_to_user(buffer, data, count)) + return -EFAULT; + + return count; +diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c +index b29134d..394deb0 100644 +--- a/drivers/input/serio/serio.c ++++ b/drivers/input/serio/serio.c +@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev) + */ + static void serio_init_port(struct serio *serio) + { +- static atomic_t serio_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0); + + __module_get(THIS_MODULE); + +@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio) + mutex_init(&serio->drv_mutex); + device_initialize(&serio->dev); + dev_set_name(&serio->dev, "serio%ld", +- (long)atomic_inc_return(&serio_no) - 1); ++ (long)atomic_inc_return_unchecked(&serio_no) - 1); + serio->dev.bus = &serio_bus; + serio->dev.release = serio_release_port; + serio->dev.groups = serio_device_attr_groups; +diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c +index c9a02fe..0debc75 100644 +--- a/drivers/input/serio/serio_raw.c ++++ b/drivers/input/serio/serio_raw.c +@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data, + + static int serio_raw_connect(struct serio *serio, struct serio_driver *drv) + { +- static atomic_t serio_raw_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0); + struct serio_raw *serio_raw; + int err; + +@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv) + } + + snprintf(serio_raw->name, sizeof(serio_raw->name), +- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1); ++ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1); + kref_init(&serio_raw->kref); + INIT_LIST_HEAD(&serio_raw->client_list); + init_waitqueue_head(&serio_raw->wait); +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index e5555fc..937986d 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = { + static void iommu_bus_init(struct bus_type *bus, 
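/*
 * The mousedev_read() change above is a defensive bound: `count` is
 * caller-controlled, so it is checked against the kernel buffer before
 * copy_to_user() ever runs, turning a potential over-read of kernel
 * memory into a clean -EFAULT. Userspace model of the same guard, with
 * memcpy standing in for copy_to_user():
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static char data[8] = "payload";

/* Returns bytes copied, or -EFAULT on an oversized request. */
static int safe_read(char *buffer, size_t count)
{
	if (count > sizeof(data))       /* reject, don't over-read */
		return -EFAULT;
	memcpy(buffer, data, count);    /* stands in for copy_to_user */
	return (int)count;
}

int main(void)
{
	char buf[64];
	printf("ok: %d\n", safe_read(buf, 8));
	printf("oversized: %d\n", safe_read(buf, 1024));
	return 0;
}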
struct iommu_ops *ops) + { + bus_register_notifier(bus, &iommu_bus_nb); +- bus_for_each_dev(bus, NULL, ops, add_iommu_group); ++ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group); + } + + /** +diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c +index 228632c9..edfe331 100644 +--- a/drivers/iommu/irq_remapping.c ++++ b/drivers/iommu/irq_remapping.c +@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) + void panic_if_irq_remap(const char *msg) + { + if (irq_remapping_enabled) +- panic(msg); ++ panic("%s", msg); + } + + static void ir_ack_apic_edge(struct irq_data *data) +@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p) + + void irq_remap_modify_chip_defaults(struct irq_chip *chip) + { +- chip->irq_print_chip = ir_print_prefix; +- chip->irq_ack = ir_ack_apic_edge; +- chip->irq_eoi = ir_ack_apic_level; +- chip->irq_set_affinity = x86_io_apic_ops.set_affinity; ++ pax_open_kernel(); ++ *(void **)&chip->irq_print_chip = ir_print_prefix; ++ *(void **)&chip->irq_ack = ir_ack_apic_edge; ++ *(void **)&chip->irq_eoi = ir_ack_apic_level; ++ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity; ++ pax_close_kernel(); + } + + bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip) +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c +index 12698ee..a58a958 100644 +--- a/drivers/irqchip/irq-gic.c ++++ b/drivers/irqchip/irq-gic.c +@@ -85,7 +85,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; + * Supported arch specific GIC irq extension. + * Default make them NULL. + */ +-struct irq_chip gic_arch_extn = { ++irq_chip_no_const gic_arch_extn = { + .irq_eoi = NULL, + .irq_mask = NULL, + .irq_unmask = NULL, +@@ -337,7 +337,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) + chained_irq_exit(chip, desc); + } + +-static struct irq_chip gic_chip = { ++static irq_chip_no_const gic_chip __read_only = { + .name = "GIC", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, +diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c +index 8777065..a4a9967 100644 +--- a/drivers/irqchip/irq-renesas-irqc.c ++++ b/drivers/irqchip/irq-renesas-irqc.c +@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev) + struct irqc_priv *p; + struct resource *io; + struct resource *irq; +- struct irq_chip *irq_chip; ++ irq_chip_no_const *irq_chip; + const char *name = dev_name(&pdev->dev); + int ret; + int k; +diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c +index ac6f72b..81150f2 100644 +--- a/drivers/isdn/capi/capi.c ++++ b/drivers/isdn/capi/capi.c +@@ -81,8 +81,8 @@ struct capiminor { + + struct capi20_appl *ap; + u32 ncci; +- atomic_t datahandle; +- atomic_t msgid; ++ atomic_unchecked_t datahandle; ++ atomic_unchecked_t msgid; + + struct tty_port port; + int ttyinstop; +@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb) + capimsg_setu16(s, 2, mp->ap->applid); + capimsg_setu8 (s, 4, CAPI_DATA_B3); + capimsg_setu8 (s, 5, CAPI_RESP); +- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid)); ++ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid)); + capimsg_setu32(s, 8, mp->ncci); + capimsg_setu16(s, 12, datahandle); + } +@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp) + mp->outbytes -= len; + spin_unlock_bh(&mp->outlock); + +- datahandle = atomic_inc_return(&mp->datahandle); ++ datahandle = 
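/*
 * irq_remap_modify_chip_defaults() above patches function pointers in
 * an irq_chip that KERNEXEC/constify keep in read-only memory, so each
 * store is bracketed by pax_open_kernel()/pax_close_kernel(), which
 * briefly lift write protection (e.g. by toggling CR0.WP on x86). The
 * shape of the pattern, with stub macros standing in for the real
 * primitives:
 */
#include <stdio.h>

/* Stand-ins only: the real versions flip hardware write protection. */
#define pax_open_kernel()  do { puts("WP off"); } while (0)
#define pax_close_kernel() do { puts("WP on");  } while (0)

struct irq_chip_like {
	void (*irq_ack)(void);
};

static void my_ack(void) { }

static void modify_chip(struct irq_chip_like *chip)
{
	pax_open_kernel();
	/* The patched code also casts through (void **) to bypass const. */
	chip->irq_ack = my_ack;
	pax_close_kernel();
}

int main(void)
{
	struct irq_chip_like chip = { 0 };
	modify_chip(&chip);
	printf("patched: %s\n", chip.irq_ack ? "yes" : "no");
	return 0;
}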
atomic_inc_return_unchecked(&mp->datahandle); + skb_push(skb, CAPI_DATA_B3_REQ_LEN); + memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN); + capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN); + capimsg_setu16(skb->data, 2, mp->ap->applid); + capimsg_setu8 (skb->data, 4, CAPI_DATA_B3); + capimsg_setu8 (skb->data, 5, CAPI_REQ); +- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid)); ++ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid)); + capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */ + capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */ + capimsg_setu16(skb->data, 16, len); /* Data length */ +diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c +index c44950d..10ac276 100644 +--- a/drivers/isdn/gigaset/bas-gigaset.c ++++ b/drivers/isdn/gigaset/bas-gigaset.c +@@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb_interface *intf) + + + static const struct gigaset_ops gigops = { +- gigaset_write_cmd, +- gigaset_write_room, +- gigaset_chars_in_buffer, +- gigaset_brkchars, +- gigaset_init_bchannel, +- gigaset_close_bchannel, +- gigaset_initbcshw, +- gigaset_freebcshw, +- gigaset_reinitbcshw, +- gigaset_initcshw, +- gigaset_freecshw, +- gigaset_set_modem_ctrl, +- gigaset_baud_rate, +- gigaset_set_line_ctrl, +- gigaset_isoc_send_skb, +- gigaset_isoc_input, ++ .write_cmd = gigaset_write_cmd, ++ .write_room = gigaset_write_room, ++ .chars_in_buffer = gigaset_chars_in_buffer, ++ .brkchars = gigaset_brkchars, ++ .init_bchannel = gigaset_init_bchannel, ++ .close_bchannel = gigaset_close_bchannel, ++ .initbcshw = gigaset_initbcshw, ++ .freebcshw = gigaset_freebcshw, ++ .reinitbcshw = gigaset_reinitbcshw, ++ .initcshw = gigaset_initcshw, ++ .freecshw = gigaset_freecshw, ++ .set_modem_ctrl = gigaset_set_modem_ctrl, ++ .baud_rate = gigaset_baud_rate, ++ .set_line_ctrl = gigaset_set_line_ctrl, ++ .send_skb = gigaset_isoc_send_skb, ++ .handle_input = gigaset_isoc_input, + }; + + /* bas_gigaset_init +diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c +index 600c79b..3752bab 100644 +--- a/drivers/isdn/gigaset/interface.c ++++ b/drivers/isdn/gigaset/interface.c +@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp) + } + tty->driver_data = cs; + +- ++cs->port.count; ++ atomic_inc(&cs->port.count); + +- if (cs->port.count == 1) { ++ if (atomic_read(&cs->port.count) == 1) { + tty_port_tty_set(&cs->port, tty); + cs->port.low_latency = 1; + } +@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp) + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->port.count) ++ else if (!atomic_read(&cs->port.count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); +- else if (!--cs->port.count) ++ else if (!atomic_dec_return(&cs->port.count)) + tty_port_tty_set(&cs->port, NULL); + + mutex_unlock(&cs->mutex); +diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c +index 8c91fd5..14f13ce 100644 +--- a/drivers/isdn/gigaset/ser-gigaset.c ++++ b/drivers/isdn/gigaset/ser-gigaset.c +@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) + } + + static const struct gigaset_ops ops = { +- gigaset_write_cmd, +- gigaset_write_room, +- gigaset_chars_in_buffer, +- gigaset_brkchars, +- gigaset_init_bchannel, +- gigaset_close_bchannel, +- gigaset_initbcshw, +- gigaset_freebcshw, +- gigaset_reinitbcshw, +- gigaset_initcshw, +- gigaset_freecshw, +- 
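/*
 * The gigaset if_open()/if_close() hunks above convert the tty port's
 * open count from a plain int to atomic_t: ++/-- on an int is a
 * read-modify-write that two CPUs can interleave, losing an open or a
 * close. C11 equivalent of the fixed pattern (names illustrative):
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void tty_open(void)
{
	/* was: ++cs->port.count;  (racy) */
	if (atomic_fetch_add(&port_count, 1) + 1 == 1)
		puts("first opener: set up the port");
}

static void tty_close(void)
{
	/* was: --cs->port.count;  (racy) */
	if (atomic_fetch_sub(&port_count, 1) - 1 == 0)
		puts("last closer: tear down the port");
}

int main(void)
{
	tty_open();
	tty_open();
	tty_close();
	tty_close();
	return 0;
}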
gigaset_set_modem_ctrl, +- gigaset_baud_rate, +- gigaset_set_line_ctrl, +- gigaset_m10x_send_skb, /* asyncdata.c */ +- gigaset_m10x_input, /* asyncdata.c */ ++ .write_cmd = gigaset_write_cmd, ++ .write_room = gigaset_write_room, ++ .chars_in_buffer = gigaset_chars_in_buffer, ++ .brkchars = gigaset_brkchars, ++ .init_bchannel = gigaset_init_bchannel, ++ .close_bchannel = gigaset_close_bchannel, ++ .initbcshw = gigaset_initbcshw, ++ .freebcshw = gigaset_freebcshw, ++ .reinitbcshw = gigaset_reinitbcshw, ++ .initcshw = gigaset_initcshw, ++ .freecshw = gigaset_freecshw, ++ .set_modem_ctrl = gigaset_set_modem_ctrl, ++ .baud_rate = gigaset_baud_rate, ++ .set_line_ctrl = gigaset_set_line_ctrl, ++ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */ ++ .handle_input = gigaset_m10x_input, /* asyncdata.c */ + }; + + +diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c +index d0a41cb..b953e50 100644 +--- a/drivers/isdn/gigaset/usb-gigaset.c ++++ b/drivers/isdn/gigaset/usb-gigaset.c +@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) + gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf); + memcpy(cs->hw.usb->bchars, buf, 6); + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, +- 0, 0, &buf, 6, 2000); ++ 0, 0, buf, 6, 2000); + } + + static void gigaset_freebcshw(struct bc_state *bcs) +@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf) + } + + static const struct gigaset_ops ops = { +- gigaset_write_cmd, +- gigaset_write_room, +- gigaset_chars_in_buffer, +- gigaset_brkchars, +- gigaset_init_bchannel, +- gigaset_close_bchannel, +- gigaset_initbcshw, +- gigaset_freebcshw, +- gigaset_reinitbcshw, +- gigaset_initcshw, +- gigaset_freecshw, +- gigaset_set_modem_ctrl, +- gigaset_baud_rate, +- gigaset_set_line_ctrl, +- gigaset_m10x_send_skb, +- gigaset_m10x_input, ++ .write_cmd = gigaset_write_cmd, ++ .write_room = gigaset_write_room, ++ .chars_in_buffer = gigaset_chars_in_buffer, ++ .brkchars = gigaset_brkchars, ++ .init_bchannel = gigaset_init_bchannel, ++ .close_bchannel = gigaset_close_bchannel, ++ .initbcshw = gigaset_initbcshw, ++ .freebcshw = gigaset_freebcshw, ++ .reinitbcshw = gigaset_reinitbcshw, ++ .initcshw = gigaset_initcshw, ++ .freecshw = gigaset_freecshw, ++ .set_modem_ctrl = gigaset_set_modem_ctrl, ++ .baud_rate = gigaset_baud_rate, ++ .set_line_ctrl = gigaset_set_line_ctrl, ++ .send_skb = gigaset_m10x_send_skb, ++ .handle_input = gigaset_m10x_input, + }; + + /* +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c +index 4d9b195..455075c 100644 +--- a/drivers/isdn/hardware/avm/b1.c ++++ b/drivers/isdn/hardware/avm/b1.c +@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file) + } + if (left) { + if (t4file->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof buf || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config) + } + if (left) { + if (config->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof buf || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c +index 9bb12ba..d4262f7 100644 +--- a/drivers/isdn/i4l/isdn_common.c ++++ b/drivers/isdn/i4l/isdn_common.c +@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) + } else + return -EINVAL; + case 
IIOCDBGVAR: ++ if (!capable(CAP_SYS_RAWIO)) ++ return -EPERM; + if (arg) { + if (copy_to_user(argp, &dev, sizeof(ulong))) + return -EFAULT; +diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c +index 91d5730..336523e 100644 +--- a/drivers/isdn/i4l/isdn_concap.c ++++ b/drivers/isdn/i4l/isdn_concap.c +@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap) + } + + struct concap_device_ops isdn_concap_reliable_dl_dops = { +- &isdn_concap_dl_data_req, +- &isdn_concap_dl_connect_req, +- &isdn_concap_dl_disconn_req ++ .data_req = &isdn_concap_dl_data_req, ++ .connect_req = &isdn_concap_dl_connect_req, ++ .disconn_req = &isdn_concap_dl_disconn_req + }; + + /* The following should better go into a dedicated source file such that +diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c +index 3c5f249..5fac4d0 100644 +--- a/drivers/isdn/i4l/isdn_tty.c ++++ b/drivers/isdn/i4l/isdn_tty.c +@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp) + + #ifdef ISDN_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name, +- port->count); ++ atomic_read(&port->count)); + #endif +- port->count++; ++ atomic_inc(&port->count); + port->tty = tty; + /* + * Start up serial port +@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp) + #endif + return; + } +- if ((tty->count == 1) && (port->count != 1)) { ++ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. Info->count should always +@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp) + * serial port won't be shutdown. + */ + printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, " +- "info->count is %d\n", port->count); +- port->count = 1; ++ "info->count is %d\n", atomic_read(&port->count)); ++ atomic_set(&port->count, 1); + } +- if (--port->count < 0) { ++ if (atomic_dec_return(&port->count) < 0) { + printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n", +- info->line, port->count); +- port->count = 0; ++ info->line, atomic_read(&port->count)); ++ atomic_set(&port->count, 0); + } +- if (port->count) { ++ if (atomic_read(&port->count)) { + #ifdef ISDN_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n"); + #endif +@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty) + if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup")) + return; + isdn_tty_shutdown(info); +- port->count = 0; ++ atomic_set(&port->count, 0); + port->flags &= ~ASYNC_NORMAL_ACTIVE; + port->tty = NULL; + wake_up_interruptible(&port->open_wait); +@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup) + for (i = 0; i < ISDN_MAX_CHANNELS; i++) { + modem_info *info = &dev->mdm.info[i]; + +- if (info->port.count == 0) ++ if (atomic_read(&info->port.count) == 0) + continue; + if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */ + (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */ +diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c +index e2d4e58..40cd045 100644 +--- a/drivers/isdn/i4l/isdn_x25iface.c ++++ b/drivers/isdn/i4l/isdn_x25iface.c +@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *); + + + static struct concap_proto_ops ix25_pops = { +- &isdn_x25iface_proto_new, +- &isdn_x25iface_proto_del, +- &isdn_x25iface_proto_restart, +- &isdn_x25iface_proto_close, +- 
&isdn_x25iface_xmit, +- &isdn_x25iface_receive, +- &isdn_x25iface_connect_ind, +- &isdn_x25iface_disconn_ind ++ .proto_new = &isdn_x25iface_proto_new, ++ .proto_del = &isdn_x25iface_proto_del, ++ .restart = &isdn_x25iface_proto_restart, ++ .close = &isdn_x25iface_proto_close, ++ .encap_and_xmit = &isdn_x25iface_xmit, ++ .data_ind = &isdn_x25iface_receive, ++ .connect_ind = &isdn_x25iface_connect_ind, ++ .disconn_ind = &isdn_x25iface_disconn_ind + }; + + /* error message helper function */ +diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c +index 53d487f..cae33fe 100644 +--- a/drivers/isdn/icn/icn.c ++++ b/drivers/isdn/icn/icn.c +@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card) + if (count > len) + count = len; + if (user) { +- if (copy_from_user(msg, buf, count)) ++ if (count > sizeof msg || copy_from_user(msg, buf, count)) + return -EFAULT; + } else + memcpy(msg, buf, count); +@@ -1155,7 +1155,7 @@ icn_command(isdn_ctrl *c, icn_card *card) + ulong a; + ulong flags; + int i; +- char cbuf[60]; ++ char cbuf[80]; + isdn_ctrl cmd; + icn_cdef cdef; + char __user *arg; +@@ -1309,7 +1309,6 @@ icn_command(isdn_ctrl *c, icn_card *card) + break; + if ((c->arg & 255) < ICN_BCH) { + char *p; +- char dial[50]; + char dcode[4]; + + a = c->arg; +@@ -1321,10 +1320,10 @@ icn_command(isdn_ctrl *c, icn_card *card) + } else + /* Normal Dial */ + strcpy(dcode, "CAL"); +- strcpy(dial, p); +- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), +- dcode, dial, c->parm.setup.si1, +- c->parm.setup.si2, c->parm.setup.eazmsn); ++ snprintf(cbuf, sizeof(cbuf), ++ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), ++ dcode, p, c->parm.setup.si1, ++ c->parm.setup.si2, c->parm.setup.eazmsn); + i = icn_writecmd(cbuf, strlen(cbuf), 0, card); + } + break; +diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c +index a4f05c5..1433bc5 100644 +--- a/drivers/isdn/mISDN/dsp_cmx.c ++++ b/drivers/isdn/mISDN/dsp_cmx.c +@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */ + static u16 dsp_count; /* last sample count */ + static int dsp_count_valid; /* if we have last sample count */ + +-void ++void __intentional_overflow(-1) + dsp_cmx_send(void *arg) + { + struct dsp_conf *conf; +diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c +index d93e245..e7ece6b 100644 +--- a/drivers/leds/leds-clevo-mail.c ++++ b/drivers/leds/leds-clevo-mail.c +@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id) + * detected as working, but in reality it is not) as low as + * possible. + */ +-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = { ++static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = { + { + .callback = clevo_mail_led_dmi_callback, + .ident = "Clevo D410J", +diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c +index 5b8f938..b73d657 100644 +--- a/drivers/leds/leds-ss4200.c ++++ b/drivers/leds/leds-ss4200.c +@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection"); + * detected as working, but in reality it is not) as low as + * possible. 
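
The leds-clevo-mail and leds-ss4200 hunks here make the same change: DMI match tables that are only read once, by dmi_check_system() at probe time, move from writable init data (__initdata) to read-only init data (__initconst). A minimal sketch of the pattern with illustrative table contents rather than either driver's real entries; note that __initconst properly accompanies a const object:

    #include <linux/dmi.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    static int __init example_dmi_callback(const struct dmi_system_id *id)
    {
            pr_info("matched %s\n", id->ident);
            return 1;
    }

    /* .init.rodata: read-only while it exists, discarded after boot */
    static const struct dmi_system_id example_dmi_table[] __initconst = {
            {
                    .callback = example_dmi_callback,
                    .ident = "Example Board",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                    },
            },
            { }     /* terminator */
    };

    static int __init example_init(void)
    {
            dmi_check_system(example_dmi_table);
            return 0;
    }
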
+ */ +-static struct dmi_system_id nas_led_whitelist[] __initdata = { ++static struct dmi_system_id nas_led_whitelist[] __initconst = { + { + .callback = ss4200_led_dmi_callback, + .ident = "Intel SS4200-E", +diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c +index 0bf1e4e..b4bf44e 100644 +--- a/drivers/lguest/core.c ++++ b/drivers/lguest/core.c +@@ -97,9 +97,17 @@ static __init int map_switcher(void) + * The end address needs +1 because __get_vm_area allocates an + * extra guard page, so we need space for that. + */ ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, ++ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr ++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#else + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, + VM_ALLOC, switcher_addr, switcher_addr + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#endif ++ + if (!switcher_vma) { + err = -ENOMEM; + printk("lguest: could not map switcher pages high\n"); +@@ -124,7 +132,7 @@ static __init int map_switcher(void) + * Now the Switcher is mapped at the right address, we can't fail! + * Copy in the compiled-in Switcher code (from x86/switcher_32.S). + */ +- memcpy(switcher_vma->addr, start_switcher_text, ++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text), + end_switcher_text - start_switcher_text); + + printk(KERN_INFO "lguest: mapped switcher at %p\n", +diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c +index bfb39bb..08a603b 100644 +--- a/drivers/lguest/page_tables.c ++++ b/drivers/lguest/page_tables.c +@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) + /*:*/ + + #ifdef CONFIG_X86_PAE +-static void release_pmd(pmd_t *spmd) ++static void __intentional_overflow(-1) release_pmd(pmd_t *spmd) + { + /* If the entry's not present, there's nothing to release. */ + if (pmd_flags(*spmd) & _PAGE_PRESENT) { +diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c +index 922a1ac..9dd0c2a 100644 +--- a/drivers/lguest/x86/core.c ++++ b/drivers/lguest/x86/core.c +@@ -59,7 +59,7 @@ static struct { + /* Offset from where switcher.S was compiled to where we've copied it */ + static unsigned long switcher_offset(void) + { +- return switcher_addr - (unsigned long)start_switcher_text; ++ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text); + } + + /* This cpu's struct lguest_pages (after the Switcher text page) */ +@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) + * These copies are pretty cheap, so we do them unconditionally: */ + /* Save the current Host top-level page directory. + */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pages->state.host_cr3 = read_cr3(); ++#else + pages->state.host_cr3 = __pa(current->mm->pgd); ++#endif ++ + /* + * Set up the Guest's page tables to see this CPU's pages (and no + * other CPU's pages). +@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void) + * compiled-in switcher code and the high-mapped copy we just made. + */ + for (i = 0; i < IDT_ENTRIES; i++) +- default_idt_entries[i] += switcher_offset(); ++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset(); + + /* + * Set up the Switcher's per-cpu areas. +@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void) + * it will be undisturbed when we switch. To change %cs and jump we + * need this structure to feed to Intel's "lcall" instruction. 
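
The lguest conversions in this region all funnel through one helper: under PAX_KERNEXEC the kernel's text is linked at one linear address but mapped and executed through a read-only alias, so any arithmetic on text addresses (copying the Switcher, biasing the IDT entries, building the lcall target) must first translate with ktla_ktva(). A conceptual sketch only; the offset name follows PaX's i386 implementation and is an assumption here, not something defined in these hunks:

    /* "kernel text linear address" -> "kernel text virtual alias" and back.
     * With KERNEXEC disabled both are identity mappings. */
    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #define ktva_ktla(addr) (addr)
    #endif
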
+ */ +- lguest_entry.offset = (long)switch_to_guest + switcher_offset(); ++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset(); + lguest_entry.segment = LGUEST_CS; + + /* +diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S +index 40634b0..4f5855e 100644 +--- a/drivers/lguest/x86/switcher_32.S ++++ b/drivers/lguest/x86/switcher_32.S +@@ -87,6 +87,7 @@ + #include <asm/page.h> + #include <asm/segment.h> + #include <asm/lguest.h> ++#include <asm/processor-flags.h> + + // We mark the start of the code to copy + // It's placed in .text tho it's never run here +@@ -149,6 +150,13 @@ ENTRY(switch_to_guest) + // Changes type when we load it: damn Intel! + // For after we switch over our page tables + // That entry will be read-only: we'd crash. ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %edx ++ xor $X86_CR0_WP, %edx ++ mov %edx, %cr0 ++#endif ++ + movl $(GDT_ENTRY_TSS*8), %edx + ltr %dx + +@@ -157,9 +165,15 @@ ENTRY(switch_to_guest) + // Let's clear it again for our return. + // The GDT descriptor of the Host + // Points to the table after two "size" bytes +- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx ++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax + // Clear "used" from type field (byte 5, bit 2) +- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) ++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ mov %cr0, %eax ++ xor $X86_CR0_WP, %eax ++ mov %eax, %cr0 ++#endif + + // Once our page table's switched, the Guest is live! + // The Host fades as we run this final step. +@@ -295,13 +309,12 @@ deliver_to_host: + // I consulted gcc, and it gave + // These instructions, which I gladly credit: + leal (%edx,%ebx,8), %eax +- movzwl (%eax),%edx +- movl 4(%eax), %eax +- xorw %ax, %ax +- orl %eax, %edx ++ movl 4(%eax), %edx ++ movw (%eax), %dx + // Now the address of the handler's in %edx + // We call it now: its "iret" drops us home. +- jmp *%edx ++ ljmp $__KERNEL_CS, $1f ++1: jmp *%edx + + // Every interrupt can come to us here + // But we must truly tell each apart. +diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h +index 7ef7461..5a09dac 100644 +--- a/drivers/md/bcache/closure.h ++++ b/drivers/md/bcache/closure.h +@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl) + static inline void set_closure_fn(struct closure *cl, closure_fn *fn, + struct workqueue_struct *wq) + { +- BUG_ON(object_is_on_stack(cl)); ++ BUG_ON(object_starts_on_stack(cl)); + closure_set_ip(cl); + cl->fn = fn; + cl->wq = wq; +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c +index 4195a01..42527ac 100644 +--- a/drivers/md/bitmap.c ++++ b/drivers/md/bitmap.c +@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap) + chunk_kb ? 
"KB" : "B"); + if (bitmap->storage.file) { + seq_printf(seq, ", file: "); +- seq_path(seq, &bitmap->storage.file->f_path, " \t\n"); ++ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\"); + } + + seq_printf(seq, "\n"); +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 5152142..623d141 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param) + cmd == DM_LIST_VERSIONS_CMD) + return 0; + +- if ((cmd == DM_DEV_CREATE_CMD)) { ++ if (cmd == DM_DEV_CREATE_CMD) { + if (!*param->name) { + DMWARN("name not supplied when creating device"); + return -EINVAL; +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index 7dfdb5c..4caada6 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -40,7 +40,7 @@ enum dm_raid1_error { + + struct mirror { + struct mirror_set *ms; +- atomic_t error_count; ++ atomic_unchecked_t error_count; + unsigned long error_type; + struct dm_dev *dev; + sector_t offset; +@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms) + struct mirror *m; + + for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++) +- if (!atomic_read(&m->error_count)) ++ if (!atomic_read_unchecked(&m->error_count)) + return m; + + return NULL; +@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type) + * simple way to tell if a device has encountered + * errors. + */ +- atomic_inc(&m->error_count); ++ atomic_inc_unchecked(&m->error_count); + + if (test_and_set_bit(error_type, &m->error_type)) + return; +@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) + struct mirror *m = get_default_mirror(ms); + + do { +- if (likely(!atomic_read(&m->error_count))) ++ if (likely(!atomic_read_unchecked(&m->error_count))) + return m; + + if (m-- == ms->mirror) +@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m) + { + struct mirror *default_mirror = get_default_mirror(m->ms); + +- return !atomic_read(&default_mirror->error_count); ++ return !atomic_read_unchecked(&default_mirror->error_count); + } + + static int mirror_available(struct mirror_set *ms, struct bio *bio) +@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) + */ + if (likely(region_in_sync(ms, region, 1))) + m = choose_mirror(ms, bio->bi_iter.bi_sector); +- else if (m && atomic_read(&m->error_count)) ++ else if (m && atomic_read_unchecked(&m->error_count)) + m = NULL; + + if (likely(m)) +@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + } + + ms->mirror[mirror].ms = ms; +- atomic_set(&(ms->mirror[mirror].error_count), 0); ++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0); + ms->mirror[mirror].error_type = 0; + ms->mirror[mirror].offset = offset; + +@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti) + */ + static char device_status_char(struct mirror *m) + { +- if (!atomic_read(&(m->error_count))) ++ if (!atomic_read_unchecked(&(m->error_count))) + return 'A'; + + return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 
'F' : +diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c +index 28a9012..9c0f6a5 100644 +--- a/drivers/md/dm-stats.c ++++ b/drivers/md/dm-stats.c +@@ -382,7 +382,7 @@ do_sync_free: + synchronize_rcu_expedited(); + dm_stat_free(&s->rcu_head); + } else { +- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; ++ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1; + call_rcu(&s->rcu_head, dm_stat_free); + } + return 0; +@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, + ((bi_rw & (REQ_WRITE | REQ_DISCARD)) == + (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))) + )); +- ACCESS_ONCE(last->last_sector) = end_sector; +- ACCESS_ONCE(last->last_rw) = bi_rw; ++ ACCESS_ONCE_RW(last->last_sector) = end_sector; ++ ACCESS_ONCE_RW(last->last_rw) = bi_rw; + } + + rcu_read_lock(); +diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c +index d1600d2..4c3af3a 100644 +--- a/drivers/md/dm-stripe.c ++++ b/drivers/md/dm-stripe.c +@@ -21,7 +21,7 @@ struct stripe { + struct dm_dev *dev; + sector_t physical_start; + +- atomic_t error_count; ++ atomic_unchecked_t error_count; + }; + + struct stripe_c { +@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) + kfree(sc); + return r; + } +- atomic_set(&(sc->stripe[i].error_count), 0); ++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0); + } + + ti->private = sc; +@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type, + DMEMIT("%d ", sc->stripes); + for (i = 0; i < sc->stripes; i++) { + DMEMIT("%s ", sc->stripe[i].dev->name); +- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? ++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ? + 'D' : 'A'; + } + buffer[i] = '\0'; +@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error) + */ + for (i = 0; i < sc->stripes; i++) + if (!strcmp(sc->stripe[i].dev->name, major_minor)) { +- atomic_inc(&(sc->stripe[i].error_count)); +- if (atomic_read(&(sc->stripe[i].error_count)) < ++ atomic_inc_unchecked(&(sc->stripe[i].error_count)); ++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) < + DM_IO_ERROR_THRESHOLD) + schedule_work(&sc->trigger_event); + } +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 6a7f2b8..fea0bde 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) + static int open_dev(struct dm_dev_internal *d, dev_t dev, + struct mapped_device *md) + { +- static char *_claim_ptr = "I belong to device-mapper"; ++ static char _claim_ptr[] = "I belong to device-mapper"; + struct block_device *bdev; + + int r; +@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, + if (!dev_size) + return 0; + +- if ((start >= dev_size) || (start + len > dev_size)) { ++ if ((start >= dev_size) || (len > dev_size - start)) { + DMWARN("%s: %s too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index e9d33ad..dae9880d 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) + { + pmd->info.tm = pmd->tm; + pmd->info.levels = 2; +- pmd->info.value_type.context = pmd->data_sm; ++ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm; + 
pmd->info.value_type.size = sizeof(__le64); + pmd->info.value_type.inc = data_block_inc; + pmd->info.value_type.dec = data_block_dec; +@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) + + pmd->bl_info.tm = pmd->tm; + pmd->bl_info.levels = 1; +- pmd->bl_info.value_type.context = pmd->data_sm; ++ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm; + pmd->bl_info.value_type.size = sizeof(__le64); + pmd->bl_info.value_type.inc = data_block_inc; + pmd->bl_info.value_type.dec = data_block_dec; +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 65ee3a0..1852af9 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -187,9 +187,9 @@ struct mapped_device { + /* + * Event handling. + */ +- atomic_t event_nr; ++ atomic_unchecked_t event_nr; + wait_queue_head_t eventq; +- atomic_t uevent_seq; ++ atomic_unchecked_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; /* Protect access to uevent_list */ + +@@ -1899,8 +1899,8 @@ static struct mapped_device *alloc_dev(int minor) + spin_lock_init(&md->deferred_lock); + atomic_set(&md->holders, 1); + atomic_set(&md->open_count, 0); +- atomic_set(&md->event_nr, 0); +- atomic_set(&md->uevent_seq, 0); ++ atomic_set_unchecked(&md->event_nr, 0); ++ atomic_set_unchecked(&md->uevent_seq, 0); + INIT_LIST_HEAD(&md->uevent_list); + spin_lock_init(&md->uevent_lock); + +@@ -2054,7 +2054,7 @@ static void event_callback(void *context) + + dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); + +- atomic_inc(&md->event_nr); ++ atomic_inc_unchecked(&md->event_nr); + wake_up(&md->eventq); + } + +@@ -2747,18 +2747,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, + + uint32_t dm_next_uevent_seq(struct mapped_device *md) + { +- return atomic_add_return(1, &md->uevent_seq); ++ return atomic_add_return_unchecked(1, &md->uevent_seq); + } + + uint32_t dm_get_event_nr(struct mapped_device *md) + { +- return atomic_read(&md->event_nr); ++ return atomic_read_unchecked(&md->event_nr); + } + + int dm_wait_event(struct mapped_device *md, int event_nr) + { + return wait_event_interruptible(md->eventq, +- (event_nr != atomic_read(&md->event_nr))); ++ (event_nr != atomic_read_unchecked(&md->event_nr))); + } + + void dm_uevent_add(struct mapped_device *md, struct list_head *elist) +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 73aedcb..424968a 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev); + * start build, activate spare + */ + static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); +-static atomic_t md_event_count; ++static atomic_unchecked_t md_event_count; + void md_new_event(struct mddev *mddev) + { +- atomic_inc(&md_event_count); ++ atomic_inc_unchecked(&md_event_count); + wake_up(&md_event_waiters); + } + EXPORT_SYMBOL_GPL(md_new_event); +@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event); + */ + static void md_new_event_inintr(struct mddev *mddev) + { +- atomic_inc(&md_event_count); ++ atomic_inc_unchecked(&md_event_count); + wake_up(&md_event_waiters); + } + +@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && + (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) + rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); +- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); ++ atomic_set_unchecked(&rdev->corrected_errors, 
le32_to_cpu(sb->cnt_corrected_read)); + + rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; +@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) + else + sb->resync_offset = cpu_to_le64(0); + +- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); ++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors)); + + sb->raid_disks = cpu_to_le32(mddev->raid_disks); + sb->size = cpu_to_le64(mddev->dev_sectors); +@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); + static ssize_t + errors_show(struct md_rdev *rdev, char *page) + { +- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); ++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors)); + } + + static ssize_t +@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len) + char *e; + unsigned long n = simple_strtoul(buf, &e, 10); + if (*buf && (*e == 0 || *e == '\n')) { +- atomic_set(&rdev->corrected_errors, n); ++ atomic_set_unchecked(&rdev->corrected_errors, n); + return len; + } + return -EINVAL; +@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev) + rdev->sb_loaded = 0; + rdev->bb_page = NULL; + atomic_set(&rdev->nr_pending, 0); +- atomic_set(&rdev->read_errors, 0); +- atomic_set(&rdev->corrected_errors, 0); ++ atomic_set_unchecked(&rdev->read_errors, 0); ++ atomic_set_unchecked(&rdev->corrected_errors, 0); + + INIT_LIST_HEAD(&rdev->same_set); + init_waitqueue_head(&rdev->blocked_wait); +@@ -7075,7 +7075,7 @@ static int md_seq_show(struct seq_file *seq, void *v) + + spin_unlock(&pers_lock); + seq_printf(seq, "\n"); +- seq->poll_event = atomic_read(&md_event_count); ++ seq->poll_event = atomic_read_unchecked(&md_event_count); + return 0; + } + if (v == (void*)2) { +@@ -7178,7 +7178,7 @@ static int md_seq_open(struct inode *inode, struct file *file) + return error; + + seq = file->private_data; +- seq->poll_event = atomic_read(&md_event_count); ++ seq->poll_event = atomic_read_unchecked(&md_event_count); + return error; + } + +@@ -7192,7 +7192,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait) + /* always allow read */ + mask = POLLIN | POLLRDNORM; + +- if (seq->poll_event != atomic_read(&md_event_count)) ++ if (seq->poll_event != atomic_read_unchecked(&md_event_count)) + mask |= POLLERR | POLLPRI; + return mask; + } +@@ -7236,7 +7236,7 @@ static int is_mddev_idle(struct mddev *mddev, int init) + struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + + (int)part_stat_read(&disk->part0, sectors[1]) - +- atomic_read(&disk->sync_io); ++ atomic_read_unchecked(&disk->sync_io); + /* sync IO will cause sync_io to increase before the disk_stats + * as sync_io is counted when a request starts, and + * disk_stats is counted when it completes. +diff --git a/drivers/md/md.h b/drivers/md/md.h +index 07bba96..2d6788c 100644 +--- a/drivers/md/md.h ++++ b/drivers/md/md.h +@@ -94,13 +94,13 @@ struct md_rdev { + * only maintained for arrays that + * support hot removal + */ +- atomic_t read_errors; /* number of consecutive read errors that ++ atomic_unchecked_t read_errors; /* number of consecutive read errors that + * we have tried to ignore. 
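
The md conversions here follow this section's recurring pattern: counters that are pure statistics and may legitimately wrap (read_errors, corrected_errors, event_nr, sync_io) become atomic_unchecked_t, so that PaX's REFCOUNT hardening, which traps on signed overflow of ordinary atomic_t operations, leaves them alone. A simplified sketch of the unchecked x86 flavour, assuming the usual PaX arrangement in which the checked atomic_inc() carries an overflow trap and this variant deliberately omits it:

    typedef struct {
            int counter;
    } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return (*(volatile const int *)&v->counter);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
            v->counter = i;
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            /* no "jno"/trap sequence: wrapping is acceptable for statistics */
            asm volatile("lock incl %0" : "+m" (v->counter));
    }
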
+ */ + struct timespec last_read_error; /* monotonic time since our + * last read error + */ +- atomic_t corrected_errors; /* number of corrected read errors, ++ atomic_unchecked_t corrected_errors; /* number of corrected read errors, + * for reporting to userspace and storing + * in superblock. + */ +@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) + + static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) + { +- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); ++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + } + + struct md_personality +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 786b689..ea8c956 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) + * Flick into a mode where all blocks get allocated in the new area. + */ + smm->begin = old_len; +- memcpy(sm, &bootstrap_ops, sizeof(*sm)); ++ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm)); + + /* + * Extend. +@@ -710,7 +710,7 @@ out: + /* + * Switch back to normal behaviour. + */ +- memcpy(sm, &ops, sizeof(*sm)); ++ memcpy((void *)sm, &ops, sizeof(*sm)); + return r; + } + +diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h +index 3e6d115..ffecdeb 100644 +--- a/drivers/md/persistent-data/dm-space-map.h ++++ b/drivers/md/persistent-data/dm-space-map.h +@@ -71,6 +71,7 @@ struct dm_space_map { + dm_sm_threshold_fn fn, + void *context); + }; ++typedef struct dm_space_map __no_const dm_space_map_no_const; + + /*----------------------------------------------------------------*/ + +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 56e24c0..e1c8e1f 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) + if (r1_sync_page_io(rdev, sect, s, + bio->bi_io_vec[idx].bv_page, + READ) != 0) +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + } + sectors -= s; + sect += s; +@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, + test_bit(In_sync, &rdev->flags)) { + if (r1_sync_page_io(rdev, sect, s, + conf->tmppage, READ)) { +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + printk(KERN_INFO + "md/raid1:%s: read error corrected " + "(%d sectors at %llu on %s)\n", +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index cb882aa..cb8aeca 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -1949,7 +1949,7 @@ static void end_sync_read(struct bio *bio, int error) + /* The write handler will notice the lack of + * R10BIO_Uptodate and record any errors etc + */ +- atomic_add(r10_bio->sectors, ++ atomic_add_unchecked(r10_bio->sectors, + &conf->mirrors[d].rdev->corrected_errors); + + /* for reconstruct, we always reschedule after a read. 
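
The check_decay_read_errors() hunk below only swaps the accessors, but the logic it preserves is worth spelling out: the persistent read-error count is halved for every hour that has passed since the last error, and the shift amount is clamped first because shifting by the operand's full bit width is undefined behaviour in C. A standalone, userspace rendering of that rule:

    #include <stdio.h>

    static unsigned int decay_read_errors(unsigned int read_errors,
                                          unsigned long hours_since_last)
    {
            /* 8 * sizeof == bit width; a shift that large would be UB */
            if (hours_since_last >= 8 * sizeof(read_errors))
                    return 0;
            return read_errors >> hours_since_last;
    }

    int main(void)
    {
            printf("%u\n", decay_read_errors(1000, 3));     /* prints 125 */
            return 0;
    }
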
+@@ -2307,7 +2307,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) + { + struct timespec cur_time_mon; + unsigned long hours_since_last; +- unsigned int read_errors = atomic_read(&rdev->read_errors); ++ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors); + + ktime_get_ts(&cur_time_mon); + +@@ -2329,9 +2329,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) + * overflowing the shift of read_errors by hours_since_last. + */ + if (hours_since_last >= 8 * sizeof(read_errors)) +- atomic_set(&rdev->read_errors, 0); ++ atomic_set_unchecked(&rdev->read_errors, 0); + else +- atomic_set(&rdev->read_errors, read_errors >> hours_since_last); ++ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last); + } + + static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, +@@ -2385,8 +2385,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 + return; + + check_decay_read_errors(mddev, rdev); +- atomic_inc(&rdev->read_errors); +- if (atomic_read(&rdev->read_errors) > max_read_errors) { ++ atomic_inc_unchecked(&rdev->read_errors); ++ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) { + char b[BDEVNAME_SIZE]; + bdevname(rdev->bdev, b); + +@@ -2394,7 +2394,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 + "md/raid10:%s: %s: Raid device exceeded " + "read_error threshold [cur %d:max %d]\n", + mdname(mddev), b, +- atomic_read(&rdev->read_errors), max_read_errors); ++ atomic_read_unchecked(&rdev->read_errors), max_read_errors); + printk(KERN_NOTICE + "md/raid10:%s: %s: Failing raid device\n", + mdname(mddev), b); +@@ -2549,7 +2549,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 + sect + + choose_data_offset(r10_bio, rdev)), + bdevname(rdev->bdev, b)); +- atomic_add(s, &rdev->corrected_errors); ++ atomic_add_unchecked(s, &rdev->corrected_errors); + } + + rdev_dec_pending(rdev, mddev); +@@ -2954,6 +2954,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, + */ + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { + end_reshape(conf); ++ close_sync(conf); + return 0; + } + +@@ -4411,7 +4412,7 @@ read_more: + read_bio->bi_private = r10_bio; + read_bio->bi_end_io = end_sync_read; + read_bio->bi_rw = READ; +- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); ++ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); + read_bio->bi_flags |= 1 << BIO_UPTODATE; + read_bio->bi_vcnt = 0; + read_bio->bi_iter.bi_size = 0; +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 16f5c21..c5d72c7 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -1707,6 +1707,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash) + return 1; + } + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0); ++#endif ++ + static int grow_stripes(struct r5conf *conf, int num) + { + struct kmem_cache *sc; +@@ -1718,7 +1722,11 @@ static int grow_stripes(struct r5conf *conf, int num) + "raid%d-%s", conf->level, mdname(conf->mddev)); + else + sprintf(conf->cache_name[0], ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id)); ++#else + "raid%d-%p", conf->level, conf->mddev); ++#endif + sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); + + conf->active_name = 0; +@@ -1991,21 +1999,21 @@ static void raid5_end_read_request(struct bio * bi, int error) + mdname(conf->mddev), 
STRIPE_SECTORS, + (unsigned long long)s, + bdevname(rdev->bdev, b)); +- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); ++ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + +- if (atomic_read(&rdev->read_errors)) +- atomic_set(&rdev->read_errors, 0); ++ if (atomic_read_unchecked(&rdev->read_errors)) ++ atomic_set_unchecked(&rdev->read_errors, 0); + } else { + const char *bdn = bdevname(rdev->bdev, b); + int retry = 0; + int set_bad = 0; + + clear_bit(R5_UPTODATE, &sh->dev[i].flags); +- atomic_inc(&rdev->read_errors); ++ atomic_inc_unchecked(&rdev->read_errors); + if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) + printk_ratelimited( + KERN_WARNING +@@ -2033,7 +2041,7 @@ static void raid5_end_read_request(struct bio * bi, int error) + mdname(conf->mddev), + (unsigned long long)s, + bdn); +- } else if (atomic_read(&rdev->read_errors) ++ } else if (atomic_read_unchecked(&rdev->read_errors) + > conf->max_nr_stripes) + printk(KERN_WARNING + "md/raid:%s: Too many read errors, failing device %s.\n", +@@ -3779,6 +3787,8 @@ static void handle_stripe(struct stripe_head *sh) + set_bit(R5_Wantwrite, &dev->flags); + if (prexor) + continue; ++ if (s.failed > 1) ++ continue; + if (!test_bit(R5_Insync, &dev->flags) || + ((i == sh->pd_idx || i == sh->qd_idx) && + s.failed == 0)) +diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c +index 983db75..ef9248c 100644 +--- a/drivers/media/dvb-core/dvbdev.c ++++ b/drivers/media/dvb-core/dvbdev.c +@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, + const struct dvb_device *template, void *priv, int type) + { + struct dvb_device *dvbdev; +- struct file_operations *dvbdevfops; ++ file_operations_no_const *dvbdevfops; + struct device *clsdev; + int minor; + int id; +diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h +index 9b6c3bb..baeb5c7 100644 +--- a/drivers/media/dvb-frontends/dib3000.h ++++ b/drivers/media/dvb-frontends/dib3000.h +@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops + int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff); + int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff); + int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl); +-}; ++} __no_const; + + #if IS_ENABLED(CONFIG_DVB_DIB3000MB) + extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, +diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c +index ed8cb90..5ef7f79 100644 +--- a/drivers/media/pci/cx88/cx88-video.c ++++ b/drivers/media/pci/cx88/cx88-video.c +@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION); + + /* ------------------------------------------------------------------ */ + +-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; +-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; +-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; ++static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; ++static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; ++static int radio_nr[] = {[0 ... 
(CX88_MAXBOARDS - 1)] = UNSET }; + + module_param_array(video_nr, int, NULL, 0444); + module_param_array(vbi_nr, int, NULL, 0444); +diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c +index 802642d..5534900 100644 +--- a/drivers/media/pci/ivtv/ivtv-driver.c ++++ b/drivers/media/pci/ivtv/ivtv-driver.c +@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = { + MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); + + /* ivtv instance counter */ +-static atomic_t ivtv_instance = ATOMIC_INIT(0); ++static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0); + + /* Parameter declarations */ + static int cardtype[IVTV_MAX_CARDS]; +diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c +index dfd0a21..6bbb465 100644 +--- a/drivers/media/platform/omap/omap_vout.c ++++ b/drivers/media/platform/omap/omap_vout.c +@@ -63,7 +63,6 @@ enum omap_vout_channels { + OMAP_VIDEO2, + }; + +-static struct videobuf_queue_ops video_vbq_ops; + /* Variables configurable through module params*/ + static u32 video1_numbuffers = 3; + static u32 video2_numbuffers = 3; +@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file) + { + struct videobuf_queue *q; + struct omap_vout_device *vout = NULL; ++ static struct videobuf_queue_ops video_vbq_ops = { ++ .buf_setup = omap_vout_buffer_setup, ++ .buf_prepare = omap_vout_buffer_prepare, ++ .buf_release = omap_vout_buffer_release, ++ .buf_queue = omap_vout_buffer_queue, ++ }; + + vout = video_drvdata(file); + v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__); +@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file) + vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + q = &vout->vbq; +- video_vbq_ops.buf_setup = omap_vout_buffer_setup; +- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare; +- video_vbq_ops.buf_release = omap_vout_buffer_release; +- video_vbq_ops.buf_queue = omap_vout_buffer_queue; + spin_lock_init(&vout->vbq_lock); + + videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev, +diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h +index fb2acc5..a2fcbdc4 100644 +--- a/drivers/media/platform/s5p-tv/mixer.h ++++ b/drivers/media/platform/s5p-tv/mixer.h +@@ -156,7 +156,7 @@ struct mxr_layer { + /** layer index (unique identifier) */ + int idx; + /** callbacks for layer methods */ +- struct mxr_layer_ops ops; ++ struct mxr_layer_ops *ops; + /** format array */ + const struct mxr_format **fmt_array; + /** size of format array */ +diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c +index 74344c7..a39e70e 100644 +--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c ++++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c +@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx) + { + struct mxr_layer *layer; + int ret; +- struct mxr_layer_ops ops = { ++ static struct mxr_layer_ops ops = { + .release = mxr_graph_layer_release, + .buffer_set = mxr_graph_buffer_set, + .stream_set = mxr_graph_stream_set, +diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c +index b713403..53cb5ad 100644 +--- a/drivers/media/platform/s5p-tv/mixer_reg.c ++++ b/drivers/media/platform/s5p-tv/mixer_reg.c +@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer) + layer->update_buf = next; + } + +- layer->ops.buffer_set(layer, layer->update_buf); ++ layer->ops->buffer_set(layer, 
layer->update_buf); + + if (done && done != layer->shadow_buf) + vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE); +diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c +index c5059ba..2649f28 100644 +--- a/drivers/media/platform/s5p-tv/mixer_video.c ++++ b/drivers/media/platform/s5p-tv/mixer_video.c +@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer) + layer->geo.src.height = layer->geo.src.full_height; + + mxr_geometry_dump(mdev, &layer->geo); +- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0); ++ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0); + mxr_geometry_dump(mdev, &layer->geo); + } + +@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer) + layer->geo.dst.full_width = mbus_fmt.width; + layer->geo.dst.full_height = mbus_fmt.height; + layer->geo.dst.field = mbus_fmt.field; +- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0); ++ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0); + + mxr_geometry_dump(mdev, &layer->geo); + } +@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv, + /* set source size to highest accepted value */ + geo->src.full_width = max(geo->dst.full_width, pix->width); + geo->src.full_height = max(geo->dst.full_height, pix->height); +- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0); ++ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0); + mxr_geometry_dump(mdev, &layer->geo); + /* set cropping to total visible screen */ + geo->src.width = pix->width; +@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv, + geo->src.x_offset = 0; + geo->src.y_offset = 0; + /* assure consistency of geometry */ +- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET); ++ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET); + mxr_geometry_dump(mdev, &layer->geo); + /* set full size to lowest possible value */ + geo->src.full_width = 0; + geo->src.full_height = 0; +- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0); ++ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0); + mxr_geometry_dump(mdev, &layer->geo); + + /* returning results */ +@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh, + target->width = s->r.width; + target->height = s->r.height; + +- layer->ops.fix_geometry(layer, stage, s->flags); ++ layer->ops->fix_geometry(layer, stage, s->flags); + + /* retrieve update selection rectangle */ + res.left = target->x_offset; +@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count) + mxr_output_get(mdev); + + mxr_layer_update_output(layer); +- layer->ops.format_set(layer); ++ layer->ops->format_set(layer); + /* enabling layer in hardware */ + spin_lock_irqsave(&layer->enq_slock, flags); + layer->state = MXR_LAYER_STREAMING; + spin_unlock_irqrestore(&layer->enq_slock, flags); + +- layer->ops.stream_set(layer, MXR_ENABLE); ++ layer->ops->stream_set(layer, MXR_ENABLE); + mxr_streamer_get(mdev); + + return 0; +@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq) + spin_unlock_irqrestore(&layer->enq_slock, flags); + + /* disabling layer in hardware */ +- layer->ops.stream_set(layer, MXR_DISABLE); ++ layer->ops->stream_set(layer, MXR_DISABLE); + /* remove one streamer */ + mxr_streamer_put(mdev); + /* allow changes in output configuration */ +@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer) + + void mxr_layer_release(struct mxr_layer *layer) + { +- if (layer->ops.release) +- 
layer->ops.release(layer); ++ if (layer->ops->release) ++ layer->ops->release(layer); + } + + void mxr_base_layer_release(struct mxr_layer *layer) +@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev, + + layer->mdev = mdev; + layer->idx = idx; +- layer->ops = *ops; ++ layer->ops = ops; + + spin_lock_init(&layer->enq_slock); + INIT_LIST_HEAD(&layer->enq_list); +diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c +index c9388c4..ce71ece 100644 +--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c ++++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c +@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx) + { + struct mxr_layer *layer; + int ret; +- struct mxr_layer_ops ops = { ++ static struct mxr_layer_ops ops = { + .release = mxr_vp_layer_release, + .buffer_set = mxr_vp_buffer_set, + .stream_set = mxr_vp_stream_set, +diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c +index 2d4e73b..8b4d5b6 100644 +--- a/drivers/media/platform/vivi.c ++++ b/drivers/media/platform/vivi.c +@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol"); + MODULE_LICENSE("Dual BSD/GPL"); + MODULE_VERSION(VIVI_VERSION); + +-static unsigned video_nr = -1; +-module_param(video_nr, uint, 0644); ++static int video_nr = -1; ++module_param(video_nr, int, 0644); + MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect"); + + static unsigned n_devs = 1; +diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c +index 545c04c..a14bded 100644 +--- a/drivers/media/radio/radio-cadet.c ++++ b/drivers/media/radio/radio-cadet.c +@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo + unsigned char readbuf[RDS_BUFFER]; + int i = 0; + ++ if (count > RDS_BUFFER) ++ return -EFAULT; + mutex_lock(&dev->lock); + if (dev->rdsstat == 0) + cadet_start_rds(dev); +@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo + while (i < count && dev->rdsin != dev->rdsout) + readbuf[i++] = dev->rdsbuf[dev->rdsout++]; + +- if (i && copy_to_user(data, readbuf, i)) ++ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i)) + i = -EFAULT; + unlock: + mutex_unlock(&dev->lock); +diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c +index 5236035..c622c74 100644 +--- a/drivers/media/radio/radio-maxiradio.c ++++ b/drivers/media/radio/radio-maxiradio.c +@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number"); + /* TEA5757 pin mappings */ + static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16; + +-static atomic_t maxiradio_instance = ATOMIC_INIT(0); ++static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0); + + #define PCI_VENDOR_ID_GUILLEMOT 0x5046 + #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001 +diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c +index 050b3bb..79f62b9 100644 +--- a/drivers/media/radio/radio-shark.c ++++ b/drivers/media/radio/radio-shark.c +@@ -79,7 +79,7 @@ struct shark_device { + u32 last_val; + }; + +-static atomic_t shark_instance = ATOMIC_INIT(0); ++static atomic_unchecked_t shark_instance = ATOMIC_INIT(0); + + static void shark_write_val(struct snd_tea575x *tea, u32 val) + { +diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c +index 8654e0d..0608a64 100644 +--- 
a/drivers/media/radio/radio-shark2.c ++++ b/drivers/media/radio/radio-shark2.c +@@ -74,7 +74,7 @@ struct shark_device { + u8 *transfer_buffer; + }; + +-static atomic_t shark_instance = ATOMIC_INIT(0); ++static atomic_unchecked_t shark_instance = ATOMIC_INIT(0); + + static int shark_write_reg(struct radio_tea5777 *tea, u64 reg) + { +diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c +index 2fd9009..278cc1e 100644 +--- a/drivers/media/radio/radio-si476x.c ++++ b/drivers/media/radio/radio-si476x.c +@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev) + struct si476x_radio *radio; + struct v4l2_ctrl *ctrl; + +- static atomic_t instance = ATOMIC_INIT(0); ++ static atomic_unchecked_t instance = ATOMIC_INIT(0); + + radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL); + if (!radio) +diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c +index 9fd1527..8927230 100644 +--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c ++++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c +@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties; + + static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) + { +- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 }; +- char result[64]; +- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result, +- sizeof(result), 0); ++ char *buf; ++ char *result; ++ int retval; ++ ++ buf = kmalloc(2, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ result = kmalloc(64, GFP_KERNEL); ++ if (result == NULL) { ++ kfree(buf); ++ return -ENOMEM; ++ } ++ ++ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER; ++ buf[1] = enable ? 1 : 0; ++ ++ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0); ++ ++ kfree(buf); ++ kfree(result); ++ return retval; + } + + static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) + { +- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 }; +- char state[3]; +- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0); ++ char *buf; ++ char *state; ++ int retval; ++ ++ buf = kmalloc(2, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ state = kmalloc(3, GFP_KERNEL); ++ if (state == NULL) { ++ kfree(buf); ++ return -ENOMEM; ++ } ++ ++ buf[0] = CINERGYT2_EP1_SLEEP_MODE; ++ buf[1] = enable ? 
0 : 1; ++ ++ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0); ++ ++ kfree(buf); ++ kfree(state); ++ return retval; + } + + static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) + { +- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION }; +- char state[3]; ++ char *query; ++ char *state; + int ret; ++ query = kmalloc(1, GFP_KERNEL); ++ if (query == NULL) ++ return -ENOMEM; ++ state = kmalloc(3, GFP_KERNEL); ++ if (state == NULL) { ++ kfree(query); ++ return -ENOMEM; ++ } ++ ++ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION; + + adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); + +- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state, +- sizeof(state), 0); ++ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0); + if (ret < 0) { + deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " + "state info\n"); +@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) + + /* Copy this pointer as we are gonna need it in the release phase */ + cinergyt2_usb_device = adap->dev; +- ++ kfree(query); ++ kfree(state); + return 0; + } + +@@ -141,12 +186,23 @@ static int repeatable_keys[] = { + static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) + { + struct cinergyt2_state *st = d->priv; +- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS; ++ u8 *key, *cmd; + int i; + ++ cmd = kmalloc(1, GFP_KERNEL); ++ if (cmd == NULL) ++ return -EINVAL; ++ key = kzalloc(5, GFP_KERNEL); ++ if (key == NULL) { ++ kfree(cmd); ++ return -EINVAL; ++ } ++ ++ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS; ++ + *state = REMOTE_NO_KEY_PRESSED; + +- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0); ++ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0); + if (key[4] == 0xff) { + /* key repeat */ + st->rc_counter++; +@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) + *event = d->last_event; + deb_rc("repeat key, event %x\n", + *event); +- return 0; ++ goto out; + } + } + deb_rc("repeated key (non repeatable)\n"); + } +- return 0; ++ goto out; + } + + /* hack to pass checksum on the custom field */ +@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) + + deb_rc("key: %*ph\n", 5, key); + } ++out: ++ kfree(cmd); ++ kfree(key); + return 0; + } + +diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c +index c890fe4..f9b2ae6 100644 +--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c ++++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c +@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe, + fe_status_t *status) + { + struct cinergyt2_fe_state *state = fe->demodulator_priv; +- struct dvbt_get_status_msg result; +- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; ++ struct dvbt_get_status_msg *result; ++ u8 *cmd; + int ret; + +- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result, +- sizeof(result), 0); ++ cmd = kmalloc(1, GFP_KERNEL); ++ if (cmd == NULL) ++ return -ENOMEM; ++ result = kmalloc(sizeof(*result), GFP_KERNEL); ++ if (result == NULL) { ++ kfree(cmd); ++ return -ENOMEM; ++ } ++ ++ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS; ++ ++ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result, ++ sizeof(*result), 0); + if (ret < 0) +- return ret; ++ goto out; + + *status = 0; + +- if (0xffff - le16_to_cpu(result.gain) > 30) ++ if (0xffff - le16_to_cpu(result->gain) > 30) + *status |= FE_HAS_SIGNAL; +- if (result.lock_bits & (1 << 6)) ++ if (result->lock_bits & (1 << 6)) + 
*status |= FE_HAS_LOCK; +- if (result.lock_bits & (1 << 5)) ++ if (result->lock_bits & (1 << 5)) + *status |= FE_HAS_SYNC; +- if (result.lock_bits & (1 << 4)) ++ if (result->lock_bits & (1 << 4)) + *status |= FE_HAS_CARRIER; +- if (result.lock_bits & (1 << 1)) ++ if (result->lock_bits & (1 << 1)) + *status |= FE_HAS_VITERBI; + + if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != + (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) + *status &= ~FE_HAS_LOCK; + +- return 0; ++out: ++ kfree(cmd); ++ kfree(result); ++ return ret; + } + + static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) + { + struct cinergyt2_fe_state *state = fe->demodulator_priv; +- struct dvbt_get_status_msg status; +- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; ++ struct dvbt_get_status_msg *status; ++ char *cmd; + int ret; + +- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, +- sizeof(status), 0); ++ cmd = kmalloc(1, GFP_KERNEL); ++ if (cmd == NULL) ++ return -ENOMEM; ++ status = kmalloc(sizeof(*status), GFP_KERNEL); ++ if (status == NULL) { ++ kfree(cmd); ++ return -ENOMEM; ++ } ++ ++ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS; ++ ++ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status, ++ sizeof(*status), 0); + if (ret < 0) +- return ret; ++ goto out; + +- *ber = le32_to_cpu(status.viterbi_error_rate); ++ *ber = le32_to_cpu(status->viterbi_error_rate); ++out: ++ kfree(cmd); ++ kfree(status); + return 0; + } + + static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) + { + struct cinergyt2_fe_state *state = fe->demodulator_priv; +- struct dvbt_get_status_msg status; +- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; ++ struct dvbt_get_status_msg *status; ++ u8 *cmd; + int ret; + +- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status, +- sizeof(status), 0); ++ cmd = kmalloc(1, GFP_KERNEL); ++ if (cmd == NULL) ++ return -ENOMEM; ++ status = kmalloc(sizeof(*status), GFP_KERNEL); ++ if (status == NULL) { ++ kfree(cmd); ++ return -ENOMEM; ++ } ++ ++ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS; ++ ++ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status, ++ sizeof(*status), 0); + if (ret < 0) { + err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n", + ret); +- return ret; ++ goto out; + } +- *unc = le32_to_cpu(status.uncorrected_block_count); +- return 0; ++ *unc = le32_to_cpu(status->uncorrected_block_count); ++ ++out: ++ kfree(cmd); ++ kfree(status); ++ return ret; + } + + static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe, + u16 *strength) + { + struct cinergyt2_fe_state *state = fe->demodulator_priv; +- struct dvbt_get_status_msg status; +- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; ++ struct dvbt_get_status_msg *status; ++ char *cmd; + int ret; + +- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, +- sizeof(status), 0); ++ cmd = kmalloc(1, GFP_KERNEL); ++ if (cmd == NULL) ++ return -ENOMEM; ++ status = kmalloc(sizeof(*status), GFP_KERNEL); ++ if (status == NULL) { ++ kfree(cmd); ++ return -ENOMEM; ++ } ++ ++ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS; ++ ++ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status, ++ sizeof(*status), 0); + if (ret < 0) { + err("cinergyt2_fe_read_signal_strength() Failed!" 
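
Every conversion in this cinergyT2 file is the same transform: command and reply buffers that used to live on the stack are moved to kmalloc(). The buffers are handed to dvb_usb_generic_rw() and from there to usb_bulk_msg(), i.e. they are DMA targets, and DMA to or from stack memory is unsafe (and impossible once the stack itself is vmalloc'd, as grsecurity's KSTACKOVERFLOW arranges). The shape of the pattern, reduced to its essentials; REPLY_LEN and the reply offset are illustrative, not taken from the driver:

    static int query_tuner_status(struct dvb_usb_device *d, u8 *snr_out)
    {
            u8 *cmd, *reply;
            int ret;

            cmd = kmalloc(1, GFP_KERNEL);           /* heap, so DMA-safe */
            if (!cmd)
                    return -ENOMEM;
            reply = kmalloc(REPLY_LEN, GFP_KERNEL);
            if (!reply) {
                    kfree(cmd);
                    return -ENOMEM;
            }

            cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
            ret = dvb_usb_generic_rw(d, cmd, 1, reply, REPLY_LEN, 0);
            if (ret >= 0)
                    *snr_out = reply[2];            /* illustrative offset */

            kfree(cmd);
            kfree(reply);
            return ret;
    }
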
+ " (Error=%d)\n", ret); +- return ret; ++ goto out; + } +- *strength = (0xffff - le16_to_cpu(status.gain)); ++ *strength = (0xffff - le16_to_cpu(status->gain)); ++ ++out: ++ kfree(cmd); ++ kfree(status); + return 0; + } + + static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) + { + struct cinergyt2_fe_state *state = fe->demodulator_priv; +- struct dvbt_get_status_msg status; +- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; ++ struct dvbt_get_status_msg *status; ++ char *cmd; + int ret; + +- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, +- sizeof(status), 0); ++ cmd = kmalloc(1, GFP_KERNEL); ++ if (cmd == NULL) ++ return -ENOMEM; ++ status = kmalloc(sizeof(*status), GFP_KERNEL); ++ if (status == NULL) { ++ kfree(cmd); ++ return -ENOMEM; ++ } ++ ++ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS; ++ ++ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status, ++ sizeof(*status), 0); + if (ret < 0) { + err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret); +- return ret; ++ goto out; + } +- *snr = (status.snr << 8) | status.snr; +- return 0; ++ *snr = (status->snr << 8) | status->snr; ++ ++out: ++ kfree(cmd); ++ kfree(status); ++ return ret; + } + + static int cinergyt2_fe_init(struct dvb_frontend *fe) +@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe) + { + struct dtv_frontend_properties *fep = &fe->dtv_property_cache; + struct cinergyt2_fe_state *state = fe->demodulator_priv; +- struct dvbt_set_parameters_msg param; +- char result[2]; ++ struct dvbt_set_parameters_msg *param; ++ char *result; + int err; + +- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; +- param.tps = cpu_to_le16(compute_tps(fep)); +- param.freq = cpu_to_le32(fep->frequency / 1000); +- param.flags = 0; ++ result = kmalloc(2, GFP_KERNEL); ++ if (result == NULL) ++ return -ENOMEM; ++ param = kmalloc(sizeof(*param), GFP_KERNEL); ++ if (param == NULL) { ++ kfree(result); ++ return -ENOMEM; ++ } ++ ++ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; ++ param->tps = cpu_to_le16(compute_tps(fep)); ++ param->freq = cpu_to_le32(fep->frequency / 1000); ++ param->flags = 0; + + switch (fep->bandwidth_hz) { + default: + case 8000000: +- param.bandwidth = 8; ++ param->bandwidth = 8; + break; + case 7000000: +- param.bandwidth = 7; ++ param->bandwidth = 7; + break; + case 6000000: +- param.bandwidth = 6; ++ param->bandwidth = 6; + break; + } + + err = dvb_usb_generic_rw(state->d, +- (char *)¶m, sizeof(param), +- result, sizeof(result), 0); ++ (char *)param, sizeof(*param), ++ result, 2, 0); + if (err < 0) + err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); + +- return (err < 0) ? 
err : 0; ++ kfree(result); ++ kfree(param); ++ return err; + } + + static void cinergyt2_fe_release(struct dvb_frontend *fe) +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c +index a1c641e..3007da9 100644 +--- a/drivers/media/usb/dvb-usb/cxusb.c ++++ b/drivers/media/usb/dvb-usb/cxusb.c +@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_dib0070_config = { + + struct dib0700_adapter_state { + int (*set_param_save) (struct dvb_frontend *); +-}; ++} __no_const; + + static int dib7070_set_param_override(struct dvb_frontend *fe) + { +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +index 733a7ff..f8b52e3 100644 +--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c ++++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le + + int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) + { +- struct hexline hx; +- u8 reset; ++ struct hexline *hx; ++ u8 *reset; + int ret,pos=0; + ++ reset = kmalloc(1, GFP_KERNEL); ++ if (reset == NULL) ++ return -ENOMEM; ++ ++ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL); ++ if (hx == NULL) { ++ kfree(reset); ++ return -ENOMEM; ++ } ++ + /* stop the CPU */ +- reset = 1; +- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) ++ reset[0] = 1; ++ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1) + err("could not stop the USB controller CPU."); + +- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) { +- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk); +- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len); ++ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) { ++ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk); ++ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len); + +- if (ret != hx.len) { ++ if (ret != hx->len) { + err("error while transferring firmware " + "(transferred size: %d, block size: %d)", +- ret,hx.len); ++ ret,hx->len); + ret = -EINVAL; + break; + } + } + if (ret < 0) { + err("firmware download failed at %d with %d",pos,ret); ++ kfree(reset); ++ kfree(hx); + return ret; + } + + if (ret == 0) { + /* restart the CPU */ +- reset = 0; +- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { ++ reset[0] = 0; ++ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) { + err("could not restart the USB controller CPU."); + ret = -EINVAL; + } + } else + ret = -EIO; + ++ kfree(reset); ++ kfree(hx); ++ + return ret; + } + EXPORT_SYMBOL(usb_cypress_load_firmware); +diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c +index ae0f56a..ec71784 100644 +--- a/drivers/media/usb/dvb-usb/dw2102.c ++++ b/drivers/media/usb/dvb-usb/dw2102.c +@@ -118,7 +118,7 @@ struct su3000_state { + + struct s6x0_state { + int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v); +-}; ++} __no_const; + + /* debug */ + static int dvb_usb_dw2102_debug; +diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c +index 98d24ae..bc22415 100644 +--- a/drivers/media/usb/dvb-usb/technisat-usb2.c ++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c +@@ -87,8 +87,11 @@ struct technisat_usb2_state { + static int technisat_usb2_i2c_access(struct usb_device *udev, + u8 device_addr, u8 *tx, 
u8 txlen, u8 *rx, u8 rxlen) + { +- u8 b[64]; +- int ret, actual_length; ++ u8 *b = kmalloc(64, GFP_KERNEL); ++ int ret, actual_length, error = 0; ++ ++ if (b == NULL) ++ return -ENOMEM; + + deb_i2c("i2c-access: %02x, tx: ", device_addr); + debug_dump(tx, txlen, deb_i2c); +@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, + + if (ret < 0) { + err("i2c-error: out failed %02x = %d", device_addr, ret); +- return -ENODEV; ++ error = -ENODEV; ++ goto out; + } + + ret = usb_bulk_msg(udev, +@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, + b, 64, &actual_length, 1000); + if (ret < 0) { + err("i2c-error: in failed %02x = %d", device_addr, ret); +- return -ENODEV; ++ error = -ENODEV; ++ goto out; + } + + if (b[0] != I2C_STATUS_OK) { +@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, + /* handle tuner-i2c-nak */ + if (!(b[0] == I2C_STATUS_NAK && + device_addr == 0x60 +- /* && device_is_technisat_usb2 */)) +- return -ENODEV; ++ /* && device_is_technisat_usb2 */)) { ++ error = -ENODEV; ++ goto out; ++ } + } + + deb_i2c("status: %d, ", b[0]); +@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, + + deb_i2c("\n"); + +- return 0; ++out: ++ kfree(b); ++ return error; + } + + static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, +@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni + { + int ret; + +- u8 led[8] = { +- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, +- 0 +- }; ++ u8 *led = kzalloc(8, GFP_KERNEL); ++ ++ if (led == NULL) ++ return -ENOMEM; + + if (disable_led_control && state != LED_OFF) + return 0; + ++ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST; ++ + switch (state) { + case LED_ON: + led[1] = 0x82; +@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni + red ? 
SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST, + USB_TYPE_VENDOR | USB_DIR_OUT, + 0, 0, +- led, sizeof(led), 500); ++ led, 8, 500); + + mutex_unlock(&d->i2c_mutex); ++ ++ kfree(led); ++ + return ret; + } + + static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green) + { + int ret; +- u8 b = 0; ++ u8 *b = kzalloc(1, GFP_KERNEL); ++ ++ if (b == NULL) ++ return -ENOMEM; + + if (mutex_lock_interruptible(&d->i2c_mutex) < 0) + return -EAGAIN; +@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre + SET_LED_TIMER_DIVIDER_VENDOR_REQUEST, + USB_TYPE_VENDOR | USB_DIR_OUT, + (red << 8) | green, 0, +- &b, 1, 500); ++ b, 1, 500); + + mutex_unlock(&d->i2c_mutex); + ++ kfree(b); ++ + return ret; + } + +@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev, + struct dvb_usb_device_description **desc, int *cold) + { + int ret; +- u8 version[3]; ++ u8 *version = kmalloc(3, GFP_KERNEL); + + /* first select the interface */ + if (usb_set_interface(udev, 0, 1) != 0) +@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev, + + *cold = 0; /* by default do not download a firmware - just in case something is wrong */ + ++ if (version == NULL) ++ return 0; ++ + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + GET_VERSION_INFO_VENDOR_REQUEST, + USB_TYPE_VENDOR | USB_DIR_IN, + 0, 0, +- version, sizeof(version), 500); ++ version, 3, 500); + + if (ret < 0) + *cold = 1; +@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev, + *cold = 0; + } + ++ kfree(version); ++ + return 0; + } + +@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) + + static int technisat_usb2_get_ir(struct dvb_usb_device *d) + { +- u8 buf[62], *b; ++ u8 *buf, *b; + int ret; + struct ir_raw_event ev; + ++ buf = kmalloc(62, GFP_KERNEL); ++ ++ if (buf == NULL) ++ return -ENOMEM; ++ + buf[0] = GET_IR_DATA_VENDOR_REQUEST; + buf[1] = 0x08; + buf[2] = 0x8f; +@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d) + GET_IR_DATA_VENDOR_REQUEST, + USB_TYPE_VENDOR | USB_DIR_IN, + 0x8080, 0, +- buf, sizeof(buf), 500); ++ buf, 62, 500); + + unlock: + mutex_unlock(&d->i2c_mutex); + +- if (ret < 0) ++ if (ret < 0) { ++ kfree(buf); + return ret; ++ } + +- if (ret == 1) ++ if (ret == 1) { ++ kfree(buf); + return 0; /* no key pressed */ ++ } + + /* decoding */ + b = buf+1; +@@ -653,6 +686,8 @@ unlock: + + ir_raw_event_handle(d->rc_dev); + ++ kfree(buf); ++ + return 1; + } + +diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +index fca336b..fb70ab7 100644 +--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c ++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +@@ -328,7 +328,7 @@ struct v4l2_buffer32 { + __u32 reserved; + }; + +-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32, ++static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, + enum v4l2_memory memory) + { + void __user *up_pln; +@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32, + return 0; + } + +-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32, ++static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, + enum v4l2_memory memory) + { + if (copy_in_user(up32, up, 2 * sizeof(__u32)) || +@@ -427,7 +427,7 @@ static int 
get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user + * by passing a very big num_planes value */ + uplane = compat_alloc_user_space(num_planes * + sizeof(struct v4l2_plane)); +- kp->m.planes = uplane; ++ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane; + + while (--num_planes >= 0) { + ret = get_v4l2_plane32(uplane, uplane32, kp->memory); +@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user + if (num_planes == 0) + return 0; + +- uplane = kp->m.planes; ++ uplane = (struct v4l2_plane __force_user *)kp->m.planes; + if (get_user(p, &up->m.planes)) + return -EFAULT; + uplane32 = compat_ptr(p); +@@ -552,7 +552,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame + get_user(kp->capability, &up->capability) || + get_user(kp->flags, &up->flags)) + return -EFAULT; +- kp->base = compat_ptr(tmp); ++ kp->base = (void __force_kernel *)compat_ptr(tmp); + get_v4l2_pix_format(&kp->fmt, &up->fmt); + return 0; + } +@@ -658,7 +658,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext + n * sizeof(struct v4l2_ext_control32))) + return -EFAULT; + kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control)); +- kp->controls = kcontrols; ++ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols; + while (--n >= 0) { + if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) + return -EFAULT; +@@ -680,7 +680,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext + static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) + { + struct v4l2_ext_control32 __user *ucontrols; +- struct v4l2_ext_control __user *kcontrols = kp->controls; ++ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls; + int n = kp->count; + compat_caddr_t p; + +@@ -774,7 +774,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde + put_user(kp->start_block, &up->start_block) || + put_user(kp->blocks, &up->blocks) || + put_user(tmp, &up->edid) || +- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved))) ++ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) + return -EFAULT; + return 0; + } +diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c +index 6ff002b..6b9316b 100644 +--- a/drivers/media/v4l2-core/v4l2-ctrls.c ++++ b/drivers/media/v4l2-core/v4l2-ctrls.c +@@ -1401,8 +1401,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl, + return 0; + + case V4L2_CTRL_TYPE_STRING: +- len = strlen(c->string); +- if (len < ctrl->minimum) ++ len = strlen_user(c->string); ++ if (!len || len < ctrl->minimum) + return -ERANGE; + if ((len - ctrl->minimum) % ctrl->step) + return -ERANGE; +diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c +index 02d1b63..5fd6b16 100644 +--- a/drivers/media/v4l2-core/v4l2-device.c ++++ b/drivers/media/v4l2-core/v4l2-device.c +@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev) + EXPORT_SYMBOL_GPL(v4l2_device_put); + + int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, +- atomic_t *instance) ++ atomic_unchecked_t *instance) + { +- int num = atomic_inc_return(instance) - 1; ++ int num = atomic_inc_return_unchecked(instance) - 1; + int len = strlen(basename); + + if (basename[len - 1] >= '0' && basename[len - 1] <= '9') +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c 
b/drivers/media/v4l2-core/v4l2-ioctl.c +index 707aef7..93b8ac0 100644 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c +@@ -1942,7 +1942,8 @@ struct v4l2_ioctl_info { + struct file *file, void *fh, void *p); + } u; + void (*debug)(const void *arg, bool write_only); +-}; ++} __do_const; ++typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const; + + /* This control needs a priority check */ + #define INFO_FL_PRIO (1 << 0) +@@ -2123,7 +2124,7 @@ static long __video_do_ioctl(struct file *file, + struct video_device *vfd = video_devdata(file); + const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; + bool write_only = false; +- struct v4l2_ioctl_info default_info; ++ v4l2_ioctl_info_no_const default_info; + const struct v4l2_ioctl_info *info; + void *fh = file->private_data; + struct v4l2_fh *vfh = NULL; +@@ -2197,7 +2198,7 @@ done: + } + + static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, +- void * __user *user_ptr, void ***kernel_ptr) ++ void __user **user_ptr, void ***kernel_ptr) + { + int ret = 0; + +@@ -2213,7 +2214,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, + ret = -EINVAL; + break; + } +- *user_ptr = (void __user *)buf->m.planes; ++ *user_ptr = (void __force_user *)buf->m.planes; + *kernel_ptr = (void *)&buf->m.planes; + *array_size = sizeof(struct v4l2_plane) * buf->length; + ret = 1; +@@ -2248,7 +2249,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, + ret = -EINVAL; + break; + } +- *user_ptr = (void __user *)ctrls->controls; ++ *user_ptr = (void __force_user *)ctrls->controls; + *kernel_ptr = (void *)&ctrls->controls; + *array_size = sizeof(struct v4l2_ext_control) + * ctrls->count; +@@ -2349,7 +2350,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + } + + if (has_array_args) { +- *kernel_ptr = user_ptr; ++ *kernel_ptr = (void __force_kernel *)user_ptr; + if (copy_to_user(user_ptr, mbuf, array_size)) + err = -EFAULT; + goto out_array_args; +diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c +index 570b18a..f880314 100644 +--- a/drivers/message/fusion/mptbase.c ++++ b/drivers/message/fusion/mptbase.c +@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v) + seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); + seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL); ++#else + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); ++#endif ++ + /* + * Rounding UP to nearest 4-kB boundary here... 
+ */ +@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v) + ioc->facts.GlobalCredits); + + seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, NULL); ++#else + (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma); ++#endif + sz = (ioc->reply_sz * ioc->reply_depth) + 128; + seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", + ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); +diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c +index 00d339c..2ea899d 100644 +--- a/drivers/message/fusion/mptsas.c ++++ b/drivers/message/fusion/mptsas.c +@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached) + return 0; + } + ++static inline void ++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) ++{ ++ if (phy_info->port_details) { ++ phy_info->port_details->rphy = rphy; ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", ++ ioc->name, rphy)); ++ } ++ ++ if (rphy) { ++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG, ++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", ++ ioc->name, rphy, rphy->dev.release)); ++ } ++} ++ + /* no mutex */ + static void + mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) +@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info) + return NULL; + } + +-static inline void +-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) +-{ +- if (phy_info->port_details) { +- phy_info->port_details->rphy = rphy; +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", +- ioc->name, rphy)); +- } +- +- if (rphy) { +- dsaswideprintk(ioc, dev_printk(KERN_DEBUG, +- &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", +- ioc->name, rphy, rphy->dev.release)); +- } +-} +- + static inline struct sas_port * + mptsas_get_port(struct mptsas_phyinfo *phy_info) + { +diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c +index 727819c..ad74694 100644 +--- a/drivers/message/fusion/mptscsih.c ++++ b/drivers/message/fusion/mptscsih.c +@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost) + + h = shost_priv(SChost); + +- if (h) { +- if (h->info_kbuf == NULL) +- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) +- return h->info_kbuf; +- h->info_kbuf[0] = '\0'; ++ if (!h) ++ return NULL; + +- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); +- h->info_kbuf[size-1] = '\0'; +- } ++ if (h->info_kbuf == NULL) ++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) ++ return h->info_kbuf; ++ h->info_kbuf[0] = '\0'; ++ ++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); ++ h->info_kbuf[size-1] = '\0'; + + return h->info_kbuf; + } +diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c +index b7d87cd..3fb36da 100644 +--- a/drivers/message/i2o/i2o_proc.c ++++ b/drivers/message/i2o/i2o_proc.c +@@ -255,12 +255,6 @@ static char *scsi_devices[] = { + "Array Controller Device" + }; + +-static char *chtostr(char *tmp, u8 *chars, int n) +-{ +- tmp[0] = 0; +- return strncat(tmp, (char *)chars, n); +-} +- + static int i2o_report_query_status(struct seq_file *seq, int block_status, + char *group) + { +@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void 
*v) + static int i2o_seq_show_hw(struct seq_file *seq, void *v) + { + struct i2o_controller *c = (struct i2o_controller *)seq->private; +- static u32 work32[5]; +- static u8 *work8 = (u8 *) work32; +- static u16 *work16 = (u16 *) work32; ++ u32 work32[5]; ++ u8 *work8 = (u8 *) work32; ++ u16 *work16 = (u16 *) work32; + int token; + u32 hwcap; + +@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) + } *result; + + i2o_exec_execute_ddm_table ddm_table; +- char tmp[28 + 1]; + + result = kmalloc(sizeof(*result), GFP_KERNEL); + if (!result) +@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) + + seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); + seq_printf(seq, "%-#8x", ddm_table.module_id); +- seq_printf(seq, "%-29s", +- chtostr(tmp, ddm_table.module_name_version, 28)); ++ seq_printf(seq, "%-.28s", ddm_table.module_name_version); + seq_printf(seq, "%9d ", ddm_table.data_size); + seq_printf(seq, "%8d", ddm_table.code_size); + +@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) + + i2o_driver_result_table *result; + i2o_driver_store_table *dst; +- char tmp[28 + 1]; + + result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); + if (result == NULL) +@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) + + seq_printf(seq, "%-#7x", dst->i2o_vendor_id); + seq_printf(seq, "%-#8x", dst->module_id); +- seq_printf(seq, "%-29s", +- chtostr(tmp, dst->module_name_version, 28)); +- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8)); ++ seq_printf(seq, "%-.28s", dst->module_name_version); ++ seq_printf(seq, "%-.8s", dst->date); + seq_printf(seq, "%8d ", dst->module_size); + seq_printf(seq, "%8d ", dst->mpb_size); + seq_printf(seq, "0x%04x", dst->module_flags); +@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v) + static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) + { + struct i2o_device *d = (struct i2o_device *)seq->private; +- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number ++ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number + // == (allow) 512d bytes (max) +- static u16 *work16 = (u16 *) work32; ++ u16 *work16 = (u16 *) work32; + int token; +- char tmp[16 + 1]; + + token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32)); + +@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) + seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); + seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); + seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); +- seq_printf(seq, "Vendor info : %s\n", +- chtostr(tmp, (u8 *) (work32 + 2), 16)); +- seq_printf(seq, "Product info : %s\n", +- chtostr(tmp, (u8 *) (work32 + 6), 16)); +- seq_printf(seq, "Description : %s\n", +- chtostr(tmp, (u8 *) (work32 + 10), 16)); +- seq_printf(seq, "Product rev. : %s\n", +- chtostr(tmp, (u8 *) (work32 + 14), 8)); ++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2)); ++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6)); ++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10)); ++ seq_printf(seq, "Product rev. 
: %.8s\n", (u8 *) (work32 + 14)); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, (u8 *) (work32 + 16), +@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) + u8 pad[256]; // allow up to 256 byte (max) serial number + } result; + +- char tmp[24 + 1]; +- + token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result)); + + if (token < 0) { +@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) + } + + seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); +- seq_printf(seq, "Module name : %s\n", +- chtostr(tmp, result.module_name, 24)); +- seq_printf(seq, "Module revision : %s\n", +- chtostr(tmp, result.module_rev, 8)); ++ seq_printf(seq, "Module name : %.24s\n", result.module_name); ++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, result.serial_number, sizeof(result) - 36); +@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) + u8 instance_number[4]; + } result; + +- char tmp[64 + 1]; +- + token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result)); + + if (token < 0) { +@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) + return 0; + } + +- seq_printf(seq, "Device name : %s\n", +- chtostr(tmp, result.device_name, 64)); +- seq_printf(seq, "Service name : %s\n", +- chtostr(tmp, result.service_name, 64)); +- seq_printf(seq, "Physical name : %s\n", +- chtostr(tmp, result.physical_location, 64)); +- seq_printf(seq, "Instance number : %s\n", +- chtostr(tmp, result.instance_number, 4)); ++ seq_printf(seq, "Device name : %.64s\n", result.device_name); ++ seq_printf(seq, "Service name : %.64s\n", result.service_name); ++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location); ++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number); + + return 0; + } +@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) + static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v) + { + struct i2o_device *d = (struct i2o_device *)seq->private; +- static u32 work32[12]; +- static u16 *work16 = (u16 *) work32; +- static u8 *work8 = (u8 *) work32; ++ u32 work32[12]; ++ u16 *work16 = (u16 *) work32; ++ u8 *work8 = (u8 *) work32; + int token; + + token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32)); +diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c +index a8c08f3..155fe3d 100644 +--- a/drivers/message/i2o/iop.c ++++ b/drivers/message/i2o/iop.c +@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) + + spin_lock_irqsave(&c->context_list_lock, flags); + +- if (unlikely(atomic_inc_and_test(&c->context_list_counter))) +- atomic_inc(&c->context_list_counter); ++ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter))) ++ atomic_inc_unchecked(&c->context_list_counter); + +- entry->context = atomic_read(&c->context_list_counter); ++ entry->context = atomic_read_unchecked(&c->context_list_counter); + + list_add(&entry->list, &c->context_list); + +@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void) + + #if BITS_PER_LONG == 64 + spin_lock_init(&c->context_list_lock); +- atomic_set(&c->context_list_counter, 0); ++ atomic_set_unchecked(&c->context_list_counter, 0); + INIT_LIST_HEAD(&c->context_list); + #endif + +diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c +index d1a22aa..d0f7bf7 100644 
+--- a/drivers/mfd/ab8500-debugfs.c ++++ b/drivers/mfd/ab8500-debugfs.c +@@ -100,7 +100,7 @@ static int irq_last; + static u32 *irq_count; + static int num_irqs; + +-static struct device_attribute **dev_attr; ++static device_attribute_no_const **dev_attr; + static char **event_name; + + static u8 avg_sample = SAMPLE_16; +diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c +index a83eed5..62a58a9 100644 +--- a/drivers/mfd/max8925-i2c.c ++++ b/drivers/mfd/max8925-i2c.c +@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { + struct max8925_platform_data *pdata = dev_get_platdata(&client->dev); +- static struct max8925_chip *chip; ++ struct max8925_chip *chip; + struct device_node *node = client->dev.of_node; + + if (node && !pdata) { +diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c +index d657331..0d9a80f 100644 +--- a/drivers/mfd/tps65910.c ++++ b/drivers/mfd/tps65910.c +@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq, + struct tps65910_platform_data *pdata) + { + int ret = 0; +- static struct regmap_irq_chip *tps6591x_irqs_chip; ++ struct regmap_irq_chip *tps6591x_irqs_chip; + + if (!irq) { + dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n"); +diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c +index 9aa6d1e..1631bfc 100644 +--- a/drivers/mfd/twl4030-irq.c ++++ b/drivers/mfd/twl4030-irq.c +@@ -35,6 +35,7 @@ + #include <linux/of.h> + #include <linux/irqdomain.h> + #include <linux/i2c/twl.h> ++#include <asm/pgtable.h> + + #include "twl-core.h" + +@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num) + * Install an irq handler for each of the SIH modules; + * clone dummy irq_chip since PIH can't *do* anything + */ +- twl4030_irq_chip = dummy_irq_chip; +- twl4030_irq_chip.name = "twl4030"; ++ pax_open_kernel(); ++ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip); ++ *(const char **)&twl4030_irq_chip.name = "twl4030"; + +- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; ++ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; ++ pax_close_kernel(); + + for (i = irq_base; i < irq_end; i++) { + irq_set_chip_and_handler(i, &twl4030_irq_chip, +diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c +index 464419b..64bae8d 100644 +--- a/drivers/misc/c2port/core.c ++++ b/drivers/misc/c2port/core.c +@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name, + goto error_idr_alloc; + c2dev->id = ret; + +- bin_attr_flash_data.size = ops->blocks_num * ops->block_size; ++ pax_open_kernel(); ++ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size; ++ pax_close_kernel(); + + c2dev->dev = device_create(c2port_class, NULL, 0, c2dev, + "c2port%d", c2dev->id); +diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c +index 9c34e57..b981cda 100644 +--- a/drivers/misc/eeprom/sunxi_sid.c ++++ b/drivers/misc/eeprom/sunxi_sid.c +@@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, sid_data); + +- sid_bin_attr.size = sid_data->keysize; ++ pax_open_kernel(); ++ *(size_t *)&sid_bin_attr.size = sid_data->keysize; ++ pax_close_kernel(); + if (device_create_bin_file(&pdev->dev, &sid_bin_attr)) + return -ENODEV; + +diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c +index 36f5d52..32311c3 100644 +--- a/drivers/misc/kgdbts.c ++++ b/drivers/misc/kgdbts.c +@@ -834,7 +834,7 @@ 
static void run_plant_and_detach_test(int is_early) + char before[BREAK_INSTR_SIZE]; + char after[BREAK_INSTR_SIZE]; + +- probe_kernel_read(before, (char *)kgdbts_break_test, ++ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test), + BREAK_INSTR_SIZE); + init_simple_test(); + ts.tst = plant_and_detach_test; +@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early) + /* Activate test with initial breakpoint */ + if (!is_early) + kgdb_breakpoint(); +- probe_kernel_read(after, (char *)kgdbts_break_test, ++ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test), + BREAK_INSTR_SIZE); + if (memcmp(before, after, BREAK_INSTR_SIZE)) { + printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c +index 036effe..b3a6336 100644 +--- a/drivers/misc/lis3lv02d/lis3lv02d.c ++++ b/drivers/misc/lis3lv02d/lis3lv02d.c +@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data) + * the lid is closed. This leads to interrupts as soon as a little move + * is done. + */ +- atomic_inc(&lis3->count); ++ atomic_inc_unchecked(&lis3->count); + + wake_up_interruptible(&lis3->misc_wait); + kill_fasync(&lis3->async_queue, SIGIO, POLL_IN); +@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file) + if (lis3->pm_dev) + pm_runtime_get_sync(lis3->pm_dev); + +- atomic_set(&lis3->count, 0); ++ atomic_set_unchecked(&lis3->count, 0); + return 0; + } + +@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf, + add_wait_queue(&lis3->misc_wait, &wait); + while (true) { + set_current_state(TASK_INTERRUPTIBLE); +- data = atomic_xchg(&lis3->count, 0); ++ data = atomic_xchg_unchecked(&lis3->count, 0); + if (data) + break; + +@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait) + struct lis3lv02d, miscdev); + + poll_wait(file, &lis3->misc_wait, wait); +- if (atomic_read(&lis3->count)) ++ if (atomic_read_unchecked(&lis3->count)) + return POLLIN | POLLRDNORM; + return 0; + } +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h +index c439c82..1f20f57 100644 +--- a/drivers/misc/lis3lv02d/lis3lv02d.h ++++ b/drivers/misc/lis3lv02d/lis3lv02d.h +@@ -297,7 +297,7 @@ struct lis3lv02d { + struct input_polled_dev *idev; /* input device */ + struct platform_device *pdev; /* platform device */ + struct regulator_bulk_data regulators[2]; +- atomic_t count; /* interrupt count after last read */ ++ atomic_unchecked_t count; /* interrupt count after last read */ + union axis_conversion ac; /* hw -> logical axis */ + int mapped_btns[3]; + +diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c +index 2f30bad..c4c13d0 100644 +--- a/drivers/misc/sgi-gru/gruhandles.c ++++ b/drivers/misc/sgi-gru/gruhandles.c +@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks) + unsigned long nsec; + + nsec = CLKS2NSEC(clks); +- atomic_long_inc(&mcs_op_statistics[op].count); +- atomic_long_add(nsec, &mcs_op_statistics[op].total); ++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count); ++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total); + if (mcs_op_statistics[op].max < nsec) + mcs_op_statistics[op].max = nsec; + } +diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c +index 4f76359..cdfcb2e 100644 +--- a/drivers/misc/sgi-gru/gruprocfs.c ++++ b/drivers/misc/sgi-gru/gruprocfs.c +@@ -32,9 +32,9 
@@ + + #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) + +-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) ++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) + { +- unsigned long val = atomic_long_read(v); ++ unsigned long val = atomic_long_read_unchecked(v); + + seq_printf(s, "%16lu %s\n", val, id); + } +@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p) + + seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); + for (op = 0; op < mcsop_last; op++) { +- count = atomic_long_read(&mcs_op_statistics[op].count); +- total = atomic_long_read(&mcs_op_statistics[op].total); ++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); ++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); + max = mcs_op_statistics[op].max; + seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, + count ? total / count : 0, max); +diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h +index 5c3ce24..4915ccb 100644 +--- a/drivers/misc/sgi-gru/grutables.h ++++ b/drivers/misc/sgi-gru/grutables.h +@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids; + * GRU statistics. + */ + struct gru_stats_s { +- atomic_long_t vdata_alloc; +- atomic_long_t vdata_free; +- atomic_long_t gts_alloc; +- atomic_long_t gts_free; +- atomic_long_t gms_alloc; +- atomic_long_t gms_free; +- atomic_long_t gts_double_allocate; +- atomic_long_t assign_context; +- atomic_long_t assign_context_failed; +- atomic_long_t free_context; +- atomic_long_t load_user_context; +- atomic_long_t load_kernel_context; +- atomic_long_t lock_kernel_context; +- atomic_long_t unlock_kernel_context; +- atomic_long_t steal_user_context; +- atomic_long_t steal_kernel_context; +- atomic_long_t steal_context_failed; +- atomic_long_t nopfn; +- atomic_long_t asid_new; +- atomic_long_t asid_next; +- atomic_long_t asid_wrap; +- atomic_long_t asid_reuse; +- atomic_long_t intr; +- atomic_long_t intr_cbr; +- atomic_long_t intr_tfh; +- atomic_long_t intr_spurious; +- atomic_long_t intr_mm_lock_failed; +- atomic_long_t call_os; +- atomic_long_t call_os_wait_queue; +- atomic_long_t user_flush_tlb; +- atomic_long_t user_unload_context; +- atomic_long_t user_exception; +- atomic_long_t set_context_option; +- atomic_long_t check_context_retarget_intr; +- atomic_long_t check_context_unload; +- atomic_long_t tlb_dropin; +- atomic_long_t tlb_preload_page; +- atomic_long_t tlb_dropin_fail_no_asid; +- atomic_long_t tlb_dropin_fail_upm; +- atomic_long_t tlb_dropin_fail_invalid; +- atomic_long_t tlb_dropin_fail_range_active; +- atomic_long_t tlb_dropin_fail_idle; +- atomic_long_t tlb_dropin_fail_fmm; +- atomic_long_t tlb_dropin_fail_no_exception; +- atomic_long_t tfh_stale_on_fault; +- atomic_long_t mmu_invalidate_range; +- atomic_long_t mmu_invalidate_page; +- atomic_long_t flush_tlb; +- atomic_long_t flush_tlb_gru; +- atomic_long_t flush_tlb_gru_tgh; +- atomic_long_t flush_tlb_gru_zero_asid; ++ atomic_long_unchecked_t vdata_alloc; ++ atomic_long_unchecked_t vdata_free; ++ atomic_long_unchecked_t gts_alloc; ++ atomic_long_unchecked_t gts_free; ++ atomic_long_unchecked_t gms_alloc; ++ atomic_long_unchecked_t gms_free; ++ atomic_long_unchecked_t gts_double_allocate; ++ atomic_long_unchecked_t assign_context; ++ atomic_long_unchecked_t assign_context_failed; ++ atomic_long_unchecked_t free_context; ++ atomic_long_unchecked_t load_user_context; ++ atomic_long_unchecked_t load_kernel_context; ++ atomic_long_unchecked_t 
lock_kernel_context; ++ atomic_long_unchecked_t unlock_kernel_context; ++ atomic_long_unchecked_t steal_user_context; ++ atomic_long_unchecked_t steal_kernel_context; ++ atomic_long_unchecked_t steal_context_failed; ++ atomic_long_unchecked_t nopfn; ++ atomic_long_unchecked_t asid_new; ++ atomic_long_unchecked_t asid_next; ++ atomic_long_unchecked_t asid_wrap; ++ atomic_long_unchecked_t asid_reuse; ++ atomic_long_unchecked_t intr; ++ atomic_long_unchecked_t intr_cbr; ++ atomic_long_unchecked_t intr_tfh; ++ atomic_long_unchecked_t intr_spurious; ++ atomic_long_unchecked_t intr_mm_lock_failed; ++ atomic_long_unchecked_t call_os; ++ atomic_long_unchecked_t call_os_wait_queue; ++ atomic_long_unchecked_t user_flush_tlb; ++ atomic_long_unchecked_t user_unload_context; ++ atomic_long_unchecked_t user_exception; ++ atomic_long_unchecked_t set_context_option; ++ atomic_long_unchecked_t check_context_retarget_intr; ++ atomic_long_unchecked_t check_context_unload; ++ atomic_long_unchecked_t tlb_dropin; ++ atomic_long_unchecked_t tlb_preload_page; ++ atomic_long_unchecked_t tlb_dropin_fail_no_asid; ++ atomic_long_unchecked_t tlb_dropin_fail_upm; ++ atomic_long_unchecked_t tlb_dropin_fail_invalid; ++ atomic_long_unchecked_t tlb_dropin_fail_range_active; ++ atomic_long_unchecked_t tlb_dropin_fail_idle; ++ atomic_long_unchecked_t tlb_dropin_fail_fmm; ++ atomic_long_unchecked_t tlb_dropin_fail_no_exception; ++ atomic_long_unchecked_t tfh_stale_on_fault; ++ atomic_long_unchecked_t mmu_invalidate_range; ++ atomic_long_unchecked_t mmu_invalidate_page; ++ atomic_long_unchecked_t flush_tlb; ++ atomic_long_unchecked_t flush_tlb_gru; ++ atomic_long_unchecked_t flush_tlb_gru_tgh; ++ atomic_long_unchecked_t flush_tlb_gru_zero_asid; + +- atomic_long_t copy_gpa; +- atomic_long_t read_gpa; ++ atomic_long_unchecked_t copy_gpa; ++ atomic_long_unchecked_t read_gpa; + +- atomic_long_t mesq_receive; +- atomic_long_t mesq_receive_none; +- atomic_long_t mesq_send; +- atomic_long_t mesq_send_failed; +- atomic_long_t mesq_noop; +- atomic_long_t mesq_send_unexpected_error; +- atomic_long_t mesq_send_lb_overflow; +- atomic_long_t mesq_send_qlimit_reached; +- atomic_long_t mesq_send_amo_nacked; +- atomic_long_t mesq_send_put_nacked; +- atomic_long_t mesq_page_overflow; +- atomic_long_t mesq_qf_locked; +- atomic_long_t mesq_qf_noop_not_full; +- atomic_long_t mesq_qf_switch_head_failed; +- atomic_long_t mesq_qf_unexpected_error; +- atomic_long_t mesq_noop_unexpected_error; +- atomic_long_t mesq_noop_lb_overflow; +- atomic_long_t mesq_noop_qlimit_reached; +- atomic_long_t mesq_noop_amo_nacked; +- atomic_long_t mesq_noop_put_nacked; +- atomic_long_t mesq_noop_page_overflow; ++ atomic_long_unchecked_t mesq_receive; ++ atomic_long_unchecked_t mesq_receive_none; ++ atomic_long_unchecked_t mesq_send; ++ atomic_long_unchecked_t mesq_send_failed; ++ atomic_long_unchecked_t mesq_noop; ++ atomic_long_unchecked_t mesq_send_unexpected_error; ++ atomic_long_unchecked_t mesq_send_lb_overflow; ++ atomic_long_unchecked_t mesq_send_qlimit_reached; ++ atomic_long_unchecked_t mesq_send_amo_nacked; ++ atomic_long_unchecked_t mesq_send_put_nacked; ++ atomic_long_unchecked_t mesq_page_overflow; ++ atomic_long_unchecked_t mesq_qf_locked; ++ atomic_long_unchecked_t mesq_qf_noop_not_full; ++ atomic_long_unchecked_t mesq_qf_switch_head_failed; ++ atomic_long_unchecked_t mesq_qf_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_lb_overflow; ++ atomic_long_unchecked_t mesq_noop_qlimit_reached; ++ 
atomic_long_unchecked_t mesq_noop_amo_nacked; ++ atomic_long_unchecked_t mesq_noop_put_nacked; ++ atomic_long_unchecked_t mesq_noop_page_overflow; + + }; + +@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, + tghop_invalidate, mcsop_last}; + + struct mcs_op_statistic { +- atomic_long_t count; +- atomic_long_t total; ++ atomic_long_unchecked_t count; ++ atomic_long_unchecked_t total; + unsigned long max; + }; + +@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + + #define STAT(id) do { \ + if (gru_options & OPT_STATS) \ +- atomic_long_inc(&gru_stats.id); \ ++ atomic_long_inc_unchecked(&gru_stats.id); \ + } while (0) + + #ifdef CONFIG_SGI_GRU_DEBUG +diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h +index c862cd4..0d176fe 100644 +--- a/drivers/misc/sgi-xp/xp.h ++++ b/drivers/misc/sgi-xp/xp.h +@@ -288,7 +288,7 @@ struct xpc_interface { + xpc_notify_func, void *); + void (*received) (short, int, void *); + enum xp_retval (*partid_to_nasids) (short, void *); +-}; ++} __no_const; + + extern struct xpc_interface xpc_interface; + +diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c +index 01be66d..e3a0c7e 100644 +--- a/drivers/misc/sgi-xp/xp_main.c ++++ b/drivers/misc/sgi-xp/xp_main.c +@@ -78,13 +78,13 @@ xpc_notloaded(void) + } + + struct xpc_interface xpc_interface = { +- (void (*)(int))xpc_notloaded, +- (void (*)(int))xpc_notloaded, +- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, +- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, ++ .connect = (void (*)(int))xpc_notloaded, ++ .disconnect = (void (*)(int))xpc_notloaded, ++ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, ++ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, + void *))xpc_notloaded, +- (void (*)(short, int, void *))xpc_notloaded, +- (enum xp_retval(*)(short, void *))xpc_notloaded ++ .received = (void (*)(short, int, void *))xpc_notloaded, ++ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded + }; + EXPORT_SYMBOL_GPL(xpc_interface); + +diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h +index b94d5f7..7f494c5 100644 +--- a/drivers/misc/sgi-xp/xpc.h ++++ b/drivers/misc/sgi-xp/xpc.h +@@ -835,6 +835,7 @@ struct xpc_arch_operations { + void (*received_payload) (struct xpc_channel *, void *); + void (*notify_senders_of_disconnect) (struct xpc_channel *); + }; ++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const; + + /* struct xpc_partition act_state values (for XPC HB) */ + +@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[]; + /* found in xpc_main.c */ + extern struct device *xpc_part; + extern struct device *xpc_chan; +-extern struct xpc_arch_operations xpc_arch_ops; ++extern xpc_arch_operations_no_const xpc_arch_ops; + extern int xpc_disengage_timelimit; + extern int xpc_disengage_timedout; + extern int xpc_activate_IRQ_rcvd; +diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c +index 82dc574..8539ab2 100644 +--- a/drivers/misc/sgi-xp/xpc_main.c ++++ b/drivers/misc/sgi-xp/xpc_main.c +@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = { + .notifier_call = xpc_system_die, + }; + +-struct xpc_arch_operations xpc_arch_ops; ++xpc_arch_operations_no_const xpc_arch_ops; + + /* + * Timer function to enforce the timelimit on the partition disengage. 
+@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) + + if (((die_args->trapnr == X86_TRAP_MF) || + (die_args->trapnr == X86_TRAP_XF)) && +- !user_mode_vm(die_args->regs)) ++ !user_mode(die_args->regs)) + xpc_die_deactivate(); + + break; +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 7b5424f..ed1d6ac 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, + if (idata->ic.postsleep_min_us) + usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); + +- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { ++ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) { + err = -EFAULT; + goto cmd_rel_host; + } +diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c +index e5b5eeb..7bf2212 100644 +--- a/drivers/mmc/core/mmc_ops.c ++++ b/drivers/mmc/core/mmc_ops.c +@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, + void *data_buf; + int is_on_stack; + +- is_on_stack = object_is_on_stack(buf); ++ is_on_stack = object_starts_on_stack(buf); + if (is_on_stack) { + /* + * dma onto stack is unsafe/nonportable, but callers to this +diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h +index 6bf24ab..13d0293b 100644 +--- a/drivers/mmc/host/dw_mmc.h ++++ b/drivers/mmc/host/dw_mmc.h +@@ -258,5 +258,5 @@ struct dw_mci_drv_data { + int (*parse_dt)(struct dw_mci *host); + int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data); +-}; ++} __do_const; + #endif /* _DW_MMC_H_ */ +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c +index b931226..df6a085 100644 +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device *dev, + } + + if (variant->busy_detect) { +- mmci_ops.card_busy = mmci_card_busy; ++ pax_open_kernel(); ++ *(void **)&mmci_ops.card_busy = mmci_card_busy; ++ pax_close_kernel(); + mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); + } + +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c +index b841bb7..d82712f5 100644 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c +@@ -1031,9 +1031,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) + host->mmc->caps |= MMC_CAP_1_8V_DDR; + } + +- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) +- sdhci_esdhc_ops.platform_execute_tuning = ++ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { ++ pax_open_kernel(); ++ *(void **)&sdhci_esdhc_ops.platform_execute_tuning = + esdhc_executing_tuning; ++ pax_close_kernel(); ++ } + + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) + writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) | +diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c +index 6debda9..2ba7427 100644 +--- a/drivers/mmc/host/sdhci-s3c.c ++++ b/drivers/mmc/host/sdhci-s3c.c +@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev) + * we can use overriding functions instead of default. 
+ */ + if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { +- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; +- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; +- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; ++ pax_open_kernel(); ++ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; ++ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; ++ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; ++ pax_close_kernel(); + } + + /* It supports additional host capabilities if needed */ +diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c +index 096993f..f02c23b 100644 +--- a/drivers/mtd/chips/cfi_cmdset_0020.c ++++ b/drivers/mtd/chips/cfi_cmdset_0020.c +@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, + size_t totlen = 0, thislen; + int ret = 0; + size_t buflen = 0; +- static char *buffer; ++ char *buffer; + + if (!ECCBUF_SIZE) { + /* We should fall back to a general writev implementation. +diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c +index c07cd57..61c4fbd 100644 +--- a/drivers/mtd/nand/denali.c ++++ b/drivers/mtd/nand/denali.c +@@ -24,6 +24,7 @@ + #include <linux/slab.h> + #include <linux/mtd/mtd.h> + #include <linux/module.h> ++#include <linux/slab.h> + + #include "denali.h" + +diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +index ca6369f..0ce9fed 100644 +--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c ++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +@@ -369,7 +369,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr) + + /* first try to map the upper buffer directly */ + if (virt_addr_valid(this->upper_buf) && +- !object_is_on_stack(this->upper_buf)) { ++ !object_starts_on_stack(this->upper_buf)) { + sg_init_one(sgl, this->upper_buf, this->upper_len); + ret = dma_map_sg(this->dev, sgl, 1, dr); + if (ret == 0) +diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c +index 51b9d6a..52af9a7 100644 +--- a/drivers/mtd/nftlmount.c ++++ b/drivers/mtd/nftlmount.c +@@ -24,6 +24,7 @@ + #include <asm/errno.h> + #include <linux/delay.h> + #include <linux/slab.h> ++#include <linux/sched.h> + #include <linux/mtd/mtd.h> + #include <linux/mtd/nand.h> + #include <linux/mtd/nftl.h> +diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c +index cf49c22..971b133 100644 +--- a/drivers/mtd/sm_ftl.c ++++ b/drivers/mtd/sm_ftl.c +@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr, + #define SM_CIS_VENDOR_OFFSET 0x59 + static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) + { +- struct attribute_group *attr_group; ++ attribute_group_no_const *attr_group; + struct attribute **attributes; + struct sm_sysfs_attribute *vendor_attribute; + char *vendor; +diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c +index 70651f8..7eb1bdf 100644 +--- a/drivers/net/bonding/bond_netlink.c ++++ b/drivers/net/bonding/bond_netlink.c +@@ -542,7 +542,7 @@ nla_put_failure: + return -EMSGSIZE; + } + +-struct rtnl_link_ops bond_link_ops __read_mostly = { ++struct rtnl_link_ops bond_link_ops = { + .kind = "bond", + .priv_size = sizeof(struct bonding), + .setup = bond_setup, +diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig +index 9e7d95d..d447b88 100644 +--- a/drivers/net/can/Kconfig ++++ b/drivers/net/can/Kconfig +@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3 + + config CAN_FLEXCAN + tristate "Support for 
Freescale FLEXCAN based chips" +- depends on ARM || PPC ++ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC + ---help--- + Say Y here if you want to support for Freescale FlexCAN. + +diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c +index 455d4c3..3353ee7 100644 +--- a/drivers/net/ethernet/8390/ax88796.c ++++ b/drivers/net/ethernet/8390/ax88796.c +@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev) + if (ax->plat->reg_offsets) + ei_local->reg_offset = ax->plat->reg_offsets; + else { ++ resource_size_t _mem_size = mem_size; ++ do_div(_mem_size, 0x18); + ei_local->reg_offset = ax->reg_offsets; + for (ret = 0; ret < 0x18; ret++) +- ax->reg_offsets[ret] = (mem_size / 0x18) * ret; ++ ax->reg_offsets[ret] = _mem_size * ret; + } + + if (!request_mem_region(mem->start, mem_size, pdev->name)) { +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +index a89a40f..5a8a2ac 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +@@ -1062,7 +1062,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) + static inline void bnx2x_init_bp_objs(struct bnx2x *bp) + { + /* RX_MODE controlling object */ +- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); ++ bnx2x_init_rx_mode_obj(bp); + + /* multicast configuration controlling object */ + bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +index 0fb6ff2..78fd55c 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp, + return rc; + } + +-void bnx2x_init_rx_mode_obj(struct bnx2x *bp, +- struct bnx2x_rx_mode_obj *o) ++void bnx2x_init_rx_mode_obj(struct bnx2x *bp) + { + if (CHIP_IS_E1x(bp)) { +- o->wait_comp = bnx2x_empty_rx_mode_wait; +- o->config_rx_mode = bnx2x_set_rx_mode_e1x; ++ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait; ++ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x; + } else { +- o->wait_comp = bnx2x_wait_rx_mode_comp_e2; +- o->config_rx_mode = bnx2x_set_rx_mode_e2; ++ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2; ++ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2; + } + } + +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +index 00d7f21..2cddec4 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp, + + /********************* RX MODE ****************/ + +-void bnx2x_init_rx_mode_obj(struct bnx2x *bp, +- struct bnx2x_rx_mode_obj *o); ++void bnx2x_init_rx_mode_obj(struct bnx2x *bp); + + /** + * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. 
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h +index 04321e5..b51cdc4 100644 +--- a/drivers/net/ethernet/broadcom/tg3.h ++++ b/drivers/net/ethernet/broadcom/tg3.h +@@ -150,6 +150,7 @@ + #define CHIPREV_ID_5750_A0 0x4000 + #define CHIPREV_ID_5750_A1 0x4001 + #define CHIPREV_ID_5750_A3 0x4003 ++#define CHIPREV_ID_5750_C1 0x4201 + #define CHIPREV_ID_5750_C2 0x4202 + #define CHIPREV_ID_5752_A0_HW 0x5000 + #define CHIPREV_ID_5752_A0 0x6000 +diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c +index 13f9636..228040f 100644 +--- a/drivers/net/ethernet/brocade/bna/bna_enet.c ++++ b/drivers/net/ethernet/brocade/bna/bna_enet.c +@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg) + } + + static struct bfa_ioc_cbfn bna_ioceth_cbfn = { +- bna_cb_ioceth_enable, +- bna_cb_ioceth_disable, +- bna_cb_ioceth_hbfail, +- bna_cb_ioceth_reset ++ .enable_cbfn = bna_cb_ioceth_enable, ++ .disable_cbfn = bna_cb_ioceth_disable, ++ .hbfail_cbfn = bna_cb_ioceth_hbfail, ++ .reset_cbfn = bna_cb_ioceth_reset + }; + + static void bna_attr_init(struct bna_ioceth *ioceth) +diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h +index 8cffcdf..aadf043 100644 +--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h ++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h +@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev, + */ + struct l2t_skb_cb { + arp_failure_handler_func arp_failure_handler; +-}; ++} __no_const; + + #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +index 34e2488..07e2079 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +@@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + + int i; + struct adapter *ap = netdev2adap(dev); +- static const unsigned int *reg_ranges; ++ const unsigned int *reg_ranges; + int arr_size = 0, buf_size = 0; + + if (is_t4(ap->params.chip)) { +diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c +index c05b66d..ed69872 100644 +--- a/drivers/net/ethernet/dec/tulip/de4x5.c ++++ b/drivers/net/ethernet/dec/tulip/de4x5.c +@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + for (i=0; i<ETH_ALEN; i++) { + tmp.addr[i] = dev->dev_addr[i]; + } +- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; ++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + break; + + case DE4X5_SET_HWADDR: /* Set the hardware address */ +@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + spin_lock_irqsave(&lp->lock, flags); + memcpy(&statbuf, &lp->pktStats, ioc->len); + spin_unlock_irqrestore(&lp->lock, flags); +- if (copy_to_user(ioc->data, &statbuf, ioc->len)) ++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len)) + return -EFAULT; + break; + } +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index 80bfa03..45114e6 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -534,7 +534,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) + + if (wrapped) + newacc += 65536; +- ACCESS_ONCE(*acc) = newacc; ++ ACCESS_ONCE_RW(*acc) = newacc; + 
} + + static void populate_erx_stats(struct be_adapter *adapter, +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +index c11ecbc..13bb299 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -30,6 +30,8 @@ + #include <linux/netdevice.h> + #include <linux/phy.h> + #include <linux/platform_device.h> ++#include <linux/interrupt.h> ++#include <linux/irqreturn.h> + #include <net/ip.h> + + #include "ftgmac100.h" +diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c +index 8be5b40..081bc1b 100644 +--- a/drivers/net/ethernet/faraday/ftmac100.c ++++ b/drivers/net/ethernet/faraday/ftmac100.c +@@ -31,6 +31,8 @@ + #include <linux/module.h> + #include <linux/netdevice.h> + #include <linux/platform_device.h> ++#include <linux/interrupt.h> ++#include <linux/irqreturn.h> + + #include "ftmac100.h" + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c +index e33ec6c..f54cfe7 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c +@@ -436,7 +436,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) + wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + + /* Update the base adjustement value. */ +- ACCESS_ONCE(pf->ptp_base_adj) = incval; ++ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval; + smp_mb(); /* Force the above update. */ + } + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +index 5184e2a..acb28c3 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) + } + + /* update the base incval used to calculate frequency adjustment */ +- ACCESS_ONCE(adapter->base_incval) = incval; ++ ACCESS_ONCE_RW(adapter->base_incval) = incval; + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c +index 089b713..28d87ae 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c ++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c +@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, + struct __vxge_hw_fifo *fifo; + struct vxge_hw_fifo_config *config; + u32 txdl_size, txdl_per_memblock; +- struct vxge_hw_mempool_cbs fifo_mp_callback; ++ static struct vxge_hw_mempool_cbs fifo_mp_callback = { ++ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc, ++ }; ++ + struct __vxge_hw_virtualpath *vpath; + + if ((vp == NULL) || (attr == NULL)) { +@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, + goto exit; + } + +- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc; +- + fifo->mempool = + __vxge_hw_mempool_create(vpath->hldev, + fifo->config->memblock_size, +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +index 90a2dda..47e620e 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +@@ -2088,7 +2088,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) + adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; + } else if (ret == QLC_83XX_DEFAULT_OPMODE) { + ahw->nic_mode = QLCNIC_DEFAULT_MODE; +- adapter->nic_ops->init_driver = 
qlcnic_83xx_init_default_driver; ++ pax_open_kernel(); ++ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ++ pax_close_kernel(); + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; + adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +index be7d7a6..a8983f8 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) + case QLCNIC_NON_PRIV_FUNC: + ahw->op_mode = QLCNIC_NON_PRIV_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; +- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; ++ pax_open_kernel(); ++ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; ++ pax_close_kernel(); + break; + case QLCNIC_PRIV_FUNC: + ahw->op_mode = QLCNIC_PRIV_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; +- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; ++ pax_open_kernel(); ++ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; ++ pax_close_kernel(); + break; + case QLCNIC_MGMT_FUNC: + ahw->op_mode = QLCNIC_MGMT_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; +- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; ++ pax_open_kernel(); ++ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; ++ pax_close_kernel(); + break; + default: + dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n"); +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +index 7763962..c3499a7 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) + struct qlcnic_dump_entry *entry; + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; +- static const struct qlcnic_dump_operations *fw_dump_ops; ++ const struct qlcnic_dump_operations *fw_dump_ops; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_hardware_context *ahw; + void *temp_buffer; +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index 3ff7bc3..366091b 100644 +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -758,22 +758,22 @@ struct rtl8169_private { + struct mdio_ops { + void (*write)(struct rtl8169_private *, int, int); + int (*read)(struct rtl8169_private *, int); +- } mdio_ops; ++ } __no_const mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); +- } pll_power_ops; ++ } __no_const pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); +- } jumbo_ops; ++ } __no_const jumbo_ops; + + struct csi_ops { + void (*write)(struct rtl8169_private *, int, int); + u32 (*read)(struct rtl8169_private *, int); +- } csi_ops; ++ } __no_const csi_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); +diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c +index d7a3682..9ce272a 100644 +--- 
a/drivers/net/ethernet/sfc/ptp.c ++++ b/drivers/net/ethernet/sfc/ptp.c +@@ -825,7 +825,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) + ptp->start.dma_addr); + + /* Clear flag that signals MC ready */ +- ACCESS_ONCE(*start) = 0; ++ ACCESS_ONCE_RW(*start) = 0; + rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + EFX_BUG_ON_PARANOID(rc); +diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +index 50617c5..b13724c 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) + + writel(value, ioaddr + MMC_CNTRL); + +- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", +- MMC_CNTRL, value); ++// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", ++// MMC_CNTRL, value); + } + + /* To mask all all interrupts.*/ +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h +index 7b594ce..1f6c5708 100644 +--- a/drivers/net/hyperv/hyperv_net.h ++++ b/drivers/net/hyperv/hyperv_net.h +@@ -100,7 +100,7 @@ struct rndis_device { + + enum rndis_device_state state; + bool link_state; +- atomic_t new_req_id; ++ atomic_unchecked_t new_req_id; + + spinlock_t request_lock; + struct list_head req_list; +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c +index b54fd25..9bd2bae 100644 +--- a/drivers/net/hyperv/rndis_filter.c ++++ b/drivers/net/hyperv/rndis_filter.c +@@ -103,7 +103,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev, + * template + */ + set = &rndis_msg->msg.set_req; +- set->req_id = atomic_inc_return(&dev->new_req_id); ++ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id); + + /* Add to the request list */ + spin_lock_irqsave(&dev->request_lock, flags); +@@ -770,7 +770,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) + + /* Setup the rndis set */ + halt = &request->request_msg.msg.halt_req; +- halt->req_id = atomic_inc_return(&dev->new_req_id); ++ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id); + + /* Ignore return since this msg is optional. 
*/ + rndis_filter_send_request(dev, request); +diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c +index bf0d55e..82bcfbd1 100644 +--- a/drivers/net/ieee802154/fakehard.c ++++ b/drivers/net/ieee802154/fakehard.c +@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev) + phy->transmit_power = 0xbf; + + dev->netdev_ops = &fake_ops; +- dev->ml_priv = &fake_mlme; ++ dev->ml_priv = (void *)&fake_mlme; + + priv = netdev_priv(dev); + priv->phy = phy; +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 7f1abb7..6434b33 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -992,13 +992,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { + int macvlan_link_register(struct rtnl_link_ops *ops) + { + /* common fields */ +- ops->priv_size = sizeof(struct macvlan_dev); +- ops->validate = macvlan_validate; +- ops->maxtype = IFLA_MACVLAN_MAX; +- ops->policy = macvlan_policy; +- ops->changelink = macvlan_changelink; +- ops->get_size = macvlan_get_size; +- ops->fill_info = macvlan_fill_info; ++ pax_open_kernel(); ++ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev); ++ *(void **)&ops->validate = macvlan_validate; ++ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX; ++ *(const void **)&ops->policy = macvlan_policy; ++ *(void **)&ops->changelink = macvlan_changelink; ++ *(void **)&ops->get_size = macvlan_get_size; ++ *(void **)&ops->fill_info = macvlan_fill_info; ++ pax_close_kernel(); + + return rtnl_link_register(ops); + }; +@@ -1052,7 +1054,7 @@ static int macvlan_device_event(struct notifier_block *unused, + return NOTIFY_DONE; + } + +-static struct notifier_block macvlan_notifier_block __read_mostly = { ++static struct notifier_block macvlan_notifier_block = { + .notifier_call = macvlan_device_event, + }; + +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index 3381c4f..dea5fd5 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, + } + + ret = 0; +- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || ++ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || + put_user(q->flags, &ifr->ifr_flags)) + ret = -EFAULT; + macvtap_put_vlan(vlan); +@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused, + return NOTIFY_DONE; + } + +-static struct notifier_block macvtap_notifier_block __read_mostly = { ++static struct notifier_block macvtap_notifier_block = { + .notifier_call = macvtap_device_event, + }; + +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 72ff14b..11d442d 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; + struct ppp_stats stats; + struct ppp_comp_stats cstats; +- char *vers; + + switch (cmd) { + case SIOCGPPPSTATS: +@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + break; + + case SIOCGPPPVER: +- vers = PPP_VERSION; +- if (copy_to_user(addr, vers, strlen(vers) + 1)) ++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION))) + break; + err = 0; + break; +diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c +index 1252d9c..80e660b 100644 +--- a/drivers/net/slip/slhc.c ++++ b/drivers/net/slip/slhc.c +@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char 
*icp, int isize) + register struct tcphdr *thp; + register struct iphdr *ip; + register struct cstate *cs; +- int len, hdrlen; ++ long len, hdrlen; + unsigned char *cp = icp; + + /* We've got a compressed packet; read the change byte */ +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 26d8c29..bbc6837 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -2874,7 +2874,7 @@ static int team_device_event(struct notifier_block *unused, + return NOTIFY_DONE; + } + +-static struct notifier_block team_notifier_block __read_mostly = { ++static struct notifier_block team_notifier_block = { + .notifier_call = team_device_event, + }; + +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 26f8635..c237839 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1876,7 +1876,7 @@ unlock: + } + + static long __tun_chr_ioctl(struct file *file, unsigned int cmd, +- unsigned long arg, int ifreq_len) ++ unsigned long arg, size_t ifreq_len) + { + struct tun_file *tfile = file->private_data; + struct tun_struct *tun; +@@ -1889,6 +1889,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, + unsigned int ifindex; + int ret; + ++ if (ifreq_len > sizeof ifr) ++ return -EFAULT; ++ + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { + if (copy_from_user(&ifr, argp, ifreq_len)) + return -EFAULT; +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c +index 660bd5e..ac59452 100644 +--- a/drivers/net/usb/hso.c ++++ b/drivers/net/usb/hso.c +@@ -71,7 +71,7 @@ + #include <asm/byteorder.h> + #include <linux/serial_core.h> + #include <linux/serial.h> +- ++#include <asm/local.h> + + #define MOD_AUTHOR "Option Wireless" + #define MOD_DESCRIPTION "USB High Speed Option driver" +@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) + struct urb *urb; + + urb = serial->rx_urb[0]; +- if (serial->port.count > 0) { ++ if (atomic_read(&serial->port.count) > 0) { + count = put_rxbuf_data(urb, serial); + if (count == -1) + return; +@@ -1217,7 +1217,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) + DUMP1(urb->transfer_buffer, urb->actual_length); + + /* Anyone listening? 
*/ +- if (serial->port.count == 0) ++ if (atomic_read(&serial->port.count) == 0) + return; + + if (serial->parent->port_spec & HSO_INFO_CRC_BUG) +@@ -1287,8 +1287,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) + tty_port_tty_set(&serial->port, tty); + + /* check for port already opened, if not set the termios */ +- serial->port.count++; +- if (serial->port.count == 1) { ++ if (atomic_inc_return(&serial->port.count) == 1) { + serial->rx_state = RX_IDLE; + /* Force default termio settings */ + _hso_serial_set_termios(tty, NULL); +@@ -1300,7 +1299,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) + result = hso_start_serial_device(serial->parent, GFP_KERNEL); + if (result) { + hso_stop_serial_device(serial->parent); +- serial->port.count--; ++ atomic_dec(&serial->port.count); + kref_put(&serial->parent->ref, hso_serial_ref_free); + } + } else { +@@ -1337,10 +1336,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) + + /* reset the rts and dtr */ + /* do the actual close */ +- serial->port.count--; ++ atomic_dec(&serial->port.count); + +- if (serial->port.count <= 0) { +- serial->port.count = 0; ++ if (atomic_read(&serial->port.count) <= 0) { ++ atomic_set(&serial->port.count, 0); + tty_port_tty_set(&serial->port, NULL); + if (!usb_gone) + hso_stop_serial_device(serial->parent); +@@ -1416,7 +1415,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) + + /* the actual setup */ + spin_lock_irqsave(&serial->serial_lock, flags); +- if (serial->port.count) ++ if (atomic_read(&serial->port.count)) + _hso_serial_set_termios(tty, old); + else + tty->termios = *old; +@@ -1885,7 +1884,7 @@ static void intr_callback(struct urb *urb) + D1("Pending read interrupt on port %d\n", i); + spin_lock(&serial->serial_lock); + if (serial->rx_state == RX_IDLE && +- serial->port.count > 0) { ++ atomic_read(&serial->port.count) > 0) { + /* Setup and send a ctrl req read on + * port i */ + if (!serial->rx_urb_filled[0]) { +@@ -3061,7 +3060,7 @@ static int hso_resume(struct usb_interface *iface) + /* Start all serial ports */ + for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { + if (serial_table[i] && (serial_table[i]->interface == iface)) { +- if (dev2ser(serial_table[i])->port.count) { ++ if (atomic_read(&dev2ser(serial_table[i])->port.count)) { + result = + hso_start_serial_device(serial_table[i], GFP_NOIO); + hso_kick_transmit(dev2ser(serial_table[i])); +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index adb12f3..48005ab 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -513,7 +513,7 @@ struct r8152 { + void (*disable)(struct r8152 *); + void (*down)(struct r8152 *); + void (*unload)(struct r8152 *); +- } rtl_ops; ++ } __no_const rtl_ops; + + int intr_interval; + u32 msg_enable; +diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c +index a2515887..6d13233 100644 +--- a/drivers/net/usb/sierra_net.c ++++ b/drivers/net/usb/sierra_net.c +@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net"; + /* atomic counter partially included in MAC address to make sure 2 devices + * do not end up with the same MAC - concept breaks in case of > 255 ifaces + */ +-static atomic_t iface_counter = ATOMIC_INIT(0); ++static atomic_unchecked_t iface_counter = ATOMIC_INIT(0); + + /* + * SYNC Timer Delay definition used to set the expiry time +@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) + dev->net->netdev_ops = 
&sierra_net_device_ops; + + /* change MAC addr to include, ifacenum, and to be unique */ +- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); ++ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter); + dev->net->dev_addr[ETH_ALEN-1] = ifacenum; + + /* we will have to manufacture ethernet headers, prepare template */ +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 841b608..198a8b7 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -47,7 +47,7 @@ module_param(gso, bool, 0444); + #define RECEIVE_AVG_WEIGHT 64 + + /* Minimum alignment for mergeable packet buffers. */ +-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) ++#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL) + + #define VIRTNET_DRIVER_VERSION "1.0.0" + +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 40ad25d..8703023 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -2846,7 +2846,7 @@ nla_put_failure: + return -EMSGSIZE; + } + +-static struct rtnl_link_ops vxlan_link_ops __read_mostly = { ++static struct rtnl_link_ops vxlan_link_ops = { + .kind = "vxlan", + .maxtype = IFLA_VXLAN_MAX, + .policy = vxlan_policy, +@@ -2893,7 +2893,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused, + return NOTIFY_DONE; + } + +-static struct notifier_block vxlan_notifier_block __read_mostly = { ++static struct notifier_block vxlan_notifier_block = { + .notifier_call = vxlan_lowerdev_event, + }; + +diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c +index 5920c99..ff2e4a5 100644 +--- a/drivers/net/wan/lmc/lmc_media.c ++++ b/drivers/net/wan/lmc/lmc_media.c +@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int); + static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); + + lmc_media_t lmc_ds3_media = { +- lmc_ds3_init, /* special media init stuff */ +- lmc_ds3_default, /* reset to default state */ +- lmc_ds3_set_status, /* reset status to state provided */ +- lmc_dummy_set_1, /* set clock source */ +- lmc_dummy_set2_1, /* set line speed */ +- lmc_ds3_set_100ft, /* set cable length */ +- lmc_ds3_set_scram, /* set scrambler */ +- lmc_ds3_get_link_status, /* get link status */ +- lmc_dummy_set_1, /* set link status */ +- lmc_ds3_set_crc_length, /* set CRC length */ +- lmc_dummy_set_1, /* set T1 or E1 circuit type */ +- lmc_ds3_watchdog ++ .init = lmc_ds3_init, /* special media init stuff */ ++ .defaults = lmc_ds3_default, /* reset to default state */ ++ .set_status = lmc_ds3_set_status, /* reset status to state provided */ ++ .set_clock_source = lmc_dummy_set_1, /* set clock source */ ++ .set_speed = lmc_dummy_set2_1, /* set line speed */ ++ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */ ++ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */ ++ .get_link_status = lmc_ds3_get_link_status, /* get link status */ ++ .set_link_status = lmc_dummy_set_1, /* set link status */ ++ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */ ++ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ ++ .watchdog = lmc_ds3_watchdog + }; + + lmc_media_t lmc_hssi_media = { +- lmc_hssi_init, /* special media init stuff */ +- lmc_hssi_default, /* reset to default state */ +- lmc_hssi_set_status, /* reset status to state provided */ +- lmc_hssi_set_clock, /* set clock source */ +- lmc_dummy_set2_1, /* set line speed */ +- lmc_dummy_set_1, /* set cable length */ +- lmc_dummy_set_1, /* set scrambler */ +- lmc_hssi_get_link_status, /* get 
link status */ +- lmc_hssi_set_link_status, /* set link status */ +- lmc_hssi_set_crc_length, /* set CRC length */ +- lmc_dummy_set_1, /* set T1 or E1 circuit type */ +- lmc_hssi_watchdog ++ .init = lmc_hssi_init, /* special media init stuff */ ++ .defaults = lmc_hssi_default, /* reset to default state */ ++ .set_status = lmc_hssi_set_status, /* reset status to state provided */ ++ .set_clock_source = lmc_hssi_set_clock, /* set clock source */ ++ .set_speed = lmc_dummy_set2_1, /* set line speed */ ++ .set_cable_length = lmc_dummy_set_1, /* set cable length */ ++ .set_scrambler = lmc_dummy_set_1, /* set scrambler */ ++ .get_link_status = lmc_hssi_get_link_status, /* get link status */ ++ .set_link_status = lmc_hssi_set_link_status, /* set link status */ ++ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */ ++ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ ++ .watchdog = lmc_hssi_watchdog + }; + +-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */ +- lmc_ssi_default, /* reset to default state */ +- lmc_ssi_set_status, /* reset status to state provided */ +- lmc_ssi_set_clock, /* set clock source */ +- lmc_ssi_set_speed, /* set line speed */ +- lmc_dummy_set_1, /* set cable length */ +- lmc_dummy_set_1, /* set scrambler */ +- lmc_ssi_get_link_status, /* get link status */ +- lmc_ssi_set_link_status, /* set link status */ +- lmc_ssi_set_crc_length, /* set CRC length */ +- lmc_dummy_set_1, /* set T1 or E1 circuit type */ +- lmc_ssi_watchdog ++lmc_media_t lmc_ssi_media = { ++ .init = lmc_ssi_init, /* special media init stuff */ ++ .defaults = lmc_ssi_default, /* reset to default state */ ++ .set_status = lmc_ssi_set_status, /* reset status to state provided */ ++ .set_clock_source = lmc_ssi_set_clock, /* set clock source */ ++ .set_speed = lmc_ssi_set_speed, /* set line speed */ ++ .set_cable_length = lmc_dummy_set_1, /* set cable length */ ++ .set_scrambler = lmc_dummy_set_1, /* set scrambler */ ++ .get_link_status = lmc_ssi_get_link_status, /* get link status */ ++ .set_link_status = lmc_ssi_set_link_status, /* set link status */ ++ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */ ++ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ ++ .watchdog = lmc_ssi_watchdog + }; + + lmc_media_t lmc_t1_media = { +- lmc_t1_init, /* special media init stuff */ +- lmc_t1_default, /* reset to default state */ +- lmc_t1_set_status, /* reset status to state provided */ +- lmc_t1_set_clock, /* set clock source */ +- lmc_dummy_set2_1, /* set line speed */ +- lmc_dummy_set_1, /* set cable length */ +- lmc_dummy_set_1, /* set scrambler */ +- lmc_t1_get_link_status, /* get link status */ +- lmc_dummy_set_1, /* set link status */ +- lmc_t1_set_crc_length, /* set CRC length */ +- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ +- lmc_t1_watchdog ++ .init = lmc_t1_init, /* special media init stuff */ ++ .defaults = lmc_t1_default, /* reset to default state */ ++ .set_status = lmc_t1_set_status, /* reset status to state provided */ ++ .set_clock_source = lmc_t1_set_clock, /* set clock source */ ++ .set_speed = lmc_dummy_set2_1, /* set line speed */ ++ .set_cable_length = lmc_dummy_set_1, /* set cable length */ ++ .set_scrambler = lmc_dummy_set_1, /* set scrambler */ ++ .get_link_status = lmc_t1_get_link_status, /* get link status */ ++ .set_link_status = lmc_dummy_set_1, /* set link status */ ++ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */ ++ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 
circuit type */ ++ .watchdog = lmc_t1_watchdog + }; + + static void +diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c +index 5895f19..fa9fdfa 100644 +--- a/drivers/net/wan/x25_asy.c ++++ b/drivers/net/wan/x25_asy.c +@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu) + { + struct x25_asy *sl = netdev_priv(dev); + unsigned char *xbuff, *rbuff; +- int len = 2 * newmtu; ++ int len; + ++ if (newmtu > 65534) ++ return -EINVAL; ++ ++ len = 2 * newmtu; + xbuff = kmalloc(len + 4, GFP_ATOMIC); + rbuff = kmalloc(len + 4, GFP_ATOMIC); + +diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c +index feacc3b..5bac0de 100644 +--- a/drivers/net/wan/z85230.c ++++ b/drivers/net/wan/z85230.c +@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan) + + struct z8530_irqhandler z8530_sync = + { +- z8530_rx, +- z8530_tx, +- z8530_status ++ .rx = z8530_rx, ++ .tx = z8530_tx, ++ .status = z8530_status + }; + + EXPORT_SYMBOL(z8530_sync); +@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan) + } + + static struct z8530_irqhandler z8530_dma_sync = { +- z8530_dma_rx, +- z8530_dma_tx, +- z8530_dma_status ++ .rx = z8530_dma_rx, ++ .tx = z8530_dma_tx, ++ .status = z8530_dma_status + }; + + static struct z8530_irqhandler z8530_txdma_sync = { +- z8530_rx, +- z8530_dma_tx, +- z8530_dma_status ++ .rx = z8530_rx, ++ .tx = z8530_dma_tx, ++ .status = z8530_dma_status + }; + + /** +@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan) + + struct z8530_irqhandler z8530_nop= + { +- z8530_rx_clear, +- z8530_tx_clear, +- z8530_status_clear ++ .rx = z8530_rx_clear, ++ .tx = z8530_tx_clear, ++ .status = z8530_status_clear + }; + + +diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c +index 0b60295..b8bfa5b 100644 +--- a/drivers/net/wimax/i2400m/rx.c ++++ b/drivers/net/wimax/i2400m/rx.c +@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m) + if (i2400m->rx_roq == NULL) + goto error_roq_alloc; + +- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log), ++ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1, + GFP_KERNEL); + if (rd == NULL) { + result = -ENOMEM; +diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c +index edf4b57..68b51c0 100644 +--- a/drivers/net/wireless/airo.c ++++ b/drivers/net/wireless/airo.c +@@ -7843,7 +7843,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { + struct airo_info *ai = dev->ml_priv; + int ridcode; + int enabled; +- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); ++ int (* writer)(struct airo_info *, u16 rid, const void *, int, int); + unsigned char *iobuf; + + /* Only super-user can write RIDs */ +diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c +index 99b3bfa..9559372 100644 +--- a/drivers/net/wireless/at76c50x-usb.c ++++ b/drivers/net/wireless/at76c50x-usb.c +@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state) + } + + /* Convert timeout from the DFU status to jiffies */ +-static inline unsigned long at76_get_timeout(struct dfu_status *s) ++static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s) + { + return msecs_to_jiffies((s->poll_timeout[2] << 16) + | (s->poll_timeout[1] << 8) +diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c +index edc57ab..ff49e0a 100644 +--- 
a/drivers/net/wireless/ath/ath10k/htc.c ++++ b/drivers/net/wireless/ath/ath10k/htc.c +@@ -831,7 +831,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc) + /* registered target arrival callback from the HIF layer */ + int ath10k_htc_init(struct ath10k *ar) + { +- struct ath10k_hif_cb htc_callbacks; ++ static struct ath10k_hif_cb htc_callbacks = { ++ .rx_completion = ath10k_htc_rx_completion_handler, ++ .tx_completion = ath10k_htc_tx_completion_handler, ++ }; + struct ath10k_htc_ep *ep = NULL; + struct ath10k_htc *htc = &ar->htc; + +@@ -841,8 +844,6 @@ int ath10k_htc_init(struct ath10k *ar) + ath10k_htc_reset_endpoint_states(htc); + + /* setup HIF layer callbacks */ +- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler; +- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler; + htc->ar = ar; + + /* Get HIF default pipe for HTC message exchange */ +diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h +index 4716d33..a688310 100644 +--- a/drivers/net/wireless/ath/ath10k/htc.h ++++ b/drivers/net/wireless/ath/ath10k/htc.h +@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id { + + struct ath10k_htc_ops { + void (*target_send_suspend_complete)(struct ath10k *ar); +-}; ++} __no_const; + + struct ath10k_htc_ep_ops { + void (*ep_tx_complete)(struct ath10k *, struct sk_buff *); + void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); + void (*ep_tx_credits)(struct ath10k *); +-}; ++} __no_const; + + /* service connection information */ + struct ath10k_htc_svc_conn_req { +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c +index 741b38d..b7ae41b 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c ++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c +@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; + +- ACCESS_ONCE(ads->ds_link) = i->link; +- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->ds_link) = i->link; ++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0]; + + ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); + ctl6 = SM(i->keytype, AR_EncrType); +@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + + if ((i->is_first || i->is_last) && + i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { +- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? 
AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ds_ctl2) = 0; +- ACCESS_ONCE(ads->ds_ctl3) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0; + } + + if (!i->is_first) { +- ACCESS_ONCE(ads->ds_ctl0) = 0; +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + return; + } + +@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + break; + } + +- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -289,19 +289,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : + (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); + +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + + if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) + return; + +- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c +index 729ffbf..49f50e3 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c ++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c +@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + (i->qcu << AR_TxQcuNum_S) | desc_len; + + checksum += val; +- ACCESS_ONCE(ads->info) = val; ++ ACCESS_ONCE_RW(ads->info) = val; + + checksum += i->link; +- ACCESS_ONCE(ads->link) = i->link; ++ ACCESS_ONCE_RW(ads->link) = i->link; + + checksum += i->buf_addr[0]; +- ACCESS_ONCE(ads->data0) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0]; + checksum += i->buf_addr[1]; +- ACCESS_ONCE(ads->data1) = i->buf_addr[1]; ++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1]; + checksum += i->buf_addr[2]; +- ACCESS_ONCE(ads->data2) = i->buf_addr[2]; ++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2]; + checksum += i->buf_addr[3]; +- ACCESS_ONCE(ads->data3) = i->buf_addr[3]; ++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3]; + + checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl3) = val; ++ ACCESS_ONCE_RW(ads->ctl3) = val; + checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl5) = val; ++ ACCESS_ONCE_RW(ads->ctl5) = val; + checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl7) = val; ++ ACCESS_ONCE_RW(ads->ctl7) = val; + checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); +- 
ACCESS_ONCE(ads->ctl9) = val; ++ ACCESS_ONCE_RW(ads->ctl9) = val; + + checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); +- ACCESS_ONCE(ads->ctl10) = checksum; ++ ACCESS_ONCE_RW(ads->ctl10) = checksum; + + if (i->is_first || i->is_last) { +- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ctl13) = 0; +- ACCESS_ONCE(ads->ctl14) = 0; ++ ACCESS_ONCE_RW(ads->ctl13) = 0; ++ ACCESS_ONCE_RW(ads->ctl14) = 0; + } + + ads->ctl20 = 0; +@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + + ctl17 = SM(i->keytype, AR_EncrType); + if (!i->is_first) { +- ACCESS_ONCE(ads->ctl11) = 0; +- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore; +- ACCESS_ONCE(ads->ctl15) = 0; +- ACCESS_ONCE(ads->ctl16) = 0; +- ACCESS_ONCE(ads->ctl17) = ctl17; +- ACCESS_ONCE(ads->ctl18) = 0; +- ACCESS_ONCE(ads->ctl19) = 0; ++ ACCESS_ONCE_RW(ads->ctl11) = 0; ++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore; ++ ACCESS_ONCE_RW(ads->ctl15) = 0; ++ ACCESS_ONCE_RW(ads->ctl16) = 0; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl18) = 0; ++ ACCESS_ONCE_RW(ads->ctl19) = 0; + return; + } + +- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? 
AR_VEOL : 0) +@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) + val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; + ctl12 |= SM(val, AR_PAPRDChainMask); + +- ACCESS_ONCE(ads->ctl12) = ctl12; +- ACCESS_ONCE(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl12) = ctl12; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; + +- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) + | SM(i->rtscts_rate, AR_RTSCTSRate); + +- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; ++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding; + } + + static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h +index 0acd4b5..0591c91 100644 +--- a/drivers/net/wireless/ath/ath9k/hw.h ++++ b/drivers/net/wireless/ath/ath9k/hw.h +@@ -629,7 +629,7 @@ struct ath_hw_private_ops { + + /* ANI */ + void (*ani_cache_ini_regs)(struct ath_hw *ah); +-}; ++} __no_const; + + /** + * struct ath_spec_scan - parameters for Atheros spectral scan +@@ -706,7 +706,7 @@ struct ath_hw_ops { + #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT + void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable); + #endif +-}; ++} __no_const; + + struct ath_nf_limits { + s16 max; +diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c +index 92190da..f3a4c4c 100644 +--- a/drivers/net/wireless/b43/phy_lp.c ++++ b/drivers/net/wireless/b43/phy_lp.c +@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev, + { + struct ssb_bus *bus = dev->dev->sdev->bus; + +- static const struct b206x_channel *chandata = NULL; ++ const struct b206x_channel *chandata = NULL; + u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; + u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count; + u16 old_comm15, scale; +diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c +index 0487461..fd9e84a 100644 +--- a/drivers/net/wireless/iwlegacy/3945-mac.c ++++ b/drivers/net/wireless/iwlegacy/3945-mac.c +@@ -3638,7 +3638,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + */ + if (il3945_mod_params.disable_hw_scan) { + D_INFO("Disabling hw_scan\n"); +- il3945_mac_ops.hw_scan = NULL; ++ pax_open_kernel(); ++ *(void **)&il3945_mac_ops.hw_scan = NULL; ++ pax_close_kernel(); + } + + D_INFO("*** LOAD DRIVER ***\n"); +diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c +index d2fe259..0c4c682 100644 +--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c ++++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c +@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[64]; +- int buf_size; ++ size_t buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); +@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file, + struct iwl_priv *priv = file->private_data; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + 
memset(buf, 0, sizeof(buf)); +@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); +@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int value; + + memset(buf, 0, sizeof(buf)); +@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature); + DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); + DEBUGFS_READ_FILE_OPS(current_sleep_command); + +-static const char *fmt_value = " %-30s %10u\n"; +-static const char *fmt_hex = " %-30s 0x%02X\n"; +-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +-static const char *fmt_header = ++static const char fmt_value[] = " %-30s %10u\n"; ++static const char fmt_hex[] = " %-30s 0x%02X\n"; ++static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n"; ++static const char fmt_header[] = + "%-32s current cumulative delta max\n"; + + static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); +@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); +@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); +@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); +@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); +@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int rts; + + if (!priv->cfg->ht_params) +@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file, + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file, + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + /* check that the interface is up */ + if (!iwl_is_ready(priv)) +@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file, + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c +index ea7e70c..bc0c45f 100644 +--- 
a/drivers/net/wireless/iwlwifi/dvm/main.c ++++ b/drivers/net/wireless/iwlwifi/dvm/main.c +@@ -1127,7 +1127,7 @@ static void iwl_option_config(struct iwl_priv *priv) + static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) + { + struct iwl_nvm_data *data = priv->nvm_data; +- char *debug_msg; ++ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n"; + + if (data->sku_cap_11n_enable && + !priv->cfg->ht_params) { +@@ -1141,7 +1141,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) + return -EINVAL; + } + +- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n"; + IWL_DEBUG_INFO(priv, debug_msg, + data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", + data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", +diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c +index 16be0c0..eb0bc12 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c +@@ -1371,7 +1371,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file, + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -1392,7 +1392,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file, + { + struct iwl_trans *trans = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 69d4c31..bd0b316 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -2541,20 +2541,20 @@ static int __init init_mac80211_hwsim(void) + if (channels < 1) + return -EINVAL; + +- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops; +- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan; +- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan; +- mac80211_hwsim_mchan_ops.sw_scan_start = NULL; +- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL; +- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc; +- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc; +- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx; +- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx; +- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx; +- mac80211_hwsim_mchan_ops.assign_vif_chanctx = +- mac80211_hwsim_assign_vif_chanctx; +- mac80211_hwsim_mchan_ops.unassign_vif_chanctx = +- mac80211_hwsim_unassign_vif_chanctx; ++ pax_open_kernel(); ++ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops); ++ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan; ++ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan; ++ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL; ++ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL; ++ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc; ++ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc; ++ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx; ++ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx; ++ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx; ++ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = 
mac80211_hwsim_assign_vif_chanctx; ++ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx; ++ pax_close_kernel(); + + spin_lock_init(&hwsim_radio_lock); + INIT_LIST_HEAD(&hwsim_radios); +diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c +index 5028557..91cf394 100644 +--- a/drivers/net/wireless/rndis_wlan.c ++++ b/drivers/net/wireless/rndis_wlan.c +@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + +- if (rts_threshold < 0 || rts_threshold > 2347) ++ if (rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); +diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h +index 5d45a1a..6f5f041 100644 +--- a/drivers/net/wireless/rt2x00/rt2x00.h ++++ b/drivers/net/wireless/rt2x00/rt2x00.h +@@ -375,7 +375,7 @@ struct rt2x00_intf { + * for hardware which doesn't support hardware + * sequence counting. + */ +- atomic_t seqno; ++ atomic_unchecked_t seqno; + }; + + static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) +diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c +index 5642ccc..01f03eb 100644 +--- a/drivers/net/wireless/rt2x00/rt2x00queue.c ++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c +@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, + * sequence counter given by mac80211. + */ + if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) +- seqno = atomic_add_return(0x10, &intf->seqno); ++ seqno = atomic_add_return_unchecked(0x10, &intf->seqno); + else +- seqno = atomic_read(&intf->seqno); ++ seqno = atomic_read_unchecked(&intf->seqno); + + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seqno); +diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c +index e2b3d9c..67a5184 100644 +--- a/drivers/net/wireless/ti/wl1251/sdio.c ++++ b/drivers/net/wireless/ti/wl1251/sdio.c +@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func, + + irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); + +- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; +- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; ++ pax_open_kernel(); ++ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; ++ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; ++ pax_close_kernel(); + + wl1251_info("using dedicated interrupt line"); + } else { +- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq; +- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq; ++ pax_open_kernel(); ++ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq; ++ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq; ++ pax_close_kernel(); + + wl1251_info("using SDIO interrupt"); + } +diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c +index be7129b..4161356 100644 +--- a/drivers/net/wireless/ti/wl12xx/main.c ++++ b/drivers/net/wireless/ti/wl12xx/main.c +@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl) + sizeof(wl->conf.mem)); + + /* read data preparation is only needed by wl127x */ +- wl->ops->prepare_read = wl127x_prepare_read; ++ pax_open_kernel(); ++ *(void **)&wl->ops->prepare_read = wl127x_prepare_read; ++ pax_close_kernel(); + + wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, + WL127X_IFTYPE_SR_VER, 
WL127X_MAJOR_SR_VER, +@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl) + sizeof(wl->conf.mem)); + + /* read data preparation is only needed by wl127x */ +- wl->ops->prepare_read = wl127x_prepare_read; ++ pax_open_kernel(); ++ *(void **)&wl->ops->prepare_read = wl127x_prepare_read; ++ pax_close_kernel(); + + wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, + WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER, +diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c +index ec37b16..7e34d66 100644 +--- a/drivers/net/wireless/ti/wl18xx/main.c ++++ b/drivers/net/wireless/ti/wl18xx/main.c +@@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *wl) + } + + if (!checksum_param) { +- wl18xx_ops.set_rx_csum = NULL; +- wl18xx_ops.init_vif = NULL; ++ pax_open_kernel(); ++ *(void **)&wl18xx_ops.set_rx_csum = NULL; ++ *(void **)&wl18xx_ops.init_vif = NULL; ++ pax_close_kernel(); + } + + /* Enable 11a Band only if we have 5G antennas */ +diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c +index a912dc0..a8225ba 100644 +--- a/drivers/net/wireless/zd1211rw/zd_usb.c ++++ b/drivers/net/wireless/zd1211rw/zd_usb.c +@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb) + { + struct zd_usb *usb = urb->context; + struct zd_usb_interrupt *intr = &usb->intr; +- int len; ++ unsigned int len; + u16 int_num; + + ZD_ASSERT(in_interrupt()); +diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c +index 683671a..4519fc2 100644 +--- a/drivers/nfc/nfcwilink.c ++++ b/drivers/nfc/nfcwilink.c +@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = { + + static int nfcwilink_probe(struct platform_device *pdev) + { +- static struct nfcwilink *drv; ++ struct nfcwilink *drv; + int rc; + __u32 protocols; + +diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c +index d93b2b6..ae50401 100644 +--- a/drivers/oprofile/buffer_sync.c ++++ b/drivers/oprofile/buffer_sync.c +@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm) + if (cookie == NO_COOKIE) + offset = pc; + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + offset = pc; + } + if (cookie != last_cookie) { +@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) + /* add userspace sample */ + + if (!mm) { +- atomic_inc(&oprofile_stats.sample_lost_no_mm); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); + return 0; + } + + cookie = lookup_dcookie(mm, s->eip, &offset); + + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + return 0; + } + +@@ -552,7 +552,7 @@ void sync_buffer(int cpu) + /* ignore backtraces if failed to add a sample */ + if (state == sb_bt_start) { + state = sb_bt_ignore; +- atomic_inc(&oprofile_stats.bt_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); + } + } + release_mm(mm); +diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c +index c0cc4e7..44d4e54 100644 +--- a/drivers/oprofile/event_buffer.c ++++ b/drivers/oprofile/event_buffer.c +@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value) + } + + if (buffer_pos == buffer_size) { +- atomic_inc(&oprofile_stats.event_lost_overflow); ++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); + return; + } + +diff --git 
a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c +index ed2c3ec..deda85a 100644 +--- a/drivers/oprofile/oprof.c ++++ b/drivers/oprofile/oprof.c +@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work) + if (oprofile_ops.switch_events()) + return; + +- atomic_inc(&oprofile_stats.multiplex_counter); ++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter); + start_switch_worker(); + } + +diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c +index ee2cfce..7f8f699 100644 +--- a/drivers/oprofile/oprofile_files.c ++++ b/drivers/oprofile/oprofile_files.c +@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice; + + #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX + +-static ssize_t timeout_read(struct file *file, char __user *buf, ++static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) + { + return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice), +diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c +index 59659ce..6c860a0 100644 +--- a/drivers/oprofile/oprofile_stats.c ++++ b/drivers/oprofile/oprofile_stats.c +@@ -30,11 +30,11 @@ void oprofile_reset_stats(void) + cpu_buf->sample_invalid_eip = 0; + } + +- atomic_set(&oprofile_stats.sample_lost_no_mm, 0); +- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.event_lost_overflow, 0); +- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.multiplex_counter, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); ++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); + } + + +diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h +index 1fc622b..8c48fc3 100644 +--- a/drivers/oprofile/oprofile_stats.h ++++ b/drivers/oprofile/oprofile_stats.h +@@ -13,11 +13,11 @@ + #include <linux/atomic.h> + + struct oprofile_stat_struct { +- atomic_t sample_lost_no_mm; +- atomic_t sample_lost_no_mapping; +- atomic_t bt_lost_no_mapping; +- atomic_t event_lost_overflow; +- atomic_t multiplex_counter; ++ atomic_unchecked_t sample_lost_no_mm; ++ atomic_unchecked_t sample_lost_no_mapping; ++ atomic_unchecked_t bt_lost_no_mapping; ++ atomic_unchecked_t event_lost_overflow; ++ atomic_unchecked_t multiplex_counter; + }; + + extern struct oprofile_stat_struct oprofile_stats; +diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c +index 3f49345..c750d0b 100644 +--- a/drivers/oprofile/oprofilefs.c ++++ b/drivers/oprofile/oprofilefs.c +@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root, + + static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset) + { +- atomic_t *val = file->private_data; +- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); ++ atomic_unchecked_t *val = file->private_data; ++ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset); + } + + +@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = { + + + int oprofilefs_create_ro_atomic(struct dentry *root, +- char const *name, atomic_t *val) ++ char const *name, atomic_unchecked_t *val) + { + return __oprofilefs_create_file(root, name, + &atomic_ro_fops, 0444, val); +diff --git a/drivers/oprofile/timer_int.c 
b/drivers/oprofile/timer_int.c +index 61be1d9..dec05d7 100644 +--- a/drivers/oprofile/timer_int.c ++++ b/drivers/oprofile/timer_int.c +@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata oprofile_cpu_notifier = { ++static struct notifier_block oprofile_cpu_notifier = { + .notifier_call = oprofile_cpu_notify, + }; + +diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c +index 92ed045..62d39bd7 100644 +--- a/drivers/parport/procfs.c ++++ b/drivers/parport/procfs.c +@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write, + + *ppos += len; + +- return copy_to_user(result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0; + } + + #ifdef CONFIG_PARPORT_1284 +@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write, + + *ppos += len; + +- return copy_to_user (result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0; + } + #endif /* IEEE1284.3 support. */ + +diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c +index 8dcccff..35d701d 100644 +--- a/drivers/pci/hotplug/acpiphp_ibm.c ++++ b/drivers/pci/hotplug/acpiphp_ibm.c +@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void) + goto init_cleanup; + } + +- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL); ++ pax_open_kernel(); ++ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL); ++ pax_close_kernel(); + retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr); + + return retval; +diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c +index 7536eef..52dc8fa 100644 +--- a/drivers/pci/hotplug/cpcihp_generic.c ++++ b/drivers/pci/hotplug/cpcihp_generic.c +@@ -73,7 +73,6 @@ static u16 port; + static unsigned int enum_bit; + static u8 enum_mask; + +-static struct cpci_hp_controller_ops generic_hpc_ops; + static struct cpci_hp_controller generic_hpc; + + static int __init validate_parameters(void) +@@ -139,6 +138,10 @@ static int query_enum(void) + return ((value & enum_mask) == enum_mask); + } + ++static struct cpci_hp_controller_ops generic_hpc_ops = { ++ .query_enum = query_enum, ++}; ++ + static int __init cpcihp_generic_init(void) + { + int status; +@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void) + pci_dev_put(dev); + + memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller)); +- generic_hpc_ops.query_enum = query_enum; + generic_hpc.ops = &generic_hpc_ops; + + status = cpci_hp_register_controller(&generic_hpc); +diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c +index e8c4a7c..7046f5c 100644 +--- a/drivers/pci/hotplug/cpcihp_zt5550.c ++++ b/drivers/pci/hotplug/cpcihp_zt5550.c +@@ -59,7 +59,6 @@ + /* local variables */ + static bool debug; + static bool poll; +-static struct cpci_hp_controller_ops zt5550_hpc_ops; + static struct cpci_hp_controller zt5550_hpc; + + /* Primary cPCI bus bridge device */ +@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void) + return 0; + } + ++static struct cpci_hp_controller_ops zt5550_hpc_ops = { ++ .query_enum = zt5550_hc_query_enum, ++}; ++ + static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) + { + int status; +@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id + dbg("returned from zt5550_hc_config"); + + 
memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
+- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
+ zt5550_hpc.ops = &zt5550_hpc_ops;
+ if(!poll) {
+ zt5550_hpc.irq = hc_dev->irq;
+ zt5550_hpc.irq_flags = IRQF_SHARED;
+ zt5550_hpc.dev_id = hc_dev;
+
+- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++ pax_open_kernel();
++ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
++ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
++ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++ pax_close_kernel();
+ } else {
+ info("using ENUM# polling mode");
+ }
+diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
+index 76ba8a1..20ca857 100644
+--- a/drivers/pci/hotplug/cpqphp_nvram.c
++++ b/drivers/pci/hotplug/cpqphp_nvram.c
+@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
+
+ void compaq_nvram_init (void __iomem *rom_start)
+ {
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if (rom_start) {
+ compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+ }
++#endif
++
+ dbg("int15 entry = %p\n", compaq_int15_entry_point);
+
+ /* initialize our int15 lock */
+diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
+index cfa92a9..29539c5 100644
+--- a/drivers/pci/hotplug/pci_hotplug_core.c
++++ b/drivers/pci/hotplug/pci_hotplug_core.c
+@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ return -EINVAL;
+ }
+
+- slot->ops->owner = owner;
+- slot->ops->mod_name = mod_name;
++ pax_open_kernel();
++ *(struct module **)&slot->ops->owner = owner;
++ *(const char **)&slot->ops->mod_name = mod_name;
++ pax_close_kernel();
+
+ mutex_lock(&pci_hp_mutex);
+ /*
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 53b58de..4479896 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
+ struct slot *slot = ctrl->slot;
+ struct hotplug_slot *hotplug = NULL;
+ struct hotplug_slot_info *info = NULL;
+- struct hotplug_slot_ops *ops = NULL;
++ hotplug_slot_ops_no_const *ops = NULL;
+ char name[SLOT_NAME_SIZE];
+ int retval = -ENOMEM;
+
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index fb02fc2..83dc2c3 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -524,8 +524,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
+ {
+ struct attribute **msi_attrs;
+ struct attribute *msi_attr;
+- struct device_attribute *msi_dev_attr;
+- struct attribute_group *msi_irq_group;
++ device_attribute_no_const *msi_dev_attr;
++ attribute_group_no_const *msi_irq_group;
+ const struct attribute_group **msi_irq_groups;
+ struct msi_desc *entry;
+ int ret = -ENOMEM;
+@@ -589,7 +589,7 @@ error_attrs:
+ count = 0;
+ msi_attr = msi_attrs[count];
+ while (msi_attr) {
+- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
++ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
+ kfree(msi_attr->name);
+ kfree(msi_dev_attr);
+ ++count;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 276ef9c..1d33a36 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1112,7 +1112,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ {
+ /* allocate attribute structure, piggyback attribute name */
+ int name_len = 
write_combine ? 13 : 10; +- struct bin_attribute *res_attr; ++ bin_attribute_no_const *res_attr; + int retval; + + res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC); +@@ -1297,7 +1297,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor + static int pci_create_capabilities_sysfs(struct pci_dev *dev) + { + int retval; +- struct bin_attribute *attr; ++ bin_attribute_no_const *attr; + + /* If the device has VPD, try to expose it in sysfs. */ + if (dev->vpd) { +@@ -1344,7 +1344,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) + { + int retval; + int rom_size = 0; +- struct bin_attribute *attr; ++ bin_attribute_no_const *attr; + + if (!sysfs_initialized) + return -EACCES; +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h +index 4df38df..b6bb7fe 100644 +--- a/drivers/pci/pci.h ++++ b/drivers/pci/pci.h +@@ -93,7 +93,7 @@ struct pci_vpd_ops { + struct pci_vpd { + unsigned int len; + const struct pci_vpd_ops *ops; +- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ ++ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */ + }; + + int pci_vpd_pci22_init(struct pci_dev *dev); +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index e1e7026..d28dd33 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -27,9 +27,9 @@ + #define MODULE_PARAM_PREFIX "pcie_aspm." + + /* Note: those are not register definitions */ +-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ +-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ +-#define ASPM_STATE_L1 (4) /* L1 state */ ++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */ ++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */ ++#define ASPM_STATE_L1 (4U) /* L1 state */ + #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) + #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1) + +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 6e34498..9911975 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + struct pci_bus_region region, inverted_region; + bool bar_too_big = false, bar_disabled = false; + +- mask = type ? PCI_ROM_ADDRESS_MASK : ~0; ++ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0; + + /* No printks while decoding is disabled! 
*/ + if (!dev->mmio_always_on) { +diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c +index 46d1378..30e452b 100644 +--- a/drivers/pci/proc.c ++++ b/drivers/pci/proc.c +@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = { + static int __init pci_proc_init(void) + { + struct pci_dev *dev = NULL; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); ++#endif + proc_create("devices", 0, proc_bus_pci_dir, + &proc_bus_pci_dev_operations); + proc_initialized = 1; +diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c +index 7f3aad0..7d604bb 100644 +--- a/drivers/platform/chrome/chromeos_laptop.c ++++ b/drivers/platform/chrome/chromeos_laptop.c +@@ -406,7 +406,7 @@ static struct chromeos_laptop cr48 = { + .callback = chromeos_laptop_dmi_matched, \ + .driver_data = (void *)&board_ + +-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { ++static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = { + { + .ident = "Samsung Series 5 550", + .matches = { +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index c5e082f..d6307a0 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -1595,6 +1595,10 @@ static int show_dsts(struct seq_file *m, void *data) + int err; + u32 retval = -1; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ return -EPERM; ++#endif ++ + err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval); + + if (err < 0) +@@ -1611,6 +1615,10 @@ static int show_devs(struct seq_file *m, void *data) + int err; + u32 retval = -1; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ return -EPERM; ++#endif ++ + err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param, + &retval); + +@@ -1635,6 +1643,10 @@ static int show_call(struct seq_file *m, void *data) + union acpi_object *obj; + acpi_status status; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ return -EPERM; ++#endif ++ + status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, + 1, asus->debug.method_id, + &input, &output); +diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c +index 62f8030..c7f2a45 100644 +--- a/drivers/platform/x86/msi-laptop.c ++++ b/drivers/platform/x86/msi-laptop.c +@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev) + + if (!quirks->ec_read_only) { + /* allow userland write sysfs file */ +- dev_attr_bluetooth.store = store_bluetooth; +- dev_attr_wlan.store = store_wlan; +- dev_attr_threeg.store = store_threeg; +- dev_attr_bluetooth.attr.mode |= S_IWUSR; +- dev_attr_wlan.attr.mode |= S_IWUSR; +- dev_attr_threeg.attr.mode |= S_IWUSR; ++ pax_open_kernel(); ++ *(void **)&dev_attr_bluetooth.store = store_bluetooth; ++ *(void **)&dev_attr_wlan.store = store_wlan; ++ *(void **)&dev_attr_threeg.store = store_threeg; ++ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR; ++ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR; ++ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR; ++ pax_close_kernel(); + } + + /* disable hardware control by fn key */ +diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c +index 70222f2..8c8ce66 100644 +--- a/drivers/platform/x86/msi-wmi.c ++++ 
b/drivers/platform/x86/msi-wmi.c +@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = { + static void msi_wmi_notify(u32 value, void *context) + { + struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; +- static struct key_entry *key; ++ struct key_entry *key; + union acpi_object *obj; + acpi_status status; + +diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c +index 8f8551a..3ace3ca 100644 +--- a/drivers/platform/x86/sony-laptop.c ++++ b/drivers/platform/x86/sony-laptop.c +@@ -2451,7 +2451,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd) + } + + /* High speed charging function */ +-static struct device_attribute *hsc_handle; ++static device_attribute_no_const *hsc_handle; + + static ssize_t sony_nc_highspeed_charging_store(struct device *dev, + struct device_attribute *attr, +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index e2a91c8..986cc9f 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void) + return 0; + } + +-void static hotkey_mask_warn_incomplete_mask(void) ++static void hotkey_mask_warn_incomplete_mask(void) + { + /* log only what the user can fix... */ + const u32 wantedmask = hotkey_driver_mask & +@@ -2321,11 +2321,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m) + } + } + +-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, +- struct tp_nvram_state *newn, +- const u32 event_mask) +-{ +- + #define TPACPI_COMPARE_KEY(__scancode, __member) \ + do { \ + if ((event_mask & (1 << __scancode)) && \ +@@ -2339,36 +2334,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + tpacpi_hotkey_send_key(__scancode); \ + } while (0) + +- void issue_volchange(const unsigned int oldvol, +- const unsigned int newvol) +- { +- unsigned int i = oldvol; ++static void issue_volchange(const unsigned int oldvol, ++ const unsigned int newvol, ++ const u32 event_mask) ++{ ++ unsigned int i = oldvol; + +- while (i > newvol) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); +- i--; +- } +- while (i < newvol) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); +- i++; +- } ++ while (i > newvol) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN); ++ i--; + } ++ while (i < newvol) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); ++ i++; ++ } ++} + +- void issue_brightnesschange(const unsigned int oldbrt, +- const unsigned int newbrt) +- { +- unsigned int i = oldbrt; ++static void issue_brightnesschange(const unsigned int oldbrt, ++ const unsigned int newbrt, ++ const u32 event_mask) ++{ ++ unsigned int i = oldbrt; + +- while (i > newbrt) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); +- i--; +- } +- while (i < newbrt) { +- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); +- i++; +- } ++ while (i > newbrt) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND); ++ i--; ++ } ++ while (i < newbrt) { ++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); ++ i++; + } ++} + ++static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, ++ struct tp_nvram_state *newn, ++ const u32 event_mask) ++{ + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); + TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); +@@ -2402,7 +2403,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + 
oldn->volume_level != newn->volume_level) { + /* recently muted, or repeated mute keypress, or + * multiple presses ending in mute */ +- issue_volchange(oldn->volume_level, newn->volume_level); ++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask); + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE); + } + } else { +@@ -2412,7 +2413,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP); + } + if (oldn->volume_level != newn->volume_level) { +- issue_volchange(oldn->volume_level, newn->volume_level); ++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask); + } else if (oldn->volume_toggle != newn->volume_toggle) { + /* repeated vol up/down keypress at end of scale ? */ + if (newn->volume_level == 0) +@@ -2425,7 +2426,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + /* handle brightness */ + if (oldn->brightness_level != newn->brightness_level) { + issue_brightnesschange(oldn->brightness_level, +- newn->brightness_level); ++ newn->brightness_level, ++ event_mask); + } else if (oldn->brightness_toggle != newn->brightness_toggle) { + /* repeated key presses that didn't change state */ + if (newn->brightness_level == 0) +@@ -2434,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + && !tp_features.bright_unkfw) + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME); + } ++} + + #undef TPACPI_COMPARE_KEY + #undef TPACPI_MAY_SEND_KEY +-} + + /* + * Polling driver +diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c +index 769d265..a3a05ca 100644 +--- a/drivers/pnp/pnpbios/bioscalls.c ++++ b/drivers/pnp/pnpbios/bioscalls.c +@@ -58,7 +58,7 @@ do { \ + set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ + } while(0) + +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + /* +@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + + cpu = get_cpu(); + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; ++ ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); +@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + :"memory"); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + /* If we get here and this is set then the PnP BIOS faulted on us. 
*/ +@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base) + return status; + } + +-void pnpbios_calls_init(union pnp_bios_install_struct *header) ++void __init pnpbios_calls_init(union pnp_bios_install_struct *header) + { + int i; + +@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) + pnp_bios_callpoint.offset = header->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + ++ pax_open_kernel(); ++ + for_each_possible_cpu(i) { + struct desc_struct *gdt = get_cpu_gdt_table(i); + if (!gdt) +@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) + set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], + (unsigned long)__va(header->fields.pm16dseg)); + } ++ ++ pax_close_kernel(); + } +diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c +index bacddd1..65ea100 100644 +--- a/drivers/pnp/resource.c ++++ b/drivers/pnp/resource.c +@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) + return 1; + + /* check if the resource is valid */ +- if (*irq < 0 || *irq > 15) ++ if (*irq > 15) + return 0; + + /* check if the resource is reserved */ +@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) + return 1; + + /* check if the resource is valid */ +- if (*dma < 0 || *dma == 4 || *dma > 7) ++ if (*dma == 4 || *dma > 7) + return 0; + + /* check if the resource is reserved */ +diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c +index 0c52e2a..3421ab7 100644 +--- a/drivers/power/pda_power.c ++++ b/drivers/power/pda_power.c +@@ -37,7 +37,11 @@ static int polling; + + #if IS_ENABLED(CONFIG_USB_PHY) + static struct usb_phy *transceiver; +-static struct notifier_block otg_nb; ++static int otg_handle_notification(struct notifier_block *nb, ++ unsigned long event, void *unused); ++static struct notifier_block otg_nb = { ++ .notifier_call = otg_handle_notification ++}; + #endif + + static struct regulator *ac_draw; +@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev) + + #if IS_ENABLED(CONFIG_USB_PHY) + if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) { +- otg_nb.notifier_call = otg_handle_notification; + ret = usb_register_notifier(transceiver, &otg_nb); + if (ret) { + dev_err(dev, "failure to register otg notifier\n"); +diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h +index cc439fd..8fa30df 100644 +--- a/drivers/power/power_supply.h ++++ b/drivers/power/power_supply.h +@@ -16,12 +16,12 @@ struct power_supply; + + #ifdef CONFIG_SYSFS + +-extern void power_supply_init_attrs(struct device_type *dev_type); ++extern void power_supply_init_attrs(void); + extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env); + + #else + +-static inline void power_supply_init_attrs(struct device_type *dev_type) {} ++static inline void power_supply_init_attrs(void) {} + #define power_supply_uevent NULL + + #endif /* CONFIG_SYSFS */ +diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c +index 2660664..75fcb04 100644 +--- a/drivers/power/power_supply_core.c ++++ b/drivers/power/power_supply_core.c +@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class); + ATOMIC_NOTIFIER_HEAD(power_supply_notifier); + EXPORT_SYMBOL_GPL(power_supply_notifier); + +-static struct device_type power_supply_dev_type; ++extern const struct attribute_group *power_supply_attr_groups[]; ++static struct device_type power_supply_dev_type = { ++ .groups = power_supply_attr_groups, ++}; + + static bool 
__power_supply_is_supplied_by(struct power_supply *supplier, + struct power_supply *supply) +@@ -628,7 +631,7 @@ static int __init power_supply_class_init(void) + return PTR_ERR(power_supply_class); + + power_supply_class->dev_uevent = power_supply_uevent; +- power_supply_init_attrs(&power_supply_dev_type); ++ power_supply_init_attrs(); + + return 0; + } +diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c +index 44420d1..967126e 100644 +--- a/drivers/power/power_supply_sysfs.c ++++ b/drivers/power/power_supply_sysfs.c +@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = { + .is_visible = power_supply_attr_is_visible, + }; + +-static const struct attribute_group *power_supply_attr_groups[] = { ++const struct attribute_group *power_supply_attr_groups[] = { + &power_supply_attr_group, + NULL, + }; + +-void power_supply_init_attrs(struct device_type *dev_type) ++void power_supply_init_attrs(void) + { + int i; + +- dev_type->groups = power_supply_attr_groups; +- + for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) + __power_supply_attrs[i] = &power_supply_attrs[i].attr; + } +diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c +index 84419af..268ede8 100644 +--- a/drivers/powercap/powercap_sys.c ++++ b/drivers/powercap/powercap_sys.c +@@ -154,8 +154,77 @@ struct powercap_constraint_attr { + struct device_attribute name_attr; + }; + ++static ssize_t show_constraint_name(struct device *dev, ++ struct device_attribute *dev_attr, ++ char *buf); ++ + static struct powercap_constraint_attr +- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE]; ++ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = { ++ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = { ++ .power_limit_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IWUSR | S_IRUGO ++ }, ++ .show = show_constraint_power_limit_uw, ++ .store = store_constraint_power_limit_uw ++ }, ++ ++ .time_window_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IWUSR | S_IRUGO ++ }, ++ .show = show_constraint_time_window_us, ++ .store = store_constraint_time_window_us ++ }, ++ ++ .max_power_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IRUGO ++ }, ++ .show = show_constraint_max_power_uw, ++ .store = NULL ++ }, ++ ++ .min_power_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IRUGO ++ }, ++ .show = show_constraint_min_power_uw, ++ .store = NULL ++ }, ++ ++ .max_time_window_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IRUGO ++ }, ++ .show = show_constraint_max_time_window_us, ++ .store = NULL ++ }, ++ ++ .min_time_window_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IRUGO ++ }, ++ .show = show_constraint_min_time_window_us, ++ .store = NULL ++ }, ++ ++ .name_attr = { ++ .attr = { ++ .name = NULL, ++ .mode = S_IRUGO ++ }, ++ .show = show_constraint_name, ++ .store = NULL ++ } ++ } ++}; + + /* A list of powercap control_types */ + static LIST_HEAD(powercap_cntrl_list); +@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev, + } + + static int create_constraint_attribute(int id, const char *name, +- int mode, +- struct device_attribute *dev_attr, +- ssize_t (*show)(struct device *, +- struct device_attribute *, char *), +- ssize_t (*store)(struct device *, +- struct device_attribute *, +- const char *, size_t) +- ) ++ struct device_attribute *dev_attr) + { ++ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name); + +- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s", +- id, name); +- if (!dev_attr->attr.name) ++ if (!name) + return -ENOMEM; 
+- dev_attr->attr.mode = mode; +- dev_attr->show = show; +- dev_attr->store = store; ++ ++ pax_open_kernel(); ++ *(const char **)&dev_attr->attr.name = name; ++ pax_close_kernel(); + + return 0; + } +@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void) + + for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) { + ret = create_constraint_attribute(i, "power_limit_uw", +- S_IWUSR | S_IRUGO, +- &constraint_attrs[i].power_limit_attr, +- show_constraint_power_limit_uw, +- store_constraint_power_limit_uw); ++ &constraint_attrs[i].power_limit_attr); + if (ret) + goto err_alloc; + ret = create_constraint_attribute(i, "time_window_us", +- S_IWUSR | S_IRUGO, +- &constraint_attrs[i].time_window_attr, +- show_constraint_time_window_us, +- store_constraint_time_window_us); ++ &constraint_attrs[i].time_window_attr); + if (ret) + goto err_alloc; +- ret = create_constraint_attribute(i, "name", S_IRUGO, +- &constraint_attrs[i].name_attr, +- show_constraint_name, +- NULL); ++ ret = create_constraint_attribute(i, "name", ++ &constraint_attrs[i].name_attr); + if (ret) + goto err_alloc; +- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO, +- &constraint_attrs[i].max_power_attr, +- show_constraint_max_power_uw, +- NULL); ++ ret = create_constraint_attribute(i, "max_power_uw", ++ &constraint_attrs[i].max_power_attr); + if (ret) + goto err_alloc; +- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO, +- &constraint_attrs[i].min_power_attr, +- show_constraint_min_power_uw, +- NULL); ++ ret = create_constraint_attribute(i, "min_power_uw", ++ &constraint_attrs[i].min_power_attr); + if (ret) + goto err_alloc; + ret = create_constraint_attribute(i, "max_time_window_us", +- S_IRUGO, +- &constraint_attrs[i].max_time_window_attr, +- show_constraint_max_time_window_us, +- NULL); ++ &constraint_attrs[i].max_time_window_attr); + if (ret) + goto err_alloc; + ret = create_constraint_attribute(i, "min_time_window_us", +- S_IRUGO, +- &constraint_attrs[i].min_time_window_attr, +- show_constraint_min_time_window_us, +- NULL); ++ &constraint_attrs[i].min_time_window_attr); + if (ret) + goto err_alloc; + +@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes( + power_zone->zone_dev_attrs[count++] = + &dev_attr_max_energy_range_uj.attr; + if (power_zone->ops->get_energy_uj) { ++ pax_open_kernel(); + if (power_zone->ops->reset_energy_uj) +- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; ++ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; + else +- dev_attr_energy_uj.attr.mode = S_IRUGO; ++ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO; ++ pax_close_kernel(); + power_zone->zone_dev_attrs[count++] = + &dev_attr_energy_uj.attr; + } +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index afca1bc..86840b8 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -3366,7 +3366,7 @@ regulator_register(const struct regulator_desc *regulator_desc, + { + const struct regulation_constraints *constraints = NULL; + const struct regulator_init_data *init_data; +- static atomic_t regulator_no = ATOMIC_INIT(0); ++ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0); + struct regulator_dev *rdev; + struct device *dev; + int ret, i; +@@ -3436,7 +3436,7 @@ regulator_register(const struct regulator_desc *regulator_desc, + rdev->dev.of_node = config->of_node; + rdev->dev.parent = dev; + dev_set_name(&rdev->dev, "regulator.%d", +- atomic_inc_return(®ulator_no) - 1); ++ atomic_inc_return_unchecked(®ulator_no) - 1); + ret = device_register(&rdev->dev); + 
if (ret != 0) { + put_device(&rdev->dev); +diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c +index 8d94d3d..653b623 100644 +--- a/drivers/regulator/max8660.c ++++ b/drivers/regulator/max8660.c +@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client, + max8660->shadow_regs[MAX8660_OVER1] = 5; + } else { + /* Otherwise devices can be toggled via software */ +- max8660_dcdc_ops.enable = max8660_dcdc_enable; +- max8660_dcdc_ops.disable = max8660_dcdc_disable; ++ pax_open_kernel(); ++ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable; ++ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable; ++ pax_close_kernel(); + } + + /* +diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c +index 892aa1e..ebd1b9c 100644 +--- a/drivers/regulator/max8973-regulator.c ++++ b/drivers/regulator/max8973-regulator.c +@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client, + if (!pdata || !pdata->enable_ext_control) { + max->desc.enable_reg = MAX8973_VOUT; + max->desc.enable_mask = MAX8973_VOUT_ENABLE; +- max->ops.enable = regulator_enable_regmap; +- max->ops.disable = regulator_disable_regmap; +- max->ops.is_enabled = regulator_is_enabled_regmap; ++ pax_open_kernel(); ++ *(void **)&max->ops.enable = regulator_enable_regmap; ++ *(void **)&max->ops.disable = regulator_disable_regmap; ++ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap; ++ pax_close_kernel(); + } + + if (pdata) { +diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c +index f374fa5..26f0683 100644 +--- a/drivers/regulator/mc13892-regulator.c ++++ b/drivers/regulator/mc13892-regulator.c +@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev) + } + mc13xxx_unlock(mc13892); + +- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode ++ pax_open_kernel(); ++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode + = mc13892_vcam_set_mode; +- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode ++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode + = mc13892_vcam_get_mode; ++ pax_close_kernel(); + + mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators, + ARRAY_SIZE(mc13892_regulators)); +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c +index cae212f..58a3980 100644 +--- a/drivers/rtc/rtc-cmos.c ++++ b/drivers/rtc/rtc-cmos.c +@@ -777,7 +777,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) + hpet_rtc_timer_init(); + + /* export at least the first block of NVRAM */ +- nvram.size = address_space - NVRAM_OFFSET; ++ pax_open_kernel(); ++ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET; ++ pax_close_kernel(); + retval = sysfs_create_bin_file(&dev->kobj, &nvram); + if (retval < 0) { + dev_dbg(dev, "can't create nvram file? 
%d\n", retval); +diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c +index d049393..bb20be0 100644 +--- a/drivers/rtc/rtc-dev.c ++++ b/drivers/rtc/rtc-dev.c +@@ -16,6 +16,7 @@ + #include <linux/module.h> + #include <linux/rtc.h> + #include <linux/sched.h> ++#include <linux/grsecurity.h> + #include "rtc-core.h" + + static dev_t rtc_devt; +@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file, + if (copy_from_user(&tm, uarg, sizeof(tm))) + return -EFAULT; + ++ gr_log_timechange(); ++ + return rtc_set_time(rtc, &tm); + + case RTC_PIE_ON: +diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c +index 4e75345..09f8663 100644 +--- a/drivers/rtc/rtc-ds1307.c ++++ b/drivers/rtc/rtc-ds1307.c +@@ -107,7 +107,7 @@ struct ds1307 { + u8 offset; /* register's offset */ + u8 regs[11]; + u16 nvram_offset; +- struct bin_attribute *nvram; ++ bin_attribute_no_const *nvram; + enum ds_type type; + unsigned long flags; + #define HAS_NVRAM 0 /* bit 0 == sysfs file active */ +diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c +index 11880c1..b823aa4 100644 +--- a/drivers/rtc/rtc-m48t59.c ++++ b/drivers/rtc/rtc-m48t59.c +@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev) + if (IS_ERR(m48t59->rtc)) + return PTR_ERR(m48t59->rtc); + +- m48t59_nvram_attr.size = pdata->offset; ++ pax_open_kernel(); ++ *(size_t *)&m48t59_nvram_attr.size = pdata->offset; ++ pax_close_kernel(); + + ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr); + if (ret) +diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c +index 14b5f8d..cc9bd26 100644 +--- a/drivers/scsi/aic7xxx/aic79xx_pci.c ++++ b/drivers/scsi/aic7xxx/aic79xx_pci.c +@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd) + for (bit = 0; bit < 8; bit++) { + + if ((pci_status[i] & (0x1 << bit)) != 0) { +- static const char *s; ++ const char *s; + + s = pci_status_strings[bit]; + if (i == 7/*TARG*/ && bit == 3) +@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) + + for (bit = 0; bit < 8; bit++) { + +- if ((split_status[i] & (0x1 << bit)) != 0) { +- static const char *s; +- +- s = split_status_strings[bit]; +- printk(s, ahd_name(ahd), ++ if ((split_status[i] & (0x1 << bit)) != 0) ++ printk(split_status_strings[bit], ahd_name(ahd), + split_status_source[i]); +- } + + if (i > 1) + continue; + +- if ((sg_split_status[i] & (0x1 << bit)) != 0) { +- static const char *s; +- +- s = split_status_strings[bit]; +- printk(s, ahd_name(ahd), "SG"); +- } ++ if ((sg_split_status[i] & (0x1 << bit)) != 0) ++ printk(split_status_strings[bit], ahd_name(ahd), "SG"); + } + } + /* +diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h +index e693af6..2e525b6 100644 +--- a/drivers/scsi/bfa/bfa_fcpim.h ++++ b/drivers/scsi/bfa/bfa_fcpim.h +@@ -36,7 +36,7 @@ struct bfa_iotag_s { + + struct bfa_itn_s { + bfa_isr_func_t isr; +-}; ++} __no_const; + + void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport, + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); +diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c +index a3ab5cc..8143622 100644 +--- a/drivers/scsi/bfa/bfa_fcs.c ++++ b/drivers/scsi/bfa/bfa_fcs.c +@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s { + #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } + + static struct bfa_fcs_mod_s fcs_modules[] = { +- { bfa_fcs_port_attach, NULL, NULL }, +- { bfa_fcs_uf_attach, NULL, NULL }, +- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, +- bfa_fcs_fabric_modexit 
}, ++ { ++ .attach = bfa_fcs_port_attach, ++ .modinit = NULL, ++ .modexit = NULL ++ }, ++ { ++ .attach = bfa_fcs_uf_attach, ++ .modinit = NULL, ++ .modexit = NULL ++ }, ++ { ++ .attach = bfa_fcs_fabric_attach, ++ .modinit = bfa_fcs_fabric_modinit, ++ .modexit = bfa_fcs_fabric_modexit ++ }, + }; + + /* +diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c +index ff75ef8..2dfe00a 100644 +--- a/drivers/scsi/bfa/bfa_fcs_lport.c ++++ b/drivers/scsi/bfa/bfa_fcs_lport.c +@@ -89,15 +89,26 @@ static struct { + void (*offline) (struct bfa_fcs_lport_s *port); + } __port_action[] = { + { +- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online, +- bfa_fcs_lport_unknown_offline}, { +- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, +- bfa_fcs_lport_fab_offline}, { +- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, +- bfa_fcs_lport_n2n_offline}, { +- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online, +- bfa_fcs_lport_loop_offline}, +- }; ++ .init = bfa_fcs_lport_unknown_init, ++ .online = bfa_fcs_lport_unknown_online, ++ .offline = bfa_fcs_lport_unknown_offline ++ }, ++ { ++ .init = bfa_fcs_lport_fab_init, ++ .online = bfa_fcs_lport_fab_online, ++ .offline = bfa_fcs_lport_fab_offline ++ }, ++ { ++ .init = bfa_fcs_lport_n2n_init, ++ .online = bfa_fcs_lport_n2n_online, ++ .offline = bfa_fcs_lport_n2n_offline ++ }, ++ { ++ .init = bfa_fcs_lport_loop_init, ++ .online = bfa_fcs_lport_loop_online, ++ .offline = bfa_fcs_lport_loop_offline ++ }, ++}; + + /* + * fcs_port_sm FCS logical port state machine +diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h +index 2e28392..9d865b6 100644 +--- a/drivers/scsi/bfa/bfa_ioc.h ++++ b/drivers/scsi/bfa/bfa_ioc.h +@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s { + bfa_ioc_disable_cbfn_t disable_cbfn; + bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +-}; ++} __no_const; + + /* + * IOC event notification mechanism. +@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s { + void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc); +-}; ++} __no_const; + + /* + * Queue element to wait for room in request queue. 
FIFO order is +diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h +index a14c784..6de6790 100644 +--- a/drivers/scsi/bfa/bfa_modules.h ++++ b/drivers/scsi/bfa/bfa_modules.h +@@ -78,12 +78,12 @@ enum { + \ + extern struct bfa_module_s hal_mod_ ## __mod; \ + struct bfa_module_s hal_mod_ ## __mod = { \ +- bfa_ ## __mod ## _meminfo, \ +- bfa_ ## __mod ## _attach, \ +- bfa_ ## __mod ## _detach, \ +- bfa_ ## __mod ## _start, \ +- bfa_ ## __mod ## _stop, \ +- bfa_ ## __mod ## _iocdisable, \ ++ .meminfo = bfa_ ## __mod ## _meminfo, \ ++ .attach = bfa_ ## __mod ## _attach, \ ++ .detach = bfa_ ## __mod ## _detach, \ ++ .start = bfa_ ## __mod ## _start, \ ++ .stop = bfa_ ## __mod ## _stop, \ ++ .iocdisable = bfa_ ## __mod ## _iocdisable, \ + } + + #define BFA_CACHELINE_SZ (256) +diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c +index 045c4e1..13de803 100644 +--- a/drivers/scsi/fcoe/fcoe_sysfs.c ++++ b/drivers/scsi/fcoe/fcoe_sysfs.c +@@ -33,8 +33,8 @@ + */ + #include "libfcoe.h" + +-static atomic_t ctlr_num; +-static atomic_t fcf_num; ++static atomic_unchecked_t ctlr_num; ++static atomic_unchecked_t fcf_num; + + /* + * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs +@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, + if (!ctlr) + goto out; + +- ctlr->id = atomic_inc_return(&ctlr_num) - 1; ++ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1; + ctlr->f = f; + ctlr->mode = FIP_CONN_TYPE_FABRIC; + INIT_LIST_HEAD(&ctlr->fcfs); +@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, + fcf->dev.parent = &ctlr->dev; + fcf->dev.bus = &fcoe_bus_type; + fcf->dev.type = &fcoe_fcf_device_type; +- fcf->id = atomic_inc_return(&fcf_num) - 1; ++ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1; + fcf->state = FCOE_FCF_STATE_UNKNOWN; + + fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; +@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void) + { + int error; + +- atomic_set(&ctlr_num, 0); +- atomic_set(&fcf_num, 0); ++ atomic_set_unchecked(&ctlr_num, 0); ++ atomic_set_unchecked(&fcf_num, 0); + + error = bus_register(&fcoe_bus_type); + if (error) +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index f28ea07..34b16d3 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -42,7 +42,7 @@ + #include "scsi_logging.h" + + +-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */ ++static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */ + + + static void scsi_host_cls_release(struct device *dev) +@@ -369,7 +369,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) + * subtract one because we increment first then return, but we need to + * know what the next host number was before increment + */ +- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; ++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1; + shost->dma_channel = 0xff; + + /* These three are default values which can be overridden */ +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c +index 528bff5..84963854 100644 +--- a/drivers/scsi/hpsa.c ++++ b/drivers/scsi/hpsa.c +@@ -571,7 +571,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) + unsigned long flags; + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) +- return h->access.command_completed(h, q); ++ return h->access->command_completed(h, q); + + if ((rq->head[rq->current_entry] & 1) == 
rq->wraparound) { + a = rq->head[rq->current_entry]; +@@ -3474,7 +3474,7 @@ static void start_io(struct ctlr_info *h) + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, struct CommandList, list); + /* can't do anything if fifo is full */ +- if ((h->access.fifo_full(h))) { ++ if ((h->access->fifo_full(h))) { + h->fifo_recently_full = 1; + dev_warn(&h->pdev->dev, "fifo full\n"); + break; +@@ -3498,7 +3498,7 @@ static void start_io(struct ctlr_info *h) + + /* Tell the controller execute command */ + spin_unlock_irqrestore(&h->lock, flags); +- h->access.submit_command(h, c); ++ h->access->submit_command(h, c); + spin_lock_irqsave(&h->lock, flags); + } + spin_unlock_irqrestore(&h->lock, flags); +@@ -3506,17 +3506,17 @@ static void start_io(struct ctlr_info *h) + + static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) + { +- return h->access.command_completed(h, q); ++ return h->access->command_completed(h, q); + } + + static inline bool interrupt_pending(struct ctlr_info *h) + { +- return h->access.intr_pending(h); ++ return h->access->intr_pending(h); + } + + static inline long interrupt_not_for_us(struct ctlr_info *h) + { +- return (h->access.intr_pending(h) == 0) || ++ return (h->access->intr_pending(h) == 0) || + (h->interrupts_enabled == 0); + } + +@@ -4442,7 +4442,7 @@ static int hpsa_pci_init(struct ctlr_info *h) + if (prod_index < 0) + return -ENODEV; + h->product_name = products[prod_index].product_name; +- h->access = *(products[prod_index].access); ++ h->access = products[prod_index].access; + + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); +@@ -4712,7 +4712,7 @@ static void controller_lockup_detected(struct ctlr_info *h) + { + unsigned long flags; + +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + spin_lock_irqsave(&h->lock, flags); + h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + spin_unlock_irqrestore(&h->lock, flags); +@@ -4843,7 +4843,7 @@ reinit_after_soft_reset: + } + + /* make sure the board interrupts are off */ +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + + if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) + goto clean2; +@@ -4877,7 +4877,7 @@ reinit_after_soft_reset: + * fake ones to scoop up any residual completions. 
+ */ + spin_lock_irqsave(&h->lock, flags); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + free_irqs(h); + rc = hpsa_request_irq(h, hpsa_msix_discard_completions, +@@ -4896,9 +4896,9 @@ reinit_after_soft_reset: + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); +- h->access.set_intr_mask(h, HPSA_INTR_ON); ++ h->access->set_intr_mask(h, HPSA_INTR_ON); + msleep(10000); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) +@@ -4919,7 +4919,7 @@ reinit_after_soft_reset: + } + + /* Turn the interrupts on so we can service requests */ +- h->access.set_intr_mask(h, HPSA_INTR_ON); ++ h->access->set_intr_mask(h, HPSA_INTR_ON); + + hpsa_hba_inquiry(h); + hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ +@@ -4988,7 +4988,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) + * To write all data in the battery backed cache to disks + */ + hpsa_flush_cache(h); +- h->access.set_intr_mask(h, HPSA_INTR_OFF); ++ h->access->set_intr_mask(h, HPSA_INTR_OFF); + hpsa_free_irqs_and_disable_msix(h); + } + +@@ -5162,7 +5162,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) + return; + } + /* Change the access methods to the performant access methods */ +- h->access = SA5_performant_access; ++ h->access = &SA5_performant_access; + h->transMethod = CFGTBL_Trans_Performant; + } + +diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h +index 01c3283..4655219 100644 +--- a/drivers/scsi/hpsa.h ++++ b/drivers/scsi/hpsa.h +@@ -79,7 +79,7 @@ struct ctlr_info { + unsigned int msix_vector; + unsigned int msi_vector; + int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ +- struct access_method access; ++ struct access_method *access; + + /* queue and queue Info */ + struct list_head reqQ; +@@ -388,19 +388,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h) + } + + static struct access_method SA5_access = { +- SA5_submit_command, +- SA5_intr_mask, +- SA5_fifo_full, +- SA5_intr_pending, +- SA5_completed, ++ .submit_command = SA5_submit_command, ++ .set_intr_mask = SA5_intr_mask, ++ .fifo_full = SA5_fifo_full, ++ .intr_pending = SA5_intr_pending, ++ .command_completed = SA5_completed, + }; + + static struct access_method SA5_performant_access = { +- SA5_submit_command, +- SA5_performant_intr_mask, +- SA5_fifo_full, +- SA5_performant_intr_pending, +- SA5_performant_completed, ++ .submit_command = SA5_submit_command, ++ .set_intr_mask = SA5_performant_intr_mask, ++ .fifo_full = SA5_fifo_full, ++ .intr_pending = SA5_performant_intr_pending, ++ .command_completed = SA5_performant_completed, + }; + + struct board_type { +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index 1b3a094..068e683 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -101,12 +101,12 @@ struct fc_exch_mgr { + u16 pool_max_index; + + struct { +- atomic_t no_free_exch; +- atomic_t no_free_exch_xid; +- atomic_t xid_not_found; +- atomic_t xid_busy; +- atomic_t seq_not_found; +- atomic_t non_bls_resp; ++ atomic_unchecked_t no_free_exch; ++ atomic_unchecked_t no_free_exch_xid; ++ atomic_unchecked_t xid_not_found; ++ atomic_unchecked_t xid_busy; ++ atomic_unchecked_t seq_not_found; ++ atomic_unchecked_t non_bls_resp; + } stats; + }; + +@@ -811,7 +811,7 @@ static struct fc_exch 
*fc_exch_em_alloc(struct fc_lport *lport, + /* allocate memory for exchange */ + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); + if (!ep) { +- atomic_inc(&mp->stats.no_free_exch); ++ atomic_inc_unchecked(&mp->stats.no_free_exch); + goto out; + } + memset(ep, 0, sizeof(*ep)); +@@ -874,7 +874,7 @@ out: + return ep; + err: + spin_unlock_bh(&pool->lock); +- atomic_inc(&mp->stats.no_free_exch_xid); ++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid); + mempool_free(ep, mp->ep_pool); + return NULL; + } +@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + xid = ntohs(fh->fh_ox_id); /* we originated exch */ + ep = fc_exch_find(mp, xid); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_OX_ID; + goto out; + } +@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + ep = fc_exch_find(mp, xid); + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { + if (ep) { +- atomic_inc(&mp->stats.xid_busy); ++ atomic_inc_unchecked(&mp->stats.xid_busy); + reject = FC_RJT_RX_ID; + goto rel; + } +@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + } + xid = ep->xid; /* get our XID */ + } else if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_RX_ID; /* XID not found */ + goto out; + } +@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + } else { + sp = &ep->seq; + if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + if (f_ctl & FC_FC_END_SEQ) { + /* + * Update sequence_id based on incoming last +@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto out; + } + if (ep->esb_stat & ESB_ST_COMPLETE) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + if (ep->rxid == FC_XID_UNKNOWN) + ep->rxid = ntohs(fh->fh_rx_id); + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + if (ep->did != ntoh24(fh->fh_s_id) && + ep->did != FC_FID_FLOGI) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + sof = fr_sof(fp); +@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + } else if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + goto rel; + } + +@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ + + if (!sp) +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + else +- atomic_inc(&mp->stats.non_bls_resp); ++ atomic_inc_unchecked(&mp->stats.non_bls_resp); + + fc_frame_free(fp); + } +@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport) + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + mp = ema->mp; +- st->fc_no_free_exch += 
atomic_read(&mp->stats.no_free_exch); ++ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch); + st->fc_no_free_exch_xid += +- atomic_read(&mp->stats.no_free_exch_xid); +- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found); +- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy); +- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found); +- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp); ++ atomic_read_unchecked(&mp->stats.no_free_exch_xid); ++ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found); ++ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy); ++ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found); ++ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp); + } + } + EXPORT_SYMBOL(fc_exch_update_stats); +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index d289583..b745eec 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = { + .postreset = ata_std_postreset, + .error_handler = ata_std_error_handler, + .post_internal_cmd = sas_ata_post_internal, +- .qc_defer = ata_std_qc_defer, ++ .qc_defer = ata_std_qc_defer, + .qc_prep = ata_noop_qc_prep, + .qc_issue = sas_ata_qc_issue, + .qc_fill_rtf = sas_ata_qc_fill_rtf, +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h +index 4e1b75c..0bbdfa9 100644 +--- a/drivers/scsi/lpfc/lpfc.h ++++ b/drivers/scsi/lpfc/lpfc.h +@@ -432,7 +432,7 @@ struct lpfc_vport { + struct dentry *debug_nodelist; + struct dentry *vport_debugfs_root; + struct lpfc_debugfs_trc *disc_trc; +- atomic_t disc_trc_cnt; ++ atomic_unchecked_t disc_trc_cnt; + #endif + uint8_t stat_data_enabled; + uint8_t stat_data_blocked; +@@ -865,8 +865,8 @@ struct lpfc_hba { + struct timer_list fabric_block_timer; + unsigned long bit_flags; + #define FABRIC_COMANDS_BLOCKED 0 +- atomic_t num_rsrc_err; +- atomic_t num_cmd_success; ++ atomic_unchecked_t num_rsrc_err; ++ atomic_unchecked_t num_cmd_success; + unsigned long last_rsrc_error_time; + unsigned long last_ramp_down_time; + unsigned long last_ramp_up_time; +@@ -902,7 +902,7 @@ struct lpfc_hba { + + struct dentry *debug_slow_ring_trc; + struct lpfc_debugfs_trc *slow_ring_trc; +- atomic_t slow_ring_trc_cnt; ++ atomic_unchecked_t slow_ring_trc_cnt; + /* iDiag debugfs sub-directory */ + struct dentry *idiag_root; + struct dentry *idiag_pci_cfg; +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c +index b800cc9..16b6a91 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c +@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, + + #include <linux/debugfs.h> + +-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); ++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); + static unsigned long lpfc_debugfs_start_time = 0L; + + /* iDiag */ +@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size) + lpfc_debugfs_enable = 0; + + len = 0; +- index = (atomic_read(&vport->disc_trc_cnt) + 1) & ++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) & + (lpfc_debugfs_max_disc_trc - 1); + for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { + dtp = vport->disc_trc + i; +@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size) + lpfc_debugfs_enable = 0; + + len = 0; +- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & ++ index = 
(atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) & + (lpfc_debugfs_max_slow_ring_trc - 1); + for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { + dtp = phba->slow_ring_trc + i; +@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, + !vport || !vport->disc_trc) + return; + +- index = atomic_inc_return(&vport->disc_trc_cnt) & ++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) & + (lpfc_debugfs_max_disc_trc - 1); + dtp = vport->disc_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; + #endif + return; +@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, + !phba || !phba->slow_ring_trc) + return; + +- index = atomic_inc_return(&phba->slow_ring_trc_cnt) & ++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) & + (lpfc_debugfs_max_slow_ring_trc - 1); + dtp = phba->slow_ring_trc + index; + dtp->fmt = fmt; + dtp->data1 = data1; + dtp->data2 = data2; + dtp->data3 = data3; +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); + dtp->jif = jiffies; + #endif + return; +@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + "slow_ring buffer\n"); + goto debug_failed; + } +- atomic_set(&phba->slow_ring_trc_cnt, 0); ++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0); + memset(phba->slow_ring_trc, 0, + (sizeof(struct lpfc_debugfs_trc) * + lpfc_debugfs_max_slow_ring_trc)); +@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) + "buffer\n"); + goto debug_failed; + } +- atomic_set(&vport->disc_trc_cnt, 0); ++ atomic_set_unchecked(&vport->disc_trc_cnt, 0); + + snprintf(name, sizeof(name), "discovery_trace"); + vport->debug_disc_trc = +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 68c94cc..8c27be5 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -10949,8 +10949,10 @@ lpfc_init(void) + "misc_register returned with status %d", error); + + if (lpfc_enable_npiv) { +- lpfc_transport_functions.vport_create = lpfc_vport_create; +- lpfc_transport_functions.vport_delete = lpfc_vport_delete; ++ pax_open_kernel(); ++ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create; ++ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete; ++ pax_close_kernel(); + } + lpfc_transport_template = + fc_attach_transport(&lpfc_transport_functions); +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index b2ede05..aaf482ca 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) + uint32_t evt_posted; + + spin_lock_irqsave(&phba->hbalock, flags); +- atomic_inc(&phba->num_rsrc_err); ++ atomic_inc_unchecked(&phba->num_rsrc_err); + phba->last_rsrc_error_time = jiffies; + + if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { +@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, + unsigned long flags; + struct lpfc_hba *phba = vport->phba; + uint32_t evt_posted; +- atomic_inc(&phba->num_cmd_success); ++ atomic_inc_unchecked(&phba->num_cmd_success); + + if (vport->cfg_lun_queue_depth <= queue_depth) + return; +@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct 
lpfc_hba *phba) + unsigned long num_rsrc_err, num_cmd_success; + int i; + +- num_rsrc_err = atomic_read(&phba->num_rsrc_err); +- num_cmd_success = atomic_read(&phba->num_cmd_success); ++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err); ++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success); + + /* + * The error and success command counters are global per +@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) + } + } + lpfc_destroy_vport_work_array(phba, vports); +- atomic_set(&phba->num_rsrc_err, 0); +- atomic_set(&phba->num_cmd_success, 0); ++ atomic_set_unchecked(&phba->num_rsrc_err, 0); ++ atomic_set_unchecked(&phba->num_cmd_success, 0); + } + + /** +@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) + } + } + lpfc_destroy_vport_work_array(phba, vports); +- atomic_set(&phba->num_rsrc_err, 0); +- atomic_set(&phba->num_cmd_success, 0); ++ atomic_set_unchecked(&phba->num_rsrc_err, 0); ++ atomic_set_unchecked(&phba->num_cmd_success, 0); + } + + /** +diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c +index 6fd7d40..b444223 100644 +--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c ++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c +@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev) + { + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host); +- static struct _raid_device *raid_device; ++ struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; +@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev) + { + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host); +- static struct _raid_device *raid_device; ++ struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; +@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) + { + Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; +- static struct _raid_device *raid_device; ++ struct _raid_device *raid_device; + unsigned long flags; + u16 handle; + +@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) + u64 sas_address; + struct _sas_device *sas_device; + struct _sas_node *expander_device; +- static struct _raid_device *raid_device; ++ struct _raid_device *raid_device; + u8 retry_count; + unsigned long flags; + +diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c +index be8ce54..94ed33a 100644 +--- a/drivers/scsi/pmcraid.c ++++ b/drivers/scsi/pmcraid.c +@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) + res->scsi_dev = scsi_dev; + scsi_dev->hostdata = res; + res->change_detected = 0; +- atomic_set(&res->read_failures, 0); +- atomic_set(&res->write_failures, 0); ++ atomic_set_unchecked(&res->read_failures, 0); ++ atomic_set_unchecked(&res->write_failures, 0); + rc = 0; + } + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); +@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) + + /* If this was a SCSI read/write command keep count of errors */ + if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) +- atomic_inc(&res->read_failures); ++ atomic_inc_unchecked(&res->read_failures); + else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) +- atomic_inc(&res->write_failures); ++ atomic_inc_unchecked(&res->write_failures); + + if 
(!RES_IS_GSCSI(res->cfg_entry) && + masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { +@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck( + * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses + * hrrq_id assigned here in queuecommand + */ +- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % ++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % + pinstance->num_hrrq; + cmd->cmd_done = pmcraid_io_done; + +@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough( + * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses + * hrrq_id assigned here in queuecommand + */ +- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % ++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) % + pinstance->num_hrrq; + + if (request_size) { +@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp) + + pinstance = container_of(workp, struct pmcraid_instance, worker_q); + /* add resources only after host is added into system */ +- if (!atomic_read(&pinstance->expose_resources)) ++ if (!atomic_read_unchecked(&pinstance->expose_resources)) + return; + + fw_version = be16_to_cpu(pinstance->inq_data->fw_version); +@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host, + init_waitqueue_head(&pinstance->reset_wait_q); + + atomic_set(&pinstance->outstanding_cmds, 0); +- atomic_set(&pinstance->last_message_id, 0); +- atomic_set(&pinstance->expose_resources, 0); ++ atomic_set_unchecked(&pinstance->last_message_id, 0); ++ atomic_set_unchecked(&pinstance->expose_resources, 0); + + INIT_LIST_HEAD(&pinstance->free_res_q); + INIT_LIST_HEAD(&pinstance->used_res_q); +@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev, + /* Schedule worker thread to handle CCN and take care of adding and + * removing devices to OS + */ +- atomic_set(&pinstance->expose_resources, 1); ++ atomic_set_unchecked(&pinstance->expose_resources, 1); + schedule_work(&pinstance->worker_q); + return rc; + +diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h +index e1d150f..6c6df44 100644 +--- a/drivers/scsi/pmcraid.h ++++ b/drivers/scsi/pmcraid.h +@@ -748,7 +748,7 @@ struct pmcraid_instance { + struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS]; + + /* Message id as filled in last fired IOARCB, used to identify HRRQ */ +- atomic_t last_message_id; ++ atomic_unchecked_t last_message_id; + + /* configuration table */ + struct pmcraid_config_table *cfg_table; +@@ -777,7 +777,7 @@ struct pmcraid_instance { + atomic_t outstanding_cmds; + + /* should add/delete resources to mid-layer now ?*/ +- atomic_t expose_resources; ++ atomic_unchecked_t expose_resources; + + + +@@ -813,8 +813,8 @@ struct pmcraid_resource_entry { + struct pmcraid_config_table_entry_ext cfg_entry_ext; + }; + struct scsi_device *scsi_dev; /* Link scsi_device structure */ +- atomic_t read_failures; /* count of failed READ commands */ +- atomic_t write_failures; /* count of failed WRITE commands */ ++ atomic_unchecked_t read_failures; /* count of failed READ commands */ ++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */ + + /* To indicate add/delete/modify during CCN */ + u8 change_detected; +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c +index 4a0d7c9..3d658d7 100644 +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -2038,7 +2038,7 @@ 
qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable) + return 0; + } + +-struct fc_function_template qla2xxx_transport_functions = { ++fc_function_template_no_const qla2xxx_transport_functions = { + + .show_host_node_name = 1, + .show_host_port_name = 1, +@@ -2086,7 +2086,7 @@ struct fc_function_template qla2xxx_transport_functions = { + .bsg_timeout = qla24xx_bsg_timeout, + }; + +-struct fc_function_template qla2xxx_transport_vport_functions = { ++fc_function_template_no_const qla2xxx_transport_vport_functions = { + + .show_host_node_name = 1, + .show_host_port_name = 1, +diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h +index 1f42662..bf9836c 100644 +--- a/drivers/scsi/qla2xxx/qla_gbl.h ++++ b/drivers/scsi/qla2xxx/qla_gbl.h +@@ -546,8 +546,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *); + struct device_attribute; + extern struct device_attribute *qla2x00_host_attrs[]; + struct fc_function_template; +-extern struct fc_function_template qla2xxx_transport_functions; +-extern struct fc_function_template qla2xxx_transport_vport_functions; ++extern fc_function_template_no_const qla2xxx_transport_functions; ++extern fc_function_template_no_const qla2xxx_transport_vport_functions; + extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); + extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool); + extern void qla2x00_init_host_attr(scsi_qla_host_t *); +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 83cb612..9b7b08c 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -1491,8 +1491,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha) + !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { + /* Ok, a 64bit DMA mask is applicable. 
*/ + ha->flags.enable_64bit_addressing = 1; +- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; +- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; ++ pax_open_kernel(); ++ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; ++ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64; ++ pax_close_kernel(); + return; + } + } +diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h +index aa67bb9..06d0e2a 100644 +--- a/drivers/scsi/qla4xxx/ql4_def.h ++++ b/drivers/scsi/qla4xxx/ql4_def.h +@@ -303,7 +303,7 @@ struct ddb_entry { + * (4000 only) */ + atomic_t relogin_timer; /* Max Time to wait for + * relogin to complete */ +- atomic_t relogin_retry_count; /* Num of times relogin has been ++ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been + * retried */ + uint32_t default_time2wait; /* Default Min time between + * relogins (+aens) */ +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index c21adc3..1b4155f 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -4463,12 +4463,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) + */ + if (!iscsi_is_session_online(cls_sess)) { + /* Reset retry relogin timer */ +- atomic_inc(&ddb_entry->relogin_retry_count); ++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: index[%d] relogin timed out-retrying" + " relogin (%d), retry (%d)\n", __func__, + ddb_entry->fw_ddb_index, +- atomic_read(&ddb_entry->relogin_retry_count), ++ atomic_read_unchecked(&ddb_entry->relogin_retry_count), + ddb_entry->default_time2wait + 4)); + set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); + atomic_set(&ddb_entry->retry_relogin_timer, +@@ -6552,7 +6552,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, + + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); +- atomic_set(&ddb_entry->relogin_retry_count, 0); ++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); + def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); + ddb_entry->default_relogin_timeout = + (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? 
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c +index d8afec8..3ec7152 100644 +--- a/drivers/scsi/scsi.c ++++ b/drivers/scsi/scsi.c +@@ -658,7 +658,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) + struct Scsi_Host *host = cmd->device->host; + int rtn = 0; + +- atomic_inc(&cmd->device->iorequest_cnt); ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt); + + /* check if the device is still usable */ + if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 64e487a..384f684 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -1482,7 +1482,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) + shost = sdev->host; + scsi_init_cmd_errh(cmd); + cmd->result = DID_NO_CONNECT << 16; +- atomic_inc(&cmd->device->iorequest_cnt); ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt); + + /* + * SCSI request completion path will do scsi_device_unbusy(), +@@ -1508,9 +1508,9 @@ static void scsi_softirq_done(struct request *rq) + + INIT_LIST_HEAD(&cmd->eh_entry); + +- atomic_inc(&cmd->device->iodone_cnt); ++ atomic_inc_unchecked(&cmd->device->iodone_cnt); + if (cmd->result) +- atomic_inc(&cmd->device->ioerr_cnt); ++ atomic_inc_unchecked(&cmd->device->ioerr_cnt); + + disposition = scsi_decide_disposition(cmd); + if (disposition != SUCCESS && +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 665acbf..d18fab4 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -734,7 +734,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \ + char *buf) \ + { \ + struct scsi_device *sdev = to_scsi_device(dev); \ +- unsigned long long count = atomic_read(&sdev->field); \ ++ unsigned long long count = atomic_read_unchecked(&sdev->field); \ + return snprintf(buf, 20, "0x%llx\n", count); \ + } \ + static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) +diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c +index 84a1fdf..693b0d6 100644 +--- a/drivers/scsi/scsi_tgt_lib.c ++++ b/drivers/scsi/scsi_tgt_lib.c +@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, + int err; + + dprintk("%lx %u\n", uaddr, len); +- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); ++ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL); + if (err) { + /* + * TODO: need to fixup sg_tablesize, max_segment_size, +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c +index 4628fd5..a94a1c2 100644 +--- a/drivers/scsi/scsi_transport_fc.c ++++ b/drivers/scsi/scsi_transport_fc.c +@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class, + * Netlink Infrastructure + */ + +-static atomic_t fc_event_seq; ++static atomic_unchecked_t fc_event_seq; + + /** + * fc_get_event_number - Obtain the next sequential FC event number +@@ -510,7 +510,7 @@ static atomic_t fc_event_seq; + u32 + fc_get_event_number(void) + { +- return atomic_add_return(1, &fc_event_seq); ++ return atomic_add_return_unchecked(1, &fc_event_seq); + } + EXPORT_SYMBOL(fc_get_event_number); + +@@ -654,7 +654,7 @@ static __init int fc_transport_init(void) + { + int error; + +- atomic_set(&fc_event_seq, 0); ++ atomic_set_unchecked(&fc_event_seq, 0); + + error = transport_class_register(&fc_host_class); + if (error) +@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val) + char *cp; + + *val = simple_strtoul(buf, &cp, 0); +- if 
((*cp && (*cp != '\n')) || (*val < 0)) ++ if (*cp && (*cp != '\n')) + return -EINVAL; + /* + * Check for overflow; dev_loss_tmo is u32 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index fd8ffe6..fd0bebf 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -79,7 +79,7 @@ struct iscsi_internal { + struct transport_container session_cont; + }; + +-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ ++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */ + static struct workqueue_struct *iscsi_eh_timer_workq; + + static DEFINE_IDA(iscsi_sess_ida); +@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) + int err; + + ihost = shost->shost_data; +- session->sid = atomic_add_return(1, &iscsi_session_nr); ++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr); + + if (target_id == ISCSI_MAX_TARGET) { + id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL); +@@ -4511,7 +4511,7 @@ static __init int iscsi_transport_init(void) + printk(KERN_INFO "Loading iSCSI transport class v%s.\n", + ISCSI_TRANSPORT_VERSION); + +- atomic_set(&iscsi_session_nr, 0); ++ atomic_set_unchecked(&iscsi_session_nr, 0); + + err = class_register(&iscsi_transport_class); + if (err) +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c +index d47ffc8..30f46a9 100644 +--- a/drivers/scsi/scsi_transport_srp.c ++++ b/drivers/scsi/scsi_transport_srp.c +@@ -36,7 +36,7 @@ + #include "scsi_transport_srp_internal.h" + + struct srp_host_attrs { +- atomic_t next_port_id; ++ atomic_unchecked_t next_port_id; + }; + #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) + +@@ -101,7 +101,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev, + struct Scsi_Host *shost = dev_to_shost(dev); + struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); + +- atomic_set(&srp_host->next_port_id, 0); ++ atomic_set_unchecked(&srp_host->next_port_id, 0); + return 0; + } + +@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, + rport_fast_io_fail_timedout); + INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); + +- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); ++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id); + dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); + + transport_setup_device(&rport->dev); +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 36d1a23..3f33303 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -2962,7 +2962,7 @@ static int sd_probe(struct device *dev) + sdkp->disk = gd; + sdkp->index = index; + atomic_set(&sdkp->openers, 0); +- atomic_set(&sdkp->device->ioerr_cnt, 0); ++ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0); + + if (!sdp->request_queue->rq_timeout) { + if (sdp->type != TYPE_MOD) +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index df5e961..df6b97f 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) + sdp->disk->disk_name, + MKDEV(SCSI_GENERIC_MAJOR, sdp->index), + NULL, +- (char *)arg); ++ (char __user *)arg); + case BLKTRACESTART: + return blk_trace_startstop(sdp->device->request_queue, 1); + case BLKTRACESTOP: +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index fbf3b22..f5c8b60 100644 +--- a/drivers/spi/spi.c 
++++ b/drivers/spi/spi.c +@@ -1980,7 +1980,7 @@ int spi_bus_unlock(struct spi_master *master) + EXPORT_SYMBOL_GPL(spi_bus_unlock); + + /* portable code must never pass more than 32 bytes */ +-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) ++#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES) + + static u8 *buf; + +diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c +index 2c61783..4d49e4e 100644 +--- a/drivers/staging/android/timed_output.c ++++ b/drivers/staging/android/timed_output.c +@@ -25,7 +25,7 @@ + #include "timed_output.h" + + static struct class *timed_output_class; +-static atomic_t device_count; ++static atomic_unchecked_t device_count; + + static ssize_t enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +@@ -63,7 +63,7 @@ static int create_timed_output_class(void) + timed_output_class = class_create(THIS_MODULE, "timed_output"); + if (IS_ERR(timed_output_class)) + return PTR_ERR(timed_output_class); +- atomic_set(&device_count, 0); ++ atomic_set_unchecked(&device_count, 0); + timed_output_class->dev_groups = timed_output_groups; + } + +@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev) + if (ret < 0) + return ret; + +- tdev->index = atomic_inc_return(&device_count); ++ tdev->index = atomic_inc_return_unchecked(&device_count); + tdev->dev = device_create(timed_output_class, NULL, + MKDEV(0, tdev->index), NULL, "%s", tdev->name); + if (IS_ERR(tdev->dev)) +diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c +index fe47cd3..19a1bd1 100644 +--- a/drivers/staging/gdm724x/gdm_tty.c ++++ b/drivers/staging/gdm724x/gdm_tty.c +@@ -44,7 +44,7 @@ + #define gdm_tty_send_control(n, r, v, d, l) (\ + n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l)) + +-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count) ++#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count)) + + static struct tty_driver *gdm_driver[TTY_MAX_COUNT]; + static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR]; +diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c +index 236ed66..dd9cd74 100644 +--- a/drivers/staging/imx-drm/imx-drm-core.c ++++ b/drivers/staging/imx-drm/imx-drm-core.c +@@ -488,7 +488,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, + goto err_busy; + } + +- if (imxdrm->drm->open_count) { ++ if (local_read(&imxdrm->drm->open_count)) { + ret = -EBUSY; + goto err_busy; + } +@@ -576,7 +576,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder, + + mutex_lock(&imxdrm->mutex); + +- if (imxdrm->drm->open_count) { ++ if (local_read(&imxdrm->drm->open_count)) { + ret = -EBUSY; + goto err_busy; + } +@@ -715,7 +715,7 @@ int imx_drm_add_connector(struct drm_connector *connector, + + mutex_lock(&imxdrm->mutex); + +- if (imxdrm->drm->open_count) { ++ if (local_read(&imxdrm->drm->open_count)) { + ret = -EBUSY; + goto err_busy; + } +diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c +index 3f8020c..649fded 100644 +--- a/drivers/staging/lustre/lnet/selftest/brw_test.c ++++ b/drivers/staging/lustre/lnet/selftest/brw_test.c +@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc) + return 0; + } + +-sfw_test_client_ops_t brw_test_client; +-void brw_init_test_client(void) +-{ +- brw_test_client.tso_init = brw_client_init; +- brw_test_client.tso_fini = brw_client_fini; +- brw_test_client.tso_prep_rpc = brw_client_prep_rpc; +- 
brw_test_client.tso_done_rpc = brw_client_done_rpc; ++sfw_test_client_ops_t brw_test_client = { ++ .tso_init = brw_client_init, ++ .tso_fini = brw_client_fini, ++ .tso_prep_rpc = brw_client_prep_rpc, ++ .tso_done_rpc = brw_client_done_rpc, + }; + + srpc_service_t brw_test_service; +diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c +index 050723a..fa6fdf1 100644 +--- a/drivers/staging/lustre/lnet/selftest/framework.c ++++ b/drivers/staging/lustre/lnet/selftest/framework.c +@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] = + + extern sfw_test_client_ops_t ping_test_client; + extern srpc_service_t ping_test_service; +-extern void ping_init_test_client(void); + extern void ping_init_test_service(void); + + extern sfw_test_client_ops_t brw_test_client; + extern srpc_service_t brw_test_service; +-extern void brw_init_test_client(void); + extern void brw_init_test_service(void); + + +@@ -1684,12 +1682,10 @@ sfw_startup (void) + INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs); + INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions); + +- brw_init_test_client(); + brw_init_test_service(); + rc = sfw_register_test(&brw_test_service, &brw_test_client); + LASSERT (rc == 0); + +- ping_init_test_client(); + ping_init_test_service(); + rc = sfw_register_test(&ping_test_service, &ping_test_client); + LASSERT (rc == 0); +diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c +index 750cac4..e4d751f 100644 +--- a/drivers/staging/lustre/lnet/selftest/ping_test.c ++++ b/drivers/staging/lustre/lnet/selftest/ping_test.c +@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc) + return 0; + } + +-sfw_test_client_ops_t ping_test_client; +-void ping_init_test_client(void) +-{ +- ping_test_client.tso_init = ping_client_init; +- ping_test_client.tso_fini = ping_client_fini; +- ping_test_client.tso_prep_rpc = ping_client_prep_rpc; +- ping_test_client.tso_done_rpc = ping_client_done_rpc; +-} ++sfw_test_client_ops_t ping_test_client = { ++ .tso_init = ping_client_init, ++ .tso_fini = ping_client_fini, ++ .tso_prep_rpc = ping_client_prep_rpc, ++ .tso_done_rpc = ping_client_done_rpc, ++}; + + srpc_service_t ping_test_service; + void ping_init_test_service(void) +diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h +index ec4bb5e..740c6dd 100644 +--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h ++++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h +@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite { + ldlm_completion_callback lcs_completion; + ldlm_blocking_callback lcs_blocking; + ldlm_glimpse_callback lcs_glimpse; +-}; ++} __no_const; + + /* ldlm_lockd.c */ + int ldlm_del_waiting_lock(struct ldlm_lock *lock); +diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h +index c3470ce..2bef527 100644 +--- a/drivers/staging/lustre/lustre/include/obd.h ++++ b/drivers/staging/lustre/lustre/include/obd.h +@@ -1426,7 +1426,7 @@ struct md_ops { + * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a + * wrapper function in include/linux/obd_class.h. 
+ */ +-}; ++} __no_const; + + struct lsm_operations { + void (*lsm_free)(struct lov_stripe_md *); +diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +index c9aae13..60ea292 100644 +--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c ++++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +@@ -239,7 +239,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq, + int added = (mode == LCK_NL); + int overlaps = 0; + int splitted = 0; +- const struct ldlm_callback_suite null_cbs = { NULL }; ++ const struct ldlm_callback_suite null_cbs = { }; + + CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start " + LPU64" end "LPU64"\n", *flags, +diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c +index e947b91..f408990 100644 +--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c ++++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c +@@ -217,7 +217,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb) + int LL_PROC_PROTO(proc_console_max_delay_cs) + { + int rc, max_delay_cs; +- ctl_table_t dummy = *table; ++ ctl_table_no_const dummy = *table; + cfs_duration_t d; + + dummy.data = &max_delay_cs; +@@ -248,7 +248,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs) + int LL_PROC_PROTO(proc_console_min_delay_cs) + { + int rc, min_delay_cs; +- ctl_table_t dummy = *table; ++ ctl_table_no_const dummy = *table; + cfs_duration_t d; + + dummy.data = &min_delay_cs; +@@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs) + int LL_PROC_PROTO(proc_console_backoff) + { + int rc, backoff; +- ctl_table_t dummy = *table; ++ ctl_table_no_const dummy = *table; + + dummy.data = &backoff; + dummy.proc_handler = &proc_dointvec; +diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c +index 24ae26d..9d09cab 100644 +--- a/drivers/staging/lustre/lustre/libcfs/module.c ++++ b/drivers/staging/lustre/lustre/libcfs/module.c +@@ -313,11 +313,11 @@ out: + + + struct cfs_psdev_ops libcfs_psdev_ops = { +- libcfs_psdev_open, +- libcfs_psdev_release, +- NULL, +- NULL, +- libcfs_ioctl ++ .p_open = libcfs_psdev_open, ++ .p_close = libcfs_psdev_release, ++ .p_read = NULL, ++ .p_write = NULL, ++ .p_ioctl = libcfs_ioctl + }; + + extern int insert_proc(void); +diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c +index 52b7731..d604da0 100644 +--- a/drivers/staging/lustre/lustre/llite/dir.c ++++ b/drivers/staging/lustre/lustre/llite/dir.c +@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump, + int mode; + int err; + +- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR; ++ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR; + op_data = ll_prep_md_op_data(NULL, dir, NULL, filename, + strlen(filename), mode, LUSTRE_OPC_MKDIR, + lump); +diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c +index 480b7c4..6846324 100644 +--- a/drivers/staging/media/solo6x10/solo6x10-core.c ++++ b/drivers/staging/media/solo6x10/solo6x10-core.c +@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev) + + static int solo_sysfs_init(struct solo_dev *solo_dev) + { +- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr; ++ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr; + struct device *dev = &solo_dev->dev; + const char *driver; + int i; 
+diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c +index 1db18c7..35e6afc 100644 +--- a/drivers/staging/media/solo6x10/solo6x10-g723.c ++++ b/drivers/staging/media/solo6x10/solo6x10-g723.c +@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev) + + int solo_g723_init(struct solo_dev *solo_dev) + { +- static struct snd_device_ops ops = { NULL }; ++ static struct snd_device_ops ops = { }; + struct snd_card *card; + struct snd_kcontrol_new kctl; + char name[32]; +diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c +index 7f2f247..d999137 100644 +--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c ++++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c +@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev, + + /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */ + if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) { +- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M; ++ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M; + if (p2m_id < 0) + p2m_id = -p2m_id; + } +diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h +index 8964f8b..36eb087 100644 +--- a/drivers/staging/media/solo6x10/solo6x10.h ++++ b/drivers/staging/media/solo6x10/solo6x10.h +@@ -237,7 +237,7 @@ struct solo_dev { + + /* P2M DMA Engine */ + struct solo_p2m_dev p2m_dev[SOLO_NR_P2M]; +- atomic_t p2m_count; ++ atomic_unchecked_t p2m_count; + int p2m_jiffies; + unsigned int p2m_timeouts; + +diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c +index a0f4868..139f1fb 100644 +--- a/drivers/staging/octeon/ethernet-rx.c ++++ b/drivers/staging/octeon/ethernet-rx.c +@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) + /* Increment RX stats for virtual ports */ + if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { + #ifdef CONFIG_64BIT +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); +- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); ++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets); ++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes); + #else +- atomic_add(1, (atomic_t *)&priv->stats.rx_packets); +- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets); ++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes); + #endif + } + netif_receive_skb(skb); +@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) + dev->name); + */ + #ifdef CONFIG_64BIT +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); ++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped); + #else +- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped); + #endif + dev_kfree_skb_irq(skb); + } +diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c +index 089dc4b..c9a687e 100644 +--- a/drivers/staging/octeon/ethernet.c ++++ b/drivers/staging/octeon/ethernet.c +@@ -253,11 +253,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) + * since the RX tasklet also increments it. 
+ */ + #ifdef CONFIG_64BIT +- atomic64_add(rx_status.dropped_packets, +- (atomic64_t *)&priv->stats.rx_dropped); ++ atomic64_add_unchecked(rx_status.dropped_packets, ++ (atomic64_unchecked_t *)&priv->stats.rx_dropped); + #else +- atomic_add(rx_status.dropped_packets, +- (atomic_t *)&priv->stats.rx_dropped); ++ atomic_add_unchecked(rx_status.dropped_packets, ++ (atomic_unchecked_t *)&priv->stats.rx_dropped); + #endif + } + +diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h +index c274b34..f84de76 100644 +--- a/drivers/staging/rtl8188eu/include/hal_intf.h ++++ b/drivers/staging/rtl8188eu/include/hal_intf.h +@@ -271,7 +271,7 @@ struct hal_ops { + s32 (*c2h_handler)(struct adapter *padapter, + struct c2h_evt_hdr *c2h_evt); + c2h_id_filter c2h_id_filter_ccx; +-}; ++} __no_const; + + enum rt_eeprom_type { + EEPROM_93C46, +diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h +index 3d1dfcc..ff5620a 100644 +--- a/drivers/staging/rtl8188eu/include/rtw_io.h ++++ b/drivers/staging/rtl8188eu/include/rtw_io.h +@@ -126,7 +126,7 @@ struct _io_ops { + u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem); + void (*_read_port_cancel)(struct intf_hdl *pintfhdl); + void (*_write_port_cancel)(struct intf_hdl *pintfhdl); +-}; ++} __no_const; + + struct io_req { + struct list_head list; +diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h +index dc23395..cf7e9b1 100644 +--- a/drivers/staging/rtl8712/rtl871x_io.h ++++ b/drivers/staging/rtl8712/rtl871x_io.h +@@ -108,7 +108,7 @@ struct _io_ops { + u8 *pmem); + u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, + u8 *pmem); +-}; ++} __no_const; + + struct io_req { + struct list_head list; +diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c +index 1f5088b..0e59820 100644 +--- a/drivers/staging/sbe-2t3e3/netdev.c ++++ b/drivers/staging/sbe-2t3e3/netdev.c +@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + t3e3_if_config(sc, cmd_2t3e3, (char *)¶m, &resp, &rlen); + + if (rlen) +- if (copy_to_user(data, &resp, rlen)) ++ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen)) + return -EFAULT; + + return 0; +diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h +index a863a98..d272795 100644 +--- a/drivers/staging/usbip/vhci.h ++++ b/drivers/staging/usbip/vhci.h +@@ -83,7 +83,7 @@ struct vhci_hcd { + unsigned resuming:1; + unsigned long re_timeout; + +- atomic_t seqnum; ++ atomic_unchecked_t seqnum; + + /* + * NOTE: +diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c +index 72391ef..7c6717a 100644 +--- a/drivers/staging/usbip/vhci_hcd.c ++++ b/drivers/staging/usbip/vhci_hcd.c +@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb) + + spin_lock(&vdev->priv_lock); + +- priv->seqnum = atomic_inc_return(&the_controller->seqnum); ++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); + if (priv->seqnum == 0xffff) + dev_info(&urb->dev->dev, "seqnum max\n"); + +@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + return -ENOMEM; + } + +- unlink->seqnum = atomic_inc_return(&the_controller->seqnum); ++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); + if (unlink->seqnum == 0xffff) + pr_info("seqnum max\n"); + +@@ -890,7 +890,7 @@ static int vhci_start(struct usb_hcd *hcd) + vdev->rhport = rhport; + 
} + +- atomic_set(&vhci->seqnum, 0); ++ atomic_set_unchecked(&vhci->seqnum, 0); + spin_lock_init(&vhci->lock); + + hcd->power_budget = 0; /* no limit */ +diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c +index d07fcb5..358e1e1 100644 +--- a/drivers/staging/usbip/vhci_rx.c ++++ b/drivers/staging/usbip/vhci_rx.c +@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, + if (!urb) { + pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); + pr_info("max seqnum %d\n", +- atomic_read(&the_controller->seqnum)); ++ atomic_read_unchecked(&the_controller->seqnum)); + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + return; + } +diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c +index 6eecd53..29317c6 100644 +--- a/drivers/staging/vt6655/hostap.c ++++ b/drivers/staging/vt6655/hostap.c +@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO; + * + */ + ++static net_device_ops_no_const apdev_netdev_ops; ++ + static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) + { + PSDevice apdev_priv; + struct net_device *dev = pDevice->dev; + int ret; +- const struct net_device_ops apdev_netdev_ops = { +- .ndo_start_xmit = pDevice->tx_80211, +- }; + + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); + +@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) + *apdev_priv = *pDevice; + eth_hw_addr_inherit(pDevice->apdev, dev); + ++ /* only half broken now */ ++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211; + pDevice->apdev->netdev_ops = &apdev_netdev_ops; + + pDevice->apdev->type = ARPHRD_IEEE80211; +diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c +index 67ba48b..24e602f 100644 +--- a/drivers/staging/vt6656/hostap.c ++++ b/drivers/staging/vt6656/hostap.c +@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO; + * + */ + ++static net_device_ops_no_const apdev_netdev_ops; ++ + static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked) + { + struct vnt_private *apdev_priv; + struct net_device *dev = pDevice->dev; + int ret; +- const struct net_device_ops apdev_netdev_ops = { +- .ndo_start_xmit = pDevice->tx_80211, +- }; + + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); + +@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked) + *apdev_priv = *pDevice; + memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); + ++ /* only half broken now */ ++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211; + pDevice->apdev->netdev_ops = &apdev_netdev_ops; + + pDevice->apdev->type = ARPHRD_IEEE80211; +diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c +index 24884ca..26c8220 100644 +--- a/drivers/target/sbp/sbp_target.c ++++ b/drivers/target/sbp/sbp_target.c +@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = { + + #define SESSION_MAINTENANCE_INTERVAL HZ + +-static atomic_t login_id = ATOMIC_INIT(0); ++static atomic_unchecked_t login_id = ATOMIC_INIT(0); + + static void session_maintenance_work(struct work_struct *); + static int sbp_run_transaction(struct fw_card *, int, int, int, int, +@@ -444,7 +444,7 @@ static void sbp_management_request_login( + login->lun = se_lun; + login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); + login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); +- login->login_id = atomic_inc_return(&login_id); ++ login->login_id = 
atomic_inc_return_unchecked(&login_id); + + login->tgt_agt = sbp_target_agent_register(login); + if (IS_ERR(login->tgt_agt)) { +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 6ea95d2..88607b4 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) + spin_lock_init(&dev->se_tmr_lock); + spin_lock_init(&dev->qf_cmd_lock); + sema_init(&dev->caw_sem, 1); +- atomic_set(&dev->dev_ordered_id, 0); ++ atomic_set_unchecked(&dev->dev_ordered_id, 0); + INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); + spin_lock_init(&dev->t10_wwn.t10_vpd_lock); + INIT_LIST_HEAD(&dev->t10_pr.registration_list); +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 24f5279..046edc5 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1154,7 +1154,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) + * Used to determine when ORDERED commands should go from + * Dormant to Active status. + */ +- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); ++ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id); + smp_mb__after_atomic_inc(); + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", + cmd->se_ordered_id, cmd->sam_task_attr, +diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c +index 04b1be7..5eff86d 100644 +--- a/drivers/thermal/of-thermal.c ++++ b/drivers/thermal/of-thermal.c +@@ -30,6 +30,7 @@ + #include <linux/err.h> + #include <linux/export.h> + #include <linux/string.h> ++#include <linux/mm.h> + + #include "thermal_core.h" + +@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone, + tz->get_trend = get_trend; + tz->sensor_data = data; + +- tzd->ops->get_temp = of_thermal_get_temp; +- tzd->ops->get_trend = of_thermal_get_trend; ++ pax_open_kernel(); ++ *(void **)&tzd->ops->get_temp = of_thermal_get_temp; ++ *(void **)&tzd->ops->get_trend = of_thermal_get_trend; ++ pax_close_kernel(); + mutex_unlock(&tzd->lock); + + return tzd; +@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev, + return; + + mutex_lock(&tzd->lock); +- tzd->ops->get_temp = NULL; +- tzd->ops->get_trend = NULL; ++ pax_open_kernel(); ++ *(void **)&tzd->ops->get_temp = NULL; ++ *(void **)&tzd->ops->get_trend = NULL; ++ pax_close_kernel(); + + tz->get_temp = NULL; + tz->get_trend = NULL; +diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c +index a57bb5a..1f727d33 100644 +--- a/drivers/tty/cyclades.c ++++ b/drivers/tty/cyclades.c +@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp) + printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, + info->port.count); + #endif +- info->port.count++; ++ atomic_inc(&info->port.count); + #ifdef CY_DEBUG_COUNT + printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n", +- current->pid, info->port.count); ++ current->pid, atomic_read(&info->port.count)); + #endif + + /* +@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v) + for (j = 0; j < cy_card[i].nports; j++) { + info = &cy_card[i].ports[j]; + +- if (info->port.count) { ++ if (atomic_read(&info->port.count)) { + /* XXX is the ldisc num worth this? 
*/ + struct tty_struct *tty; + struct tty_ldisc *ld; +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c +index 0ff7fda..dbc7d52 100644 +--- a/drivers/tty/hvc/hvc_console.c ++++ b/drivers/tty/hvc/hvc_console.c +@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) + + spin_lock_irqsave(&hp->port.lock, flags); + /* Check and then increment for fast path open. */ +- if (hp->port.count++ > 0) { ++ if (atomic_inc_return(&hp->port.count) > 1) { + spin_unlock_irqrestore(&hp->port.lock, flags); + hvc_kick(); + return 0; +@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + + spin_lock_irqsave(&hp->port.lock, flags); + +- if (--hp->port.count == 0) { ++ if (atomic_dec_return(&hp->port.count) == 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + /* We are done with the tty pointer now. */ + tty_port_tty_set(&hp->port, NULL); +@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) + */ + tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT); + } else { +- if (hp->port.count < 0) ++ if (atomic_read(&hp->port.count) < 0) + printk(KERN_ERR "hvc_close %X: oops, count is %d\n", +- hp->vtermno, hp->port.count); ++ hp->vtermno, atomic_read(&hp->port.count)); + spin_unlock_irqrestore(&hp->port.lock, flags); + } + } +@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty) + * open->hangup case this can be called after the final close so prevent + * that from happening for now. + */ +- if (hp->port.count <= 0) { ++ if (atomic_read(&hp->port.count) <= 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + return; + } + +- hp->port.count = 0; ++ atomic_set(&hp->port.count, 0); + spin_unlock_irqrestore(&hp->port.lock, flags); + tty_port_tty_set(&hp->port, NULL); + +@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count + return -EPIPE; + + /* FIXME what's this (unprotected) check for? */ +- if (hp->port.count <= 0) ++ if (atomic_read(&hp->port.count) <= 0) + return -EIO; + + spin_lock_irqsave(&hp->lock, flags); +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c +index 81e939e..95ead10 100644 +--- a/drivers/tty/hvc/hvcs.c ++++ b/drivers/tty/hvc/hvcs.c +@@ -83,6 +83,7 @@ + #include <asm/hvcserver.h> + #include <asm/uaccess.h> + #include <asm/vio.h> ++#include <asm/local.h> + + /* + * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00). +@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut + + spin_lock_irqsave(&hvcsd->lock, flags); + +- if (hvcsd->port.count > 0) { ++ if (atomic_read(&hvcsd->port.count) > 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. 
" + "The hvcs device node is still in use.\n"); +@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty) + } + } + +- hvcsd->port.count = 0; ++ atomic_set(&hvcsd->port.count, 0); + hvcsd->port.tty = tty; + tty->driver_data = hvcsd; + +@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) + unsigned long flags; + + spin_lock_irqsave(&hvcsd->lock, flags); +- hvcsd->port.count++; ++ atomic_inc(&hvcsd->port.count); + hvcsd->todo_mask |= HVCS_SCHED_READ; + spin_unlock_irqrestore(&hvcsd->lock, flags); + +@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + hvcsd = tty->driver_data; + + spin_lock_irqsave(&hvcsd->lock, flags); +- if (--hvcsd->port.count == 0) { ++ if (atomic_dec_and_test(&hvcsd->port.count)) { + + vio_disable_interrupts(hvcsd->vdev); + +@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + + free_irq(irq, hvcsd); + return; +- } else if (hvcsd->port.count < 0) { ++ } else if (atomic_read(&hvcsd->port.count) < 0) { + printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" + " is missmanaged.\n", +- hvcsd->vdev->unit_address, hvcsd->port.count); ++ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count)); + } + + spin_unlock_irqrestore(&hvcsd->lock, flags); +@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty) + + spin_lock_irqsave(&hvcsd->lock, flags); + /* Preserve this so that we know how many kref refs to put */ +- temp_open_count = hvcsd->port.count; ++ temp_open_count = atomic_read(&hvcsd->port.count); + + /* + * Don't kref put inside the spinlock because the destruction +@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty) + tty->driver_data = NULL; + hvcsd->port.tty = NULL; + +- hvcsd->port.count = 0; ++ atomic_set(&hvcsd->port.count, 0); + + /* This will drop any buffered data on the floor which is OK in a hangup + * scenario. */ +@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty, + * the middle of a write operation? This is a crummy place to do this + * but we want to keep it all in the spinlock. 
+ */ +- if (hvcsd->port.count <= 0) { ++ if (atomic_read(&hvcsd->port.count) <= 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + return -ENODEV; + } +@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty) + { + struct hvcs_struct *hvcsd = tty->driver_data; + +- if (!hvcsd || hvcsd->port.count <= 0) ++ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0) + return 0; + + return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; +diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c +index 4190199..06d5bfa 100644 +--- a/drivers/tty/hvc/hvsi.c ++++ b/drivers/tty/hvc/hvsi.c +@@ -85,7 +85,7 @@ struct hvsi_struct { + int n_outbuf; + uint32_t vtermno; + uint32_t virq; +- atomic_t seqno; /* HVSI packet sequence number */ ++ atomic_unchecked_t seqno; /* HVSI packet sequence number */ + uint16_t mctrl; + uint8_t state; /* HVSI protocol state */ + uint8_t flags; +@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno) + + packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER; + packet.hdr.len = sizeof(struct hvsi_query_response); +- packet.hdr.seqno = atomic_inc_return(&hp->seqno); ++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno); + packet.verb = VSV_SEND_VERSION_NUMBER; + packet.u.version = HVSI_VERSION; + packet.query_seqno = query_seqno+1; +@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb) + + packet.hdr.type = VS_QUERY_PACKET_HEADER; + packet.hdr.len = sizeof(struct hvsi_query); +- packet.hdr.seqno = atomic_inc_return(&hp->seqno); ++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno); + packet.verb = verb; + + pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len); +@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl) + int wrote; + + packet.hdr.type = VS_CONTROL_PACKET_HEADER, +- packet.hdr.seqno = atomic_inc_return(&hp->seqno); ++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno); + packet.hdr.len = sizeof(struct hvsi_control); + packet.verb = VSV_SET_MODEM_CTL; + packet.mask = HVSI_TSDTR; +@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count) + BUG_ON(count > HVSI_MAX_OUTGOING_DATA); + + packet.hdr.type = VS_DATA_PACKET_HEADER; +- packet.hdr.seqno = atomic_inc_return(&hp->seqno); ++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno); + packet.hdr.len = count + sizeof(struct hvsi_header); + memcpy(&packet.data, buf, count); + +@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp) + struct hvsi_control packet __ALIGNED__; + + packet.hdr.type = VS_CONTROL_PACKET_HEADER; +- packet.hdr.seqno = atomic_inc_return(&hp->seqno); ++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno); + packet.hdr.len = 6; + packet.verb = VSV_CLOSE_PROTOCOL; + +@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp) + + tty_port_tty_set(&hp->port, tty); + spin_lock_irqsave(&hp->lock, flags); +- hp->port.count++; ++ atomic_inc(&hp->port.count); + atomic_set(&hp->seqno, 0); + h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); + spin_unlock_irqrestore(&hp->lock, flags); +@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp) + + spin_lock_irqsave(&hp->lock, flags); + +- if (--hp->port.count == 0) { ++ if (atomic_dec_return(&hp->port.count) == 0) { + tty_port_tty_set(&hp->port, NULL); + hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */ + +@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp) + + 
spin_lock_irqsave(&hp->lock, flags); + } +- } else if (hp->port.count < 0) ++ } else if (atomic_read(&hp->port.count) < 0) + printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n", +- hp - hvsi_ports, hp->port.count); ++ hp - hvsi_ports, atomic_read(&hp->port.count)); + + spin_unlock_irqrestore(&hp->lock, flags); + } +@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty) + tty_port_tty_set(&hp->port, NULL); + + spin_lock_irqsave(&hp->lock, flags); +- hp->port.count = 0; ++ atomic_set(&hp->port.count, 0); + hp->n_outbuf = 0; + spin_unlock_irqrestore(&hp->lock, flags); + } +diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c +index 7ae6c29..05c6dba 100644 +--- a/drivers/tty/hvc/hvsi_lib.c ++++ b/drivers/tty/hvc/hvsi_lib.c +@@ -8,7 +8,7 @@ + + static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet) + { +- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno)); ++ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno)); + + /* Assumes that always succeeds, works in practice */ + return pv->put_chars(pv->termno, (char *)packet, packet->len); +@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv) + + /* Reset state */ + pv->established = 0; +- atomic_set(&pv->seqno, 0); ++ atomic_set_unchecked(&pv->seqno, 0); + + pr_devel("HVSI@%x: Handshaking started\n", pv->termno); + +diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c +index 17ee3bf..8d2520d 100644 +--- a/drivers/tty/ipwireless/tty.c ++++ b/drivers/tty/ipwireless/tty.c +@@ -28,6 +28,7 @@ + #include <linux/tty_driver.h> + #include <linux/tty_flip.h> + #include <linux/uaccess.h> ++#include <asm/local.h> + + #include "tty.h" + #include "network.h" +@@ -98,10 +99,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) + mutex_unlock(&tty->ipw_tty_mutex); + return -ENODEV; + } +- if (tty->port.count == 0) ++ if (atomic_read(&tty->port.count) == 0) + tty->tx_bytes_queued = 0; + +- tty->port.count++; ++ atomic_inc(&tty->port.count); + + tty->port.tty = linux_tty; + linux_tty->driver_data = tty; +@@ -117,9 +118,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) + + static void do_ipw_close(struct ipw_tty *tty) + { +- tty->port.count--; +- +- if (tty->port.count == 0) { ++ if (atomic_dec_return(&tty->port.count) == 0) { + struct tty_struct *linux_tty = tty->port.tty; + + if (linux_tty != NULL) { +@@ -140,7 +139,7 @@ static void ipw_hangup(struct tty_struct *linux_tty) + return; + + mutex_lock(&tty->ipw_tty_mutex); +- if (tty->port.count == 0) { ++ if (atomic_read(&tty->port.count) == 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } +@@ -163,7 +162,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, + + mutex_lock(&tty->ipw_tty_mutex); + +- if (!tty->port.count) { ++ if (!atomic_read(&tty->port.count)) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } +@@ -202,7 +201,7 @@ static int ipw_write(struct tty_struct *linux_tty, + return -ENODEV; + + mutex_lock(&tty->ipw_tty_mutex); +- if (!tty->port.count) { ++ if (!atomic_read(&tty->port.count)) { + mutex_unlock(&tty->ipw_tty_mutex); + return -EINVAL; + } +@@ -242,7 +241,7 @@ static int ipw_write_room(struct tty_struct *linux_tty) + if (!tty) + return -ENODEV; + +- if (!tty->port.count) ++ if (!atomic_read(&tty->port.count)) + return -EINVAL; + + room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; +@@ -284,7 +283,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty) + if (!tty) + return 0; + +- if 
(!tty->port.count) ++ if (!atomic_read(&tty->port.count)) + return 0; + + return tty->tx_bytes_queued; +@@ -365,7 +364,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty) + if (!tty) + return -ENODEV; + +- if (!tty->port.count) ++ if (!atomic_read(&tty->port.count)) + return -EINVAL; + + return get_control_lines(tty); +@@ -381,7 +380,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, + if (!tty) + return -ENODEV; + +- if (!tty->port.count) ++ if (!atomic_read(&tty->port.count)) + return -EINVAL; + + return set_control_lines(tty, set, clear); +@@ -395,7 +394,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, + if (!tty) + return -ENODEV; + +- if (!tty->port.count) ++ if (!atomic_read(&tty->port.count)) + return -EINVAL; + + /* FIXME: Exactly how is the tty object locked here .. */ +@@ -551,7 +550,7 @@ void ipwireless_tty_free(struct ipw_tty *tty) + * are gone */ + mutex_lock(&ttyj->ipw_tty_mutex); + } +- while (ttyj->port.count) ++ while (atomic_read(&ttyj->port.count)) + do_ipw_close(ttyj); + ipwireless_disassociate_network_ttys(network, + ttyj->channel_idx); +diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c +index 1deaca4..c8582d4 100644 +--- a/drivers/tty/moxa.c ++++ b/drivers/tty/moxa.c +@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp) + } + + ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; +- ch->port.count++; ++ atomic_inc(&ch->port.count); + tty->driver_data = ch; + tty_port_tty_set(&ch->port, tty); + mutex_lock(&ch->port.mutex); +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c +index 2ebe47b..3205833 100644 +--- a/drivers/tty/n_gsm.c ++++ b/drivers/tty/n_gsm.c +@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) + spin_lock_init(&dlci->lock); + mutex_init(&dlci->mutex); + dlci->fifo = &dlci->_fifo; +- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) { ++ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) { + kfree(dlci); + return NULL; + } +@@ -2954,7 +2954,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp) + struct gsm_dlci *dlci = tty->driver_data; + struct tty_port *port = &dlci->port; + +- port->count++; ++ atomic_inc(&port->count); + tty_port_tty_set(port, tty); + + dlci->modem_rx = 0; +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 28ac3f3..9019b3b 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -115,7 +115,7 @@ struct n_tty_data { + int minimum_to_wake; + + /* consumer-published */ +- size_t read_tail; ++ size_t read_tail __intentional_overflow(-1); + size_t line_start; + + /* protected by output lock */ +@@ -2520,6 +2520,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) + { + *ops = tty_ldisc_N_TTY; + ops->owner = NULL; +- ops->refcount = ops->flags = 0; ++ atomic_set(&ops->refcount, 0); ++ ops->flags = 0; + } + EXPORT_SYMBOL_GPL(n_tty_inherit_ops); +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 25c9bc7..24077b7 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void) + panic("Couldn't register Unix98 pts driver"); + + /* Now create the /dev/ptmx special device */ ++ pax_open_kernel(); + tty_default_fops(&ptmx_fops); +- ptmx_fops.open = ptmx_open; ++ *(void **)&ptmx_fops.open = ptmx_open; ++ pax_close_kernel(); + + cdev_init(&ptmx_cdev, &ptmx_fops); + if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || +diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c +index 383c4c7..d408e21 100644 +--- a/drivers/tty/rocket.c ++++ b/drivers/tty/rocket.c 
+@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp) + tty->driver_data = info; + tty_port_tty_set(port, tty); + +- if (port->count++ == 0) { ++ if (atomic_inc_return(&port->count) == 1) { + atomic_inc(&rp_num_ports_open); + + #ifdef ROCKET_DEBUG_OPEN +@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp) + #endif + } + #ifdef ROCKET_DEBUG_OPEN +- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count); ++ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count)); + #endif + + /* +@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty) + spin_unlock_irqrestore(&info->port.lock, flags); + return; + } +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + atomic_dec(&rp_num_ports_open); + clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); + spin_unlock_irqrestore(&info->port.lock, flags); +diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c +index 1274499..f541382 100644 +--- a/drivers/tty/serial/ioc4_serial.c ++++ b/drivers/tty/serial/ioc4_serial.c +@@ -437,7 +437,7 @@ struct ioc4_soft { + } is_intr_info[MAX_IOC4_INTR_ENTS]; + + /* Number of entries active in the above array */ +- atomic_t is_num_intrs; ++ atomic_unchecked_t is_num_intrs; + } is_intr_type[IOC4_NUM_INTR_TYPES]; + + /* is_ir_lock must be held while +@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type, + BUG_ON(!((type == IOC4_SIO_INTR_TYPE) + || (type == IOC4_OTHER_INTR_TYPE))); + +- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1; ++ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1; + BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0))); + + /* Save off the lower level interrupt handler */ +@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg) + + soft = arg; + for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) { +- num_intrs = (int)atomic_read( ++ num_intrs = (int)atomic_read_unchecked( + &soft->is_intr_type[intr_type].is_num_intrs); + + this_mir = this_ir = pending_intrs(soft, intr_type); +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c +index a260cde..6b2b5ce 100644 +--- a/drivers/tty/serial/kgdboc.c ++++ b/drivers/tty/serial/kgdboc.c +@@ -24,8 +24,9 @@ + #define MAX_CONFIG_LEN 40 + + static struct kgdb_io kgdboc_io_ops; ++static struct kgdb_io kgdboc_io_ops_console; + +-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ ++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. 
*/ + static int configured = -1; + + static char config[MAX_CONFIG_LEN]; +@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void) + kgdboc_unregister_kbd(); + if (configured == 1) + kgdb_unregister_io_module(&kgdboc_io_ops); ++ else if (configured == 2) ++ kgdb_unregister_io_module(&kgdboc_io_ops_console); + } + + static int configure_kgdboc(void) +@@ -160,13 +163,13 @@ static int configure_kgdboc(void) + int err; + char *cptr = config; + struct console *cons; ++ int is_console = 0; + + err = kgdboc_option_setup(config); + if (err || !strlen(config) || isspace(config[0])) + goto noconfig; + + err = -ENODEV; +- kgdboc_io_ops.is_console = 0; + kgdb_tty_driver = NULL; + + kgdboc_use_kms = 0; +@@ -187,7 +190,7 @@ static int configure_kgdboc(void) + int idx; + if (cons->device && cons->device(cons, &idx) == p && + idx == tty_line) { +- kgdboc_io_ops.is_console = 1; ++ is_console = 1; + break; + } + cons = cons->next; +@@ -197,7 +200,13 @@ static int configure_kgdboc(void) + kgdb_tty_line = tty_line; + + do_register: +- err = kgdb_register_io_module(&kgdboc_io_ops); ++ if (is_console) { ++ err = kgdb_register_io_module(&kgdboc_io_ops_console); ++ configured = 2; ++ } else { ++ err = kgdb_register_io_module(&kgdboc_io_ops); ++ configured = 1; ++ } + if (err) + goto noconfig; + +@@ -205,8 +214,6 @@ do_register: + if (err) + goto nmi_con_failed; + +- configured = 1; +- + return 0; + + nmi_con_failed: +@@ -223,7 +230,7 @@ noconfig: + static int __init init_kgdboc(void) + { + /* Already configured? */ +- if (configured == 1) ++ if (configured >= 1) + return 0; + + return configure_kgdboc(); +@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) + if (config[len - 1] == '\n') + config[len - 1] = '\0'; + +- if (configured == 1) ++ if (configured >= 1) + cleanup_kgdboc(); + + /* Go and configure with the new params. 
*/ +@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = { + .post_exception = kgdboc_post_exp_handler, + }; + ++static struct kgdb_io kgdboc_io_ops_console = { ++ .name = "kgdboc", ++ .read_char = kgdboc_get_char, ++ .write_char = kgdboc_put_char, ++ .pre_exception = kgdboc_pre_exp_handler, ++ .post_exception = kgdboc_post_exp_handler, ++ .is_console = 1 ++}; ++ + #ifdef CONFIG_KGDB_SERIAL_CONSOLE + /* This is only available if kgdboc is a built in for early debugging */ + static int __init kgdboc_early_init(char *opt) +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index c0f2b3e..7e3f80c 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = { + .cons = MSM_CONSOLE, + }; + +-static atomic_t msm_uart_next_id = ATOMIC_INIT(0); ++static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0); + + static const struct of_device_id msm_uartdm_table[] = { + { .compatible = "qcom,msm-uartdm" }, +@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev) + int irq; + + if (pdev->id == -1) +- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1; ++ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1; + + if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) + return -ENXIO; +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 9cd706d..6ff2de7 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port) + } + } + ++static int s3c64xx_serial_startup(struct uart_port *port); + static int s3c24xx_serial_startup(struct uart_port *port) + { + struct s3c24xx_uart_port *ourport = to_ourport(port); + int ret; + ++ /* Startup sequence is different for s3c64xx and higher SoC's */ ++ if (s3c24xx_serial_has_interrupt_mask(port)) ++ return s3c64xx_serial_startup(port); ++ + dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n", + port->mapbase, port->membase); + +@@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, + /* setup info for port */ + port->dev = &platdev->dev; + +- /* Startup sequence is different for s3c64xx and higher SoC's */ +- if (s3c24xx_serial_has_interrupt_mask(port)) +- s3c24xx_serial_ops.startup = s3c64xx_serial_startup; +- + port->uartclk = 1; + + if (cfg->uart_flags & UPF_CONS_FLOW) { +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 25b8f68..3e23c14 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -1451,7 +1451,7 @@ static void uart_hangup(struct tty_struct *tty) + uart_flush_buffer(tty); + uart_shutdown(tty, state); + spin_lock_irqsave(&port->lock, flags); +- port->count = 0; ++ atomic_set(&port->count, 0); + clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); + spin_unlock_irqrestore(&port->lock, flags); + tty_port_tty_set(port, NULL); +@@ -1547,7 +1547,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) + goto end; + } + +- port->count++; ++ atomic_inc(&port->count); + if (!state->uart_port || state->uart_port->flags & UPF_DEAD) { + retval = -ENXIO; + goto err_dec_count; +@@ -1575,7 +1575,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp) + /* + * Make sure the device is in D0 state. 
+ */
+- if (port->count == 1)
++ if (atomic_read(&port->count) == 1)
+ uart_change_pm(state, UART_PM_STATE_ON);
+
+ /*
+@@ -1593,7 +1593,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
+ end:
+ return retval;
+ err_dec_count:
+- port->count--;
++ atomic_dec(&port->count);
+ mutex_unlock(&port->mutex);
+ goto end;
+ }
+diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
+index 5ae14b4..2c1288f 100644
+--- a/drivers/tty/synclink.c
++++ b/drivers/tty/synclink.c
+@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
+- __FILE__,__LINE__, info->device_name, info->port.count);
++ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
+
+ if (tty_port_close_start(&info->port, tty, filp) == 0)
+ goto cleanup;
+@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
+ cleanup:
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+- tty->driver->name, info->port.count);
++ tty->driver->name, atomic_read(&info->port.count));
+
+ } /* end of mgsl_close() */
+
+@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
+
+ mgsl_flush_buffer(tty);
+ shutdown(info);
+-
+- info->port.count = 0;
++
++ atomic_set(&info->port.count, 0);
+ info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->port.tty = NULL;
+
+@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready before block on %s count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, port->count );
++ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+
+ spin_lock_irqsave(&info->irq_spinlock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = true;
+- port->count--;
++ atomic_dec(&port->count);
+ }
+ spin_unlock_irqrestore(&info->irq_spinlock, flags);
+ port->blocked_open++;
+@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready blocking on %s count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, port->count );
++ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+
+ tty_unlock(tty);
+ schedule();
+@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
+
+ /* FIXME: Racy on hangup during close wait */
+ if (extra_count)
+- port->count++;
++ atomic_inc(&port->count);
+ port->blocked_open--;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, port->count );
++ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
+
+ if (!retval)
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
+- __FILE__,__LINE__,tty->driver->name, info->port.count);
++ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
+
+ /* If port is closing, signal caller to try again */
+ if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
+@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
+ spin_unlock_irqrestore(&info->netlock, flags);
+ goto cleanup;
+ }
+- info->port.count++;
++ 
atomic_inc(&info->port.count); + spin_unlock_irqrestore(&info->netlock, flags); + +- if (info->port.count == 1) { ++ if (atomic_read(&info->port.count) == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info); + if (retval < 0) +@@ -3446,8 +3446,8 @@ cleanup: + if (retval) { + if (tty->count == 1) + info->port.tty = NULL; /* tty layer will release tty struct */ +- if(info->port.count) +- info->port.count--; ++ if (atomic_read(&info->port.count)) ++ atomic_dec(&info->port.count); + } + + return retval; +@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short new_crctype; + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + switch (encoding) +@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev) + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); +- if (info->port.count != 0 || info->netcount != 0) { ++ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { + printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; +@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + if (cmd != SIOCWANDEV) +diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c +index c359a91..959fc26 100644 +--- a/drivers/tty/synclink_gt.c ++++ b/drivers/tty/synclink_gt.c +@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp) + tty->driver_data = info; + info->port.tty = tty; + +- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count)); ++ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count))); + + /* If port is closing, signal caller to try again */ + if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ +@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp) + mutex_unlock(&info->port.mutex); + goto cleanup; + } +- info->port.count++; ++ atomic_inc(&info->port.count); + spin_unlock_irqrestore(&info->netlock, flags); + +- if (info->port.count == 1) { ++ if (atomic_read(&info->port.count) == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info); + if (retval < 0) { +@@ -715,8 +715,8 @@ cleanup: + if (retval) { + if (tty->count == 1) + info->port.tty = NULL; /* tty layer will release tty struct */ +- if(info->port.count) +- info->port.count--; ++ if(atomic_read(&info->port.count)) ++ atomic_dec(&info->port.count); + } + + DBGINFO(("%s open rc=%d\n", info->device_name, retval)); +@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp) + + if (sanity_check(info, tty->name, "close")) + return; +- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count)); ++ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count))); + + if (tty_port_close_start(&info->port, tty, filp) == 0) + goto cleanup; +@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp) + tty_port_close_end(&info->port, tty); + info->port.tty = NULL; + cleanup: +- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count)); ++ DBGINFO(("%s close exit, count=%d\n", 
tty->driver->name, atomic_read(&info->port.count))); + } + + static void hangup(struct tty_struct *tty) +@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty) + shutdown(info); + + spin_lock_irqsave(&info->port.lock, flags); +- info->port.count = 0; ++ atomic_set(&info->port.count, 0); + info->port.flags &= ~ASYNC_NORMAL_ACTIVE; + info->port.tty = NULL; + spin_unlock_irqrestore(&info->port.lock, flags); +@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short new_crctype; + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + DBGINFO(("%s hdlcdev_attach\n", info->device_name)); +@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev) + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); +- if (info->port.count != 0 || info->netcount != 0) { ++ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { + DBGINFO(("%s hdlc_open busy\n", dev->name)); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; +@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + DBGINFO(("%s hdlcdev_ioctl\n", dev->name)); + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + if (cmd != SIOCWANDEV) +@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id) + if (port == NULL) + continue; + spin_lock(&port->lock); +- if ((port->port.count || port->netcount) && ++ if ((atomic_read(&port->port.count) || port->netcount) && + port->pending_bh && !port->bh_running && + !port->bh_requested) { + DBGISR(("%s bh queued\n", port->device_name)); +@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, + spin_lock_irqsave(&info->lock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = true; +- port->count--; ++ atomic_dec(&port->count); + } + spin_unlock_irqrestore(&info->lock, flags); + port->blocked_open++; +@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, + remove_wait_queue(&port->open_wait, &wait); + + if (extra_count) +- port->count++; ++ atomic_inc(&port->count); + port->blocked_open--; + + if (!retval) +diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c +index 144202e..4ccb07d 100644 +--- a/drivers/tty/synclinkmp.c ++++ b/drivers/tty/synclinkmp.c +@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp) + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s open(), old ref count = %d\n", +- __FILE__,__LINE__,tty->driver->name, info->port.count); ++ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count)); + + /* If port is closing, signal caller to try again */ + if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ +@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp) + spin_unlock_irqrestore(&info->netlock, flags); + goto cleanup; + } +- info->port.count++; ++ atomic_inc(&info->port.count); + spin_unlock_irqrestore(&info->netlock, flags); + +- if (info->port.count == 1) { ++ if (atomic_read(&info->port.count) == 1) { + /* 1st open on this device, init hardware */ + retval = startup(info); + if (retval < 0) +@@ -796,8 +796,8 @@ cleanup: + if (retval) { + if (tty->count == 1) + info->port.tty = NULL; /* tty layer will release tty struct */ +- if(info->port.count) +- info->port.count--; ++ 
if(atomic_read(&info->port.count)) ++ atomic_dec(&info->port.count); + } + + return retval; +@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp) + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s close() entry, count=%d\n", +- __FILE__,__LINE__, info->device_name, info->port.count); ++ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count)); + + if (tty_port_close_start(&info->port, tty, filp) == 0) + goto cleanup; +@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp) + cleanup: + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__, +- tty->driver->name, info->port.count); ++ tty->driver->name, atomic_read(&info->port.count)); + } + + /* Called by tty_hangup() when a hangup is signaled. +@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty) + shutdown(info); + + spin_lock_irqsave(&info->port.lock, flags); +- info->port.count = 0; ++ atomic_set(&info->port.count, 0); + info->port.flags &= ~ASYNC_NORMAL_ACTIVE; + info->port.tty = NULL; + spin_unlock_irqrestore(&info->port.lock, flags); +@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, + unsigned short new_crctype; + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + switch (encoding) +@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev) + + /* arbitrate between network and tty opens */ + spin_lock_irqsave(&info->netlock, flags); +- if (info->port.count != 0 || info->netcount != 0) { ++ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) { + printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); + spin_unlock_irqrestore(&info->netlock, flags); + return -EBUSY; +@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); + + /* return error if TTY interface open */ +- if (info->port.count) ++ if (atomic_read(&info->port.count)) + return -EBUSY; + + if (cmd != SIOCWANDEV) +@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id) + * do not request bottom half processing if the + * device is not open in a normal mode. 
+ */ +- if ( port && (port->port.count || port->netcount) && ++ if ( port && (atomic_read(&port->port.count) || port->netcount) && + port->pending_bh && !port->bh_running && + !port->bh_requested ) { + if ( debug_level >= DEBUG_LEVEL_ISR ) +@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() before block, count=%d\n", +- __FILE__,__LINE__, tty->driver->name, port->count ); ++ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); + + spin_lock_irqsave(&info->lock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = true; +- port->count--; ++ atomic_dec(&port->count); + } + spin_unlock_irqrestore(&info->lock, flags); + port->blocked_open++; +@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() count=%d\n", +- __FILE__,__LINE__, tty->driver->name, port->count ); ++ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); + + tty_unlock(tty); + schedule(); +@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, + remove_wait_queue(&port->open_wait, &wait); + + if (extra_count) +- port->count++; ++ atomic_inc(&port->count); + port->blocked_open--; + + if (debug_level >= DEBUG_LEVEL_INFO) + printk("%s(%d):%s block_til_ready() after, count=%d\n", +- __FILE__,__LINE__, tty->driver->name, port->count ); ++ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count)); + + if (!retval) + port->flags |= ASYNC_NORMAL_ACTIVE; +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index ce396ec..04a37be 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key); + static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { +- if (count) { ++ if (count && capable(CAP_SYS_ADMIN)) { + char c; + + if (get_user(c, buf)) +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index d3448a9..28e8db0 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty); + + void tty_default_fops(struct file_operations *fops) + { +- *fops = tty_fops; ++ memcpy((void *)fops, &tty_fops, sizeof(tty_fops)); + } + + /* +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c +index 2d822aa..a566234 100644 +--- a/drivers/tty/tty_ldisc.c ++++ b/drivers/tty/tty_ldisc.c +@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc) + raw_spin_lock_irqsave(&tty_ldiscs_lock, flags); + tty_ldiscs[disc] = new_ldisc; + new_ldisc->num = disc; +- new_ldisc->refcount = 0; ++ atomic_set(&new_ldisc->refcount, 0); + raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags); + + return ret; +@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc) + return -EINVAL; + + raw_spin_lock_irqsave(&tty_ldiscs_lock, flags); +- if (tty_ldiscs[disc]->refcount) ++ if (atomic_read(&tty_ldiscs[disc]->refcount)) + ret = -EBUSY; + else + tty_ldiscs[disc] = NULL; +@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc) + if (ldops) { + ret = ERR_PTR(-EAGAIN); + if (try_module_get(ldops->owner)) { +- ldops->refcount++; ++ atomic_inc(&ldops->refcount); + ret = ldops; + } + } +@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops) + unsigned long flags; + + raw_spin_lock_irqsave(&tty_ldiscs_lock, flags); +- ldops->refcount--; ++ 
atomic_dec(&ldops->refcount); + module_put(ldops->owner); + raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags); + } +diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c +index 3f746c8..2f2fcaa 100644 +--- a/drivers/tty/tty_port.c ++++ b/drivers/tty/tty_port.c +@@ -235,7 +235,7 @@ void tty_port_hangup(struct tty_port *port) + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); +- port->count = 0; ++ atomic_set(&port->count, 0); + port->flags &= ~ASYNC_NORMAL_ACTIVE; + tty = port->tty; + if (tty) +@@ -393,7 +393,7 @@ int tty_port_block_til_ready(struct tty_port *port, + /* The port lock protects the port counts */ + spin_lock_irqsave(&port->lock, flags); + if (!tty_hung_up_p(filp)) +- port->count--; ++ atomic_dec(&port->count); + port->blocked_open++; + spin_unlock_irqrestore(&port->lock, flags); + +@@ -435,7 +435,7 @@ int tty_port_block_til_ready(struct tty_port *port, + we must not mess that up further */ + spin_lock_irqsave(&port->lock, flags); + if (!tty_hung_up_p(filp)) +- port->count++; ++ atomic_inc(&port->count); + port->blocked_open--; + if (retval == 0) + port->flags |= ASYNC_NORMAL_ACTIVE; +@@ -469,19 +469,19 @@ int tty_port_close_start(struct tty_port *port, + return 0; + } + +- if (tty->count == 1 && port->count != 1) { ++ if (tty->count == 1 && atomic_read(&port->count) != 1) { + printk(KERN_WARNING + "tty_port_close_start: tty->count = 1 port count = %d.\n", +- port->count); +- port->count = 1; ++ atomic_read(&port->count)); ++ atomic_set(&port->count, 1); + } +- if (--port->count < 0) { ++ if (atomic_dec_return(&port->count) < 0) { + printk(KERN_WARNING "tty_port_close_start: count = %d\n", +- port->count); +- port->count = 0; ++ atomic_read(&port->count)); ++ atomic_set(&port->count, 0); + } + +- if (port->count) { ++ if (atomic_read(&port->count)) { + spin_unlock_irqrestore(&port->lock, flags); + return 0; + } +@@ -563,7 +563,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty, + { + spin_lock_irq(&port->lock); + if (!tty_hung_up_p(filp)) +- ++port->count; ++ atomic_inc(&port->count); + spin_unlock_irq(&port->lock); + tty_port_tty_set(port, tty); + +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index d0e3a44..5f8b754 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) + kbd->kbdmode == VC_OFF) && + value != KVAL(K_SAK)) + return; /* SAK is allowed even in raw mode */ ++ ++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) ++ { ++ void *func = fn_handler[value]; ++ if (func == fn_show_state || func == fn_show_ptregs || ++ func == fn_show_mem) ++ return; ++ } ++#endif ++ + fn_handler[value](vc); + } + +@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, + if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) + return -EFAULT; + +- if (!capable(CAP_SYS_TTY_CONFIG)) +- perm = 0; +- + switch (cmd) { + case KDGKBENT: + /* Ensure another thread doesn't free it under us */ +@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, + spin_unlock_irqrestore(&kbd_event_lock, flags); + return put_user(val, &user_kbe->kb_value); + case KDSKBENT: ++ if (!capable(CAP_SYS_TTY_CONFIG)) ++ perm = 0; ++ + if (!perm) + return -EPERM; + if (!i && v == K_NOSUCHMAP) { +@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) + int i, j, k; + int ret; + +- if 
(!capable(CAP_SYS_TTY_CONFIG)) +- perm = 0; +- + kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); + if (!kbs) { + ret = -ENOMEM; +@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) + kfree(kbs); + return ((p && *p) ? -EOVERFLOW : 0); + case KDSKBSENT: ++ if (!capable(CAP_SYS_TTY_CONFIG)) ++ perm = 0; ++ + if (!perm) { + ret = -EPERM; + goto reterr; +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c +index a673e5b..36e5d32 100644 +--- a/drivers/uio/uio.c ++++ b/drivers/uio/uio.c +@@ -25,6 +25,7 @@ + #include <linux/kobject.h> + #include <linux/cdev.h> + #include <linux/uio_driver.h> ++#include <asm/local.h> + + #define UIO_MAX_DEVICES (1U << MINORBITS) + +@@ -32,7 +33,7 @@ struct uio_device { + struct module *owner; + struct device *dev; + int minor; +- atomic_t event; ++ atomic_unchecked_t event; + struct fasync_struct *async_queue; + wait_queue_head_t wait; + struct uio_info *info; +@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct uio_device *idev = dev_get_drvdata(dev); +- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); ++ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event)); + } + static DEVICE_ATTR_RO(event); + +@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info) + { + struct uio_device *idev = info->uio_dev; + +- atomic_inc(&idev->event); ++ atomic_inc_unchecked(&idev->event); + wake_up_interruptible(&idev->wait); + kill_fasync(&idev->async_queue, SIGIO, POLL_IN); + } +@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep) + } + + listener->dev = idev; +- listener->event_count = atomic_read(&idev->event); ++ listener->event_count = atomic_read_unchecked(&idev->event); + filep->private_data = listener; + + if (idev->info->open) { +@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait) + return -EIO; + + poll_wait(filep, &idev->wait, wait); +- if (listener->event_count != atomic_read(&idev->event)) ++ if (listener->event_count != atomic_read_unchecked(&idev->event)) + return POLLIN | POLLRDNORM; + return 0; + } +@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf, + do { + set_current_state(TASK_INTERRUPTIBLE); + +- event_count = atomic_read(&idev->event); ++ event_count = atomic_read_unchecked(&idev->event); + if (event_count != listener->event_count) { + if (copy_to_user(buf, &event_count, count)) + retval = -EFAULT; +@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, + static int uio_find_mem_index(struct vm_area_struct *vma) + { + struct uio_device *idev = vma->vm_private_data; ++ unsigned long size; + + if (vma->vm_pgoff < MAX_UIO_MAPS) { +- if (idev->info->mem[vma->vm_pgoff].size == 0) ++ size = idev->info->mem[vma->vm_pgoff].size; ++ if (size == 0) ++ return -1; ++ if (vma->vm_end - vma->vm_start > size) + return -1; + return (int)vma->vm_pgoff; + } +@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner, + idev->owner = owner; + idev->info = info; + init_waitqueue_head(&idev->wait); +- atomic_set(&idev->event, 0); ++ atomic_set_unchecked(&idev->event, 0); + + ret = uio_get_minor(idev); + if (ret) +diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c +index 813d4d3..a71934f 100644 +--- a/drivers/usb/atm/cxacru.c ++++ b/drivers/usb/atm/cxacru.c +@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev, + ret = sscanf(buf + pos, 
"%x=%x%n", &index, &value, &tmp); + if (ret < 2) + return -EINVAL; +- if (index < 0 || index > 0x7f) ++ if (index > 0x7f) + return -EINVAL; + pos += tmp; + +diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c +index dada014..1d0d517 100644 +--- a/drivers/usb/atm/usbatm.c ++++ b/drivers/usb/atm/usbatm.c +@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (printk_ratelimit()) + atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", + __func__, vpi, vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + +@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (length > ATM_MAX_AAL5_PDU) { + atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", + __func__, length, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + +@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (sarb->len < pdu_length) { + atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", + __func__, pdu_length, sarb->len, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + + if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { + atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", + __func__, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + +@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + if (printk_ratelimit()) + atm_err(instance, "%s: no memory for skb (length: %u)!\n", + __func__, length); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto out; + } + +@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char + + vcc->push(vcc, skb); + +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + out: + skb_trim(sarb, 0); + } +@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data) + struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; + + usbatm_pop(vcc, skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + skb = skb_dequeue(&instance->sndqueue); + } +@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page + if (!left--) + return sprintf(page, + "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", +- atomic_read(&atm_dev->stats.aal5.tx), +- atomic_read(&atm_dev->stats.aal5.tx_err), +- atomic_read(&atm_dev->stats.aal5.rx), +- atomic_read(&atm_dev->stats.aal5.rx_err), +- atomic_read(&atm_dev->stats.aal5.rx_drop)); ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx), ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); + + if (!left--) { + if (instance->disconnected) +diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c +index 2a3bbdf..91d72cf 100644 +--- a/drivers/usb/core/devices.c ++++ b/drivers/usb/core/devices.c +@@ -126,7 +126,7 @@ static const char format_endpt[] = + * time it gets called. 
+ */ + static struct device_connect_event { +- atomic_t count; ++ atomic_unchecked_t count; + wait_queue_head_t wait; + } device_event = { + .count = ATOMIC_INIT(1), +@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = { + + void usbfs_conn_disc_event(void) + { +- atomic_add(2, &device_event.count); ++ atomic_add_unchecked(2, &device_event.count); + wake_up(&device_event.wait); + } + +@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file, + + poll_wait(file, &device_event.wait, wait); + +- event_count = atomic_read(&device_event.count); ++ event_count = atomic_read_unchecked(&device_event.count); + if (file->f_version != event_count) { + file->f_version = event_count; + return POLLIN | POLLRDNORM; +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 9ca7716..a2ccc2e 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, + struct dev_state *ps = file->private_data; + struct usb_device *dev = ps->dev; + ssize_t ret = 0; +- unsigned len; ++ size_t len; + loff_t pos; + int i; + +@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, + for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { + struct usb_config_descriptor *config = + (struct usb_config_descriptor *)dev->rawdescriptors[i]; +- unsigned int length = le16_to_cpu(config->wTotalLength); ++ size_t length = le16_to_cpu(config->wTotalLength); + + if (*ppos < pos + length) { + + /* The descriptor may claim to be longer than it + * really is. Here is the actual allocated length. */ +- unsigned alloclen = ++ size_t alloclen = + le16_to_cpu(dev->config[i].desc.wTotalLength); + +- len = length - (*ppos - pos); ++ len = length + pos - *ppos; + if (len > nbytes) + len = nbytes; + + /* Simply don't write (skip over) unallocated parts */ + if (alloclen > (*ppos - pos)) { +- alloclen -= (*ppos - pos); ++ alloclen = alloclen + pos - *ppos; + if (copy_to_user(buf, + dev->rawdescriptors[i] + (*ppos - pos), + min(len, alloclen))) { +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index 2518c32..1c201bb 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) + */ + usb_get_urb(urb); + atomic_inc(&urb->use_count); +- atomic_inc(&urb->dev->urbnum); ++ atomic_inc_unchecked(&urb->dev->urbnum); + usbmon_urb_submit(&hcd->self, urb); + + /* NOTE requirements on root-hub callers (usbfs and the hub +@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) + urb->hcpriv = NULL; + INIT_LIST_HEAD(&urb->urb_list); + atomic_dec(&urb->use_count); +- atomic_dec(&urb->dev->urbnum); ++ atomic_dec_unchecked(&urb->dev->urbnum); + if (atomic_read(&urb->reject)) + wake_up(&usb_kill_urb_queue); + usb_put_urb(urb); +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 6650df7..3a94427 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -27,6 +27,7 @@ + #include <linux/freezer.h> + #include <linux/random.h> + #include <linux/pm_qos.h> ++#include <linux/grsecurity.h> + + #include <asm/uaccess.h> + #include <asm/byteorder.h> +@@ -4549,6 +4550,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, + goto done; + return; + } ++ ++ if (gr_handle_new_usb()) ++ goto done; ++ + if (hub_is_superspeed(hub->hdev)) + unit_load = 150; + else +diff --git a/drivers/usb/core/message.c 
b/drivers/usb/core/message.c +index f829a1a..e6c334a 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev, + * Return: If successful, the number of bytes transferred. Otherwise, a negative + * error number. + */ +-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, ++int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, + __u8 requesttype, __u16 value, __u16 index, void *data, + __u16 size, int timeout) + { +@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg); + * If successful, 0. Otherwise a negative error number. The number of actual + * bytes transferred will be stored in the @actual_length paramater. + */ +-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, ++int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, int timeout) + { + return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout); +@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg); + * bytes transferred will be stored in the @actual_length parameter. + * + */ +-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, ++int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, int timeout) + { + struct urb *urb; +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c +index 1236c60..d47a51c 100644 +--- a/drivers/usb/core/sysfs.c ++++ b/drivers/usb/core/sysfs.c +@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr, + struct usb_device *udev; + + udev = to_usb_device(dev); +- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum)); ++ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum)); + } + static DEVICE_ATTR_RO(urbnum); + +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c +index 4d11449..f4ccabf 100644 +--- a/drivers/usb/core/usb.c ++++ b/drivers/usb/core/usb.c +@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, + set_dev_node(&dev->dev, dev_to_node(bus->controller)); + dev->state = USB_STATE_ATTACHED; + dev->lpm_disable_count = 1; +- atomic_set(&dev->urbnum, 0); ++ atomic_set_unchecked(&dev->urbnum, 0); + + INIT_LIST_HEAD(&dev->ep0.urb_list); + dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 09e9619..d266724 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, + if (!usb_endpoint_xfer_isoc(desc)) + return 0; + +- memset(&trb_link, 0, sizeof(trb_link)); +- + /* Link TRB for ISOC. 
The HWO bit is never reset */ + trb_st_hw = &dep->trb_pool[0]; + +diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c +index 8cfc319..4868255 100644 +--- a/drivers/usb/early/ehci-dbgp.c ++++ b/drivers/usb/early/ehci-dbgp.c +@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len) + + #ifdef CONFIG_KGDB + static struct kgdb_io kgdbdbgp_io_ops; +-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops) ++static struct kgdb_io kgdbdbgp_io_ops_console; ++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console) + #else + #define dbgp_kgdb_mode (0) + #endif +@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = { + .write_char = kgdbdbgp_write_char, + }; + ++static struct kgdb_io kgdbdbgp_io_ops_console = { ++ .name = "kgdbdbgp", ++ .read_char = kgdbdbgp_read_char, ++ .write_char = kgdbdbgp_write_char, ++ .is_console = 1 ++}; ++ + static int kgdbdbgp_wait_time; + + static int __init kgdbdbgp_parse_config(char *str) +@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str) + ptr++; + kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); + } +- kgdb_register_io_module(&kgdbdbgp_io_ops); +- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1; ++ if (early_dbgp_console.index != -1) ++ kgdb_register_io_module(&kgdbdbgp_io_ops_console); ++ else ++ kgdb_register_io_module(&kgdbdbgp_io_ops); + + return 0; + } +diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c +index 2b4c82d..06a8ee6 100644 +--- a/drivers/usb/gadget/f_uac1.c ++++ b/drivers/usb/gadget/f_uac1.c +@@ -13,6 +13,7 @@ + #include <linux/kernel.h> + #include <linux/device.h> + #include <linux/atomic.h> ++#include <linux/module.h> + + #include "u_uac1.h" + +diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c +index ad0aca8..8ff84865 100644 +--- a/drivers/usb/gadget/u_serial.c ++++ b/drivers/usb/gadget/u_serial.c +@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file) + spin_lock_irq(&port->port_lock); + + /* already open? Great. */ +- if (port->port.count) { ++ if (atomic_read(&port->port.count)) { + status = 0; +- port->port.count++; ++ atomic_inc(&port->port.count); + + /* currently opening/closing? wait ... 
*/ + } else if (port->openclose) { +@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file) + tty->driver_data = port; + port->port.tty = tty; + +- port->port.count = 1; ++ atomic_set(&port->port.count, 1); + port->openclose = false; + + /* if connected, start the I/O stream */ +@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file) + + spin_lock_irq(&port->port_lock); + +- if (port->port.count != 1) { +- if (port->port.count == 0) ++ if (atomic_read(&port->port.count) != 1) { ++ if (atomic_read(&port->port.count) == 0) + WARN_ON(1); + else +- --port->port.count; ++ atomic_dec(&port->port.count); + goto exit; + } + +@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file) + * and sleep if necessary + */ + port->openclose = true; +- port->port.count = 0; ++ atomic_set(&port->port.count, 0); + + gser = port->port_usb; + if (gser && gser->disconnect) +@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port) + int cond; + + spin_lock_irq(&port->port_lock); +- cond = (port->port.count == 0) && !port->openclose; ++ cond = (atomic_read(&port->port.count) == 0) && !port->openclose; + spin_unlock_irq(&port->port_lock); + return cond; + } +@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num) + /* if it's already open, start I/O ... and notify the serial + * protocol about open/close status (connect/disconnect). + */ +- if (port->port.count) { ++ if (atomic_read(&port->port.count)) { + pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); + gs_start_io(port); + if (gser->connect) +@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser) + + port->port_usb = NULL; + gser->ioport = NULL; +- if (port->port.count > 0 || port->openclose) { ++ if (atomic_read(&port->port.count) > 0 || port->openclose) { + wake_up_interruptible(&port->drain_wait); + if (port->port.tty) + tty_hangup(port->port.tty); +@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser) + + /* finally, free any unused/unusable I/O buffers */ + spin_lock_irqsave(&port->port_lock, flags); +- if (port->port.count == 0 && !port->openclose) ++ if (atomic_read(&port->port.count) == 0 && !port->openclose) + gs_buf_free(&port->port_write_buf); + gs_free_requests(gser->out, &port->read_pool, NULL); + gs_free_requests(gser->out, &port->read_queue, NULL); +diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c +index 7a55fea..cc0ed4f 100644 +--- a/drivers/usb/gadget/u_uac1.c ++++ b/drivers/usb/gadget/u_uac1.c +@@ -16,6 +16,7 @@ + #include <linux/ctype.h> + #include <linux/random.h> + #include <linux/syscalls.h> ++#include <linux/module.h> + + #include "u_uac1.h" + +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c +index 7d6f64c..37a1efc 100644 +--- a/drivers/usb/host/ehci-hub.c ++++ b/drivers/usb/host/ehci-hub.c +@@ -780,7 +780,7 @@ static struct urb *request_single_step_set_feature_urb( + urb->transfer_flags = URB_DIR_IN; + usb_get_urb(urb); + atomic_inc(&urb->use_count); +- atomic_inc(&urb->dev->urbnum); ++ atomic_inc_unchecked(&urb->dev->urbnum); + urb->setup_dma = dma_map_single( + hcd->self.controller, + urb->setup_packet, +@@ -847,7 +847,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) + urb->status = -EINPROGRESS; + usb_get_urb(urb); + atomic_inc(&urb->use_count); +- atomic_inc(&urb->dev->urbnum); ++ atomic_inc_unchecked(&urb->dev->urbnum); + retval = submit_single_step_set_feature(hcd, urb, 0); + if (!retval && 
!wait_for_completion_timeout(&done, + msecs_to_jiffies(2000))) { +diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c +index e076699..6b3b875 100644 +--- a/drivers/usb/host/hwa-hc.c ++++ b/drivers/usb/host/hwa-hc.c +@@ -301,7 +301,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + struct device *dev = &wa->usb_iface->dev; +- u8 mas_le[UWB_NUM_MAS/8]; ++ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL); ++ ++ if (mas_le == NULL) ++ return -ENOMEM; + + /* Set the stream index */ + result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), +@@ -320,10 +323,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, + WUSB_REQ_SET_WUSB_MAS, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, +- mas_le, 32, USB_CTRL_SET_TIMEOUT); ++ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT); + if (result < 0) + dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result); + out: ++ kfree(mas_le); ++ + return result; + } + +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c +index ba6a5d6..f88f7f3 100644 +--- a/drivers/usb/misc/appledisplay.c ++++ b/drivers/usb/misc/appledisplay.c +@@ -83,7 +83,7 @@ struct appledisplay { + spinlock_t lock; + }; + +-static atomic_t count_displays = ATOMIC_INIT(0); ++static atomic_unchecked_t count_displays = ATOMIC_INIT(0); + static struct workqueue_struct *wq; + + static void appledisplay_complete(struct urb *urb) +@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface, + + /* Register backlight device */ + snprintf(bl_name, sizeof(bl_name), "appledisplay%d", +- atomic_inc_return(&count_displays) - 1); ++ atomic_inc_return_unchecked(&count_displays) - 1); + memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_RAW; + props.max_brightness = 0xff; +diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c +index 8d7fc48..01c4986 100644 +--- a/drivers/usb/serial/console.c ++++ b/drivers/usb/serial/console.c +@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options) + + info->port = port; + +- ++port->port.count; ++ atomic_inc(&port->port.count); + if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) { + if (serial->type->set_termios) { + /* +@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options) + } + /* Now that any required fake tty operations are completed restore + * the tty port count */ +- --port->port.count; ++ atomic_dec(&port->port.count); + /* The console is special in terms of closing the device so + * indicate this port is now acting as a system console. 
*/ + port->port.console = 1; +@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options) + free_tty: + kfree(tty); + reset_open_count: +- port->port.count = 0; ++ atomic_set(&port->port.count, 0); + usb_autopm_put_interface(serial->interface); + error_get_interface: + usb_serial_put(serial); +@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options) + static void usb_console_write(struct console *co, + const char *buf, unsigned count) + { +- static struct usbcons_info *info = &usbcons_info; ++ struct usbcons_info *info = &usbcons_info; + struct usb_serial_port *port = info->port; + struct usb_serial *serial; + int retval = -ENODEV; +diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h +index 75f70f0..d467e1a 100644 +--- a/drivers/usb/storage/usb.h ++++ b/drivers/usb/storage/usb.h +@@ -63,7 +63,7 @@ struct us_unusual_dev { + __u8 useProtocol; + __u8 useTransport; + int (*initFunction)(struct us_data *); +-}; ++} __do_const; + + + /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */ +diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h +index a2ef84b..aa7c2b8 100644 +--- a/drivers/usb/wusbcore/wa-hc.h ++++ b/drivers/usb/wusbcore/wa-hc.h +@@ -225,7 +225,7 @@ struct wahc { + spinlock_t xfer_list_lock; + struct work_struct xfer_enqueue_work; + struct work_struct xfer_error_work; +- atomic_t xfer_id_count; ++ atomic_unchecked_t xfer_id_count; + + kernel_ulong_t quirks; + }; +@@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *wa) + INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run); + INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run); + wa->dto_in_use = 0; +- atomic_set(&wa->xfer_id_count, 1); ++ atomic_set_unchecked(&wa->xfer_id_count, 1); + } + + /** +diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c +index 3cd96e9..bd7c58d 100644 +--- a/drivers/usb/wusbcore/wa-xfer.c ++++ b/drivers/usb/wusbcore/wa-xfer.c +@@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer) + */ + static void wa_xfer_id_init(struct wa_xfer *xfer) + { +- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); ++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count); + } + + /* Return the xfer's ID. */ +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c +index 21271d8..45b55a0 100644 +--- a/drivers/vfio/vfio.c ++++ b/drivers/vfio/vfio.c +@@ -487,7 +487,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) + return 0; + + /* TODO Prevent device auto probing */ +- WARN("Device %s added to live group %d!\n", dev_name(dev), ++ WARN(1, "Device %s added to live group %d!\n", dev_name(dev), + iommu_group_id(group->iommu_group)); + + return 0; +diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c +index 5174eba..451e6bc 100644 +--- a/drivers/vhost/vringh.c ++++ b/drivers/vhost/vringh.c +@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh, + /* Userspace access helpers: in this case, addresses are really userspace. 
*/ + static inline int getu16_user(u16 *val, const u16 *p) + { +- return get_user(*val, (__force u16 __user *)p); ++ return get_user(*val, (u16 __force_user *)p); + } + + static inline int putu16_user(u16 *p, u16 val) + { +- return put_user(val, (__force u16 __user *)p); ++ return put_user(val, (u16 __force_user *)p); + } + + static inline int copydesc_user(void *dst, const void *src, size_t len) + { +- return copy_from_user(dst, (__force void __user *)src, len) ? ++ return copy_from_user(dst, (void __force_user *)src, len) ? + -EFAULT : 0; + } + +@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst, + const struct vring_used_elem *src, + unsigned int num) + { +- return copy_to_user((__force void __user *)dst, src, ++ return copy_to_user((void __force_user *)dst, src, + sizeof(*dst) * num) ? -EFAULT : 0; + } + + static inline int xfer_from_user(void *src, void *dst, size_t len) + { +- return copy_from_user(dst, (__force void __user *)src, len) ? ++ return copy_from_user(dst, (void __force_user *)src, len) ? + -EFAULT : 0; + } + + static inline int xfer_to_user(void *dst, void *src, size_t len) + { +- return copy_to_user((__force void __user *)dst, src, len) ? ++ return copy_to_user((void __force_user *)dst, src, len) ? + -EFAULT : 0; + } + +@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features, + vrh->last_used_idx = 0; + vrh->vring.num = num; + /* vring expects kernel addresses, but only used via accessors. */ +- vrh->vring.desc = (__force struct vring_desc *)desc; +- vrh->vring.avail = (__force struct vring_avail *)avail; +- vrh->vring.used = (__force struct vring_used *)used; ++ vrh->vring.desc = (__force_kernel struct vring_desc *)desc; ++ vrh->vring.avail = (__force_kernel struct vring_avail *)avail; ++ vrh->vring.used = (__force_kernel struct vring_used *)used; + return 0; + } + EXPORT_SYMBOL(vringh_init_user); +@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p) + + static inline int putu16_kern(u16 *p, u16 val) + { +- ACCESS_ONCE(*p) = val; ++ ACCESS_ONCE_RW(*p) = val; + return 0; + } + +diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c +index 1b0b233..6f34c2c 100644 +--- a/drivers/video/arcfb.c ++++ b/drivers/video/arcfb.c +@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf, + return -ENOSPC; + + err = 0; +- if ((count + p) > fbmemlength) { ++ if (count > (fbmemlength - p)) { + count = fbmemlength - p; + err = -ENOSPC; + } +diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c +index 52108be..c7c110d 100644 +--- a/drivers/video/aty/aty128fb.c ++++ b/drivers/video/aty/aty128fb.c +@@ -149,7 +149,7 @@ enum { + }; + + /* Must match above enum */ +-static char * const r128_family[] = { ++static const char * const r128_family[] = { + "AGP", + "PCI", + "PRO AGP", +diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c +index 28fafbf..ae91651 100644 +--- a/drivers/video/aty/atyfb_base.c ++++ b/drivers/video/aty/atyfb_base.c +@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info) + par->accel_flags = var->accel_flags; /* hack */ + + if (var->accel_flags) { +- info->fbops->fb_sync = atyfb_sync; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_sync = atyfb_sync; ++ pax_close_kernel(); + info->flags &= ~FBINFO_HWACCEL_DISABLED; + } else { +- info->fbops->fb_sync = NULL; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_sync = NULL; ++ pax_close_kernel(); + info->flags |= FBINFO_HWACCEL_DISABLED; + } + +diff --git 
a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c +index 0fe02e2..ab01b26 100644 +--- a/drivers/video/aty/mach64_cursor.c ++++ b/drivers/video/aty/mach64_cursor.c +@@ -8,6 +8,7 @@ + #include "../fb_draw.h" + + #include <asm/io.h> ++#include <asm/pgtable.h> + + #ifdef __sparc__ + #include <asm/fbio.h> +@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info) + info->sprite.buf_align = 16; /* and 64 lines tall. */ + info->sprite.flags = FB_PIXMAP_IO; + +- info->fbops->fb_cursor = atyfb_cursor; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_cursor = atyfb_cursor; ++ pax_close_kernel(); + + return 0; + } +diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c +index 84a110a..96312c3 100644 +--- a/drivers/video/backlight/kb3886_bl.c ++++ b/drivers/video/backlight/kb3886_bl.c +@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo; + static unsigned long kb3886bl_flags; + #define KB3886BL_SUSPENDED 0x01 + +-static struct dmi_system_id kb3886bl_device_table[] __initdata = { ++static const struct dmi_system_id kb3886bl_device_table[] __initconst = { + { + .ident = "Sahara Touch-iT", + .matches = { +diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c +index 900aa4e..6d49418 100644 +--- a/drivers/video/fb_defio.c ++++ b/drivers/video/fb_defio.c +@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info) + + BUG_ON(!fbdefio); + mutex_init(&fbdefio->lock); +- info->fbops->fb_mmap = fb_deferred_io_mmap; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap; ++ pax_close_kernel(); + INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work); + INIT_LIST_HEAD(&fbdefio->pagelist); + if (fbdefio->delay == 0) /* set a default of 1 s */ +@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info) + page->mapping = NULL; + } + +- info->fbops->fb_mmap = NULL; ++ *(void **)&info->fbops->fb_mmap = NULL; + mutex_destroy(&fbdefio->lock); + } + EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); +diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c +index 7309ac7..be3c49c 100644 +--- a/drivers/video/fbmem.c ++++ b/drivers/video/fbmem.c +@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, + image->dx += image->width + 8; + } + } else if (rotate == FB_ROTATE_UD) { +- for (x = 0; x < num && image->dx >= 0; x++) { ++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) { + info->fbops->fb_imageblit(info, image); + image->dx -= image->width + 8; + } +@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, + image->dy += image->height + 8; + } + } else if (rotate == FB_ROTATE_CCW) { +- for (x = 0; x < num && image->dy >= 0; x++) { ++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) { + info->fbops->fb_imageblit(info, image); + image->dy -= image->height + 8; + } +@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, + return -EFAULT; + if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) + return -EINVAL; +- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) ++ if (con2fb.framebuffer >= FB_MAX) + return -EINVAL; + if (!registered_fb[con2fb.framebuffer]) + request_module("fb%d", con2fb.framebuffer); +@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix, + __u32 data; + int err; + +- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id)); ++ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id)); + + data = (__u32) (unsigned long) 
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 7309ac7..be3c49c 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ 			image->dx += image->width + 8;
+ 		}
+ 	} else if (rotate == FB_ROTATE_UD) {
+-		for (x = 0; x < num && image->dx >= 0; x++) {
++		for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+ 			info->fbops->fb_imageblit(info, image);
+ 			image->dx -= image->width + 8;
+ 		}
+@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ 			image->dy += image->height + 8;
+ 		}
+ 	} else if (rotate == FB_ROTATE_CCW) {
+-		for (x = 0; x < num && image->dy >= 0; x++) {
++		for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+ 			info->fbops->fb_imageblit(info, image);
+ 			image->dy -= image->height + 8;
+ 		}
+@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ 		return -EFAULT;
+ 	if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ 		return -EINVAL;
+-	if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++	if (con2fb.framebuffer >= FB_MAX)
+ 		return -EINVAL;
+ 	if (!registered_fb[con2fb.framebuffer])
+ 		request_module("fb%d", con2fb.framebuffer);
+@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
+ 	__u32 data;
+ 	int err;
+
+-	err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
++	err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
+
+ 	data = (__u32) (unsigned long) fix->smem_start;
+ 	err |= put_user(data, &fix32->smem_start);
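The fbmem.c loop fixes and the con2fb bounds check turn on the same C subtlety: image->dx, image->dy and con2fb.framebuffer are unsigned (__u32 in the upstream structs), so a plain `>= 0` test is always true and a `< 0` test is always false. Casting to __s32 restores a meaningful termination test, and for the index a single unsigned comparison already rejects wrapped-around negatives. A small standalone demo, assuming the fields mirror the upstream __u32 declarations:

#include <stdint.h>
#include <stdio.h>

#define FB_MAX 32

int main(void)
{
	uint32_t dx = 0;

	/* "dx >= 0" can never become false for an unsigned type, so the
	 * original loop never stopped on underflow; a signed cast catches
	 * the wraparound. */
	dx -= 8;                                            /* wraps to a huge value */
	printf("dx >= 0:          %d\n", dx >= 0);          /* always 1 */
	printf("(int32_t)dx >= 0: %d\n", (int32_t)dx >= 0); /* now 0    */

	/* For an unsigned index, "idx >= FB_MAX" alone covers both ends
	 * of the range: a "negative" input wraps to a large unsigned
	 * value, so the old "< 0" half of the check was dead code. */
	uint32_t idx = (uint32_t)-1;
	printf("idx >= FB_MAX:    %d\n", idx >= FB_MAX);    /* 1: rejected */
	return 0;
}

The copy_to_user() change in the last hunk is a plain type fix: fix32->id is a char array, so it already decays to the destination pointer; taking its address passed a pointer-to-array instead.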
+diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
+index 130708f..cdac1a9 100644
+--- a/drivers/video/hyperv_fb.c
++++ b/drivers/video/hyperv_fb.c
+@@ -233,7 +233,7 @@ static uint screen_fb_size;
+ static inline int synthvid_send(struct hv_device *hdev,
+ 				struct synthvid_msg *msg)
+ {
+-	static atomic64_t request_id = ATOMIC64_INIT(0);
++	static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
+ 	int ret;
+
+ 	msg->pipe_hdr.type = PIPE_MSG_DATA;
+@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
+
+ 	ret = vmbus_sendpacket(hdev->channel, msg,
+ 			       msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
+-			       atomic64_inc_return(&request_id),
++			       atomic64_inc_return_unchecked(&request_id),
+ 			       VM_PKT_DATA_INBAND, 0);
+
+ 	if (ret)
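The hyperv_fb hunk swaps the request-id counter to grsecurity's atomic64_unchecked_t. Under PAX_REFCOUNT the plain atomic types detect and trap on overflow; the _unchecked variants opt out for counters where wraparound is harmless, such as this opaque transaction id. A rough model of the split using C11 atomics; the function names and the saturating behavior of the checked path are illustrative assumptions, not the PaX code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic int64_t atomic64_model_t;

/* "_unchecked": wraparound is acceptable, just hand out the next value */
static int64_t inc_return_unchecked(atomic64_model_t *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

/* "checked": refuse to wrap past INT64_MAX (stand-in for the PaX trap) */
static int64_t inc_return_checked(atomic64_model_t *v)
{
	int64_t old = atomic_load(v);
	while (old < INT64_MAX) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return old + 1;
	}
	fprintf(stderr, "refcount overflow detected\n");
	return INT64_MAX;
}

int main(void)
{
	atomic64_model_t request_id = 0;
	printf("id=%lld\n", (long long)inc_return_unchecked(&request_id));
	printf("id=%lld\n", (long long)inc_return_checked(&request_id));
	return 0;
}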
+diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
+index 7672d2e..b56437f 100644
+--- a/drivers/video/i810/i810_accel.c
++++ b/drivers/video/i810/i810_accel.c
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
+ 		}
+ 	}
+ 	printk("ringbuffer lockup!!!\n");
++	printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+ 	i810_report_error(mmio);
+ 	par->dev_flags |= LOCKUP;
+ 	info->pixmap.scan_align = 1;
+diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
+index 3c14e43..2630570 100644
+--- a/drivers/video/logo/logo_linux_clut224.ppm
++++ b/drivers/video/logo/logo_linux_clut224.ppm
+@@ -2,1603 +2,1123 @@ P3
+ # Standard 224-color Linux logo
+ 80 80
+ 255
[... raw 80x80 PPM pixel rows elided: this hunk replaces the stock logo's 1603 data lines with 1123 new ones, and the remaining numeric rows of the image continue below ...]
2 2 2 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0 ++37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157 ++157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174 ++153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0 ++4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37 ++125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154 ++156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163 ++174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0 ++4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211 ++136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2 ++1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4 ++2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0 ++0 0 0 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 ++4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127 ++158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156 ++153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125 ++37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4 ++4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0 ++4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165 ++154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174 ++174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3 ++32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193 ++28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5 ++50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1 ++0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81 ++2 0 0 0 0 0 ++4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2 ++0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174 ++174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153 ++165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6 ++4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3 ++4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174 ++174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158 ++60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148 ++136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13 ++22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132 ++136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0 ++26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165 ++37 38 37 0 0 0 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 ++13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165 ++153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174 ++177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0 ++4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5 ++5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5 ++6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84 ++166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27 ++4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220 ++146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103 ++71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196 ++90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28 ++125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174 ++85 115 134 4 0 0 ++4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55 ++125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153 ++155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154 ++125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 ++0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4 ++5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6 ++37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0 ++4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209 ++90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103 ++2 
5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93 ++13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137 ++166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174 ++60 73 81 4 0 0 ++4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174 ++174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155 ++156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37 ++4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3 ++10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0 ++4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55 ++80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209 ++28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13 ++50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1 ++1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174 ++167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125 ++16 19 21 4 0 0 ++4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174 ++158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 ++167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0 ++4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4 ++4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86 ++80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1 ++4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5 ++3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209 ++146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 ++68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193 ++136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0 ++24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165 ++163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28 ++4 0 0 4 3 3 ++3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158 ++156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174 ++155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3 ++2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196 ++136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0 ++0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0 ++0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211 ++136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193 ++28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193 ++22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81 ++137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153 ++60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0 ++3 2 2 4 4 4 ++3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158 ++157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125 ++37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0 ++0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196 ++101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126 ++14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ++22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209 ++136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13 ++17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15 ++2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163 ++166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63 ++13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2 ++4 4 4 4 4 4 ++1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153 ++163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6 ++4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4 ++4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18 ++40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 
185 209 ++101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126 ++136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 ++136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103 ++136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5 ++3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167 ++174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0 ++4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131 ++155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0 ++4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5 ++4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159 ++101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 ++136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211 ++136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196 ++136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220 ++90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17 ++85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174 ++167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3 ++6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5 ++5 5 5 5 5 5 ++1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125 ++131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0 ++6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1 ++0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196 ++101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209 ++136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209 ++101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141 ++7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154 ++174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125 ++24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 ++5 5 5 4 4 4 ++4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131 ++131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3 ++6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0 ++13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 ++101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 ++136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196 ++136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8 ++2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174 ++174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0 ++4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137 ++137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2 ++4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72 ++64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 ++101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7 ++37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166 ++167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0 ++3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4 
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137 ++153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2 ++4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 ++90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209 ++101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196 ++101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193 ++35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84 ++154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157 ++60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137 ++153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2 ++4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193 ++64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193 ++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 ++136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 ++13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165 ++174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81 ++6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153 ++156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2 ++4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161 ++90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 ++90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 ++101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8 ++2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158 ++174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153 ++158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2 ++4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161 ++37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 ++90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 ++90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7 ++5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154 ++167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37 ++6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154 ++163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2 ++4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151 ++18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193 ++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 ++90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196 ++101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141 ++13 20 
25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5 ++3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158 ++174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63 ++4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158 ++167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2 ++4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144 ++18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 ++26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193 ++90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196 ++101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17 ++7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5 ++4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158 ++174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163 ++174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3 ++5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151 ++18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 ++90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196 ++101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5 ++2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5 ++3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137 ++153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166 ++174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3 ++5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144 ++18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 ++26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161 ++35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8 ++2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5 ++3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125 ++131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37 ++4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167 ++174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151 ++18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 ++26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25 ++7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3 ++4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174 ++174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3 ++5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144 ++18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161 ++18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193 ++26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3 ++28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3 ++3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4 ++4 5 5 5 5 5 3 
3 3 0 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174 ++174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151 ++10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 ++18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161 ++90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35 ++3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174 ++177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2 ++5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144 ++10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151 ++26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93 ++6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193 ++10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93 ++2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174 ++177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144 ++10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161 ++26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2 ++7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34 ++3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34 ++21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174 ++190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2 ++5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144 ++10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144 ++24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52 ++18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0 ++28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93 ++26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174 ++190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144 ++10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14 ++0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161 ++26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52 ++37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161 ++90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0 ++4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130 ++137 136 137 125 124 125 125 124 125 
137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174 ++193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2 ++5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144 ++10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7 ++1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161 ++26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52 ++22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161 ++26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 ++190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144 ++10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2 ++2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161 ++26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52 ++10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161 ++26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174 ++193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2 ++5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144 ++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25 ++13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161 ++10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 ++26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 ++190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2 ++5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25 ++28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 ++10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151 ++28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 ++26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174 ++193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3 ++5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5 ++4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144 ++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151 ++10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151 ++18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 ++22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4 ++4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 
0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 ++190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2 ++6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3 ++1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151 ++18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144 ++10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144 ++26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14 ++1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4 ++5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174 ++193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0 ++2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 ++4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93 ++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 ++10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161 ++26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0 ++2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5 ++3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137 ++131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174 ++193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34 ++0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4 ++4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7 ++13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144 ++10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151 ++28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4 ++4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0 ++0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131 ++125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174 ++193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203 ++120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4 ++4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2 ++4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144 ++10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25 ++4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2 ++24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125 ++125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221 ++174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221 ++220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0 ++3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5 ++4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144 ++10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2 ++1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4 ++5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81 ++137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131 ++125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8 ++0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221 ++193 200 203 
193 200 203 193 200 203 193 200 203 205 212 215 220 221 221 ++220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6 ++4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25 ++22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3 ++4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166 ++166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125 ++125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3 ++4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167 ++220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 ++205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125 ++24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5 ++4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7 ++4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4 ++4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0 ++2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166 ++156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137 ++137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28 ++125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203 ++205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246 ++193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3 ++5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3 ++1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4 ++5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17 ++60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163 ++153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137 ++125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 ++6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221 ++193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246 ++244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0 ++0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5 ++4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6 ++3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156 ++220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154 ++153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81 ++13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6 ++6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246 ++244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203 ++220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37 ++3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1 ++0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221 ++177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157 ++158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0 ++4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6 ++6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81 ++177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221 ++220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215 ++125 124 
125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0 ++37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174 ++174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167 ++158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0 ++4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 ++4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 ++26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221 ++205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246 ++244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0 ++0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127 ++177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187 ++174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137 ++60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0 ++6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6 ++6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221 ++220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221 ++220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2 ++0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214 ++220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174 ++174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27 ++4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 ++6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 ++4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167 ++220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215 ++205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137 ++60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203 ++177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187 ++190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0 ++4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6 ++4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 ++6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6 ++125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215 ++205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221 ++193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187 ++190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201 ++153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 ++6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6 ++4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0 ++4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221 ++205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215 ++220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174 ++174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125 ++6 6 6 4 0 0 
4 0 0 4 3 3 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 ++5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5 ++6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 ++4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221 ++220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 ++190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203 ++193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0 ++4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 ++4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6 ++6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81 ++174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174 ++193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221 ++193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0 ++6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 ++5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3 ++5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 ++6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203 ++193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158 ++60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6 ++5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3 ++5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0 ++4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203 ++193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6 ++6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5 ++4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 ++4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 ++6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125 ++153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6 ++6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3 ++6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6 ++24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0 ++6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 
4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 ++4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 ++6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 ++4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 ++5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6 ++6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 ++6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 ++4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6 ++4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 ++6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6 ++6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6 ++4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 ++4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6 ++5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 ++5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 
4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 ++4 4 4 4 4 4 +diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c +index a01147f..5d896f8 100644 +--- a/drivers/video/matrox/matroxfb_DAC1064.c ++++ b/drivers/video/matrox/matroxfb_DAC1064.c +@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo) + + #ifdef CONFIG_FB_MATROX_MYSTIQUE + struct matrox_switch matrox_mystique = { +- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore, ++ .preinit = MGA1064_preinit, ++ .reset = MGA1064_reset, ++ .init = MGA1064_init, ++ .restore = MGA1064_restore, + }; + EXPORT_SYMBOL(matrox_mystique); + #endif + + #ifdef CONFIG_FB_MATROX_G + struct matrox_switch matrox_G100 = { +- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore, ++ .preinit = MGAG100_preinit, ++ .reset = MGAG100_reset, ++ .init = MGAG100_init, ++ .restore = MGAG100_restore, + }; + EXPORT_SYMBOL(matrox_G100); + #endif +diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c +index 195ad7c..09743fc 100644 +--- a/drivers/video/matrox/matroxfb_Ti3026.c ++++ b/drivers/video/matrox/matroxfb_Ti3026.c +@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo) + } + + struct matrox_switch matrox_millennium = { +- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore ++ .preinit = Ti3026_preinit, ++ .reset = Ti3026_reset, ++ .init = Ti3026_init, ++ .restore = Ti3026_restore + }; + EXPORT_SYMBOL(matrox_millennium); + #endif +diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c +index fe92eed..106e085 100644 +--- a/drivers/video/mb862xx/mb862xxfb_accel.c ++++ b/drivers/video/mb862xx/mb862xxfb_accel.c +@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres) + struct mb862xxfb_par *par = info->par; + + if (info->var.bits_per_pixel == 32) { +- info->fbops->fb_fillrect = cfb_fillrect; +- info->fbops->fb_copyarea = cfb_copyarea; +- info->fbops->fb_imageblit = cfb_imageblit; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_fillrect = cfb_fillrect; ++ *(void **)&info->fbops->fb_copyarea = cfb_copyarea; ++ *(void **)&info->fbops->fb_imageblit = cfb_imageblit; ++ pax_close_kernel(); + } else { + outreg(disp, GC_L0EM, 3); +- info->fbops->fb_fillrect = mb86290fb_fillrect; +- info->fbops->fb_copyarea = mb86290fb_copyarea; +- info->fbops->fb_imageblit = mb86290fb_imageblit; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect; ++ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea; ++ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit; ++ pax_close_kernel(); + } + outreg(draw, GDC_REG_DRAW_BASE, 0); + outreg(draw, GDC_REG_MODE_MISC, 0x8000); +diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c +index def0412..fed6529 100644 +--- a/drivers/video/nvidia/nvidia.c ++++ b/drivers/video/nvidia/nvidia.c +@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info) + info->fix.line_length = (info->var.xres_virtual * + info->var.bits_per_pixel) >> 3; + if (info->var.accel_flags) { +- info->fbops->fb_imageblit = nvidiafb_imageblit; +- info->fbops->fb_fillrect = nvidiafb_fillrect; +- info->fbops->fb_copyarea = nvidiafb_copyarea; +- info->fbops->fb_sync = nvidiafb_sync; ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit; ++ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect; ++ *(void **)&info->fbops->fb_copyarea = 
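The matrox hunks above replace positional struct initializers with designated ones, and the mb862xx hunk shows the other pattern that repeats through the rest of these framebuffer changes: PaX constifies ops tables such as fb_ops, so legitimate runtime updates have to lift the write protection briefly and cast away the added const. A minimal sketch of that pattern, assuming the pax_open_kernel()/pax_close_kernel() primitives defined elsewhere in this patch; set_accel_ops() is a hypothetical helper, not code from the patch:

    #include <linux/fb.h>

    /* Hypothetical helper: route an fb_info's drawing ops to the generic
     * cfb_* implementations while the ops table is write-protected. */
    static void set_accel_ops(struct fb_info *info)
    {
            pax_open_kernel();      /* briefly allow writes to read-only data */
            *(void **)&info->fbops->fb_fillrect  = cfb_fillrect;
            *(void **)&info->fbops->fb_copyarea  = cfb_copyarea;
            *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
            pax_close_kernel();     /* restore write protection */
    }

The *(void **)& cast is what lets the assignment compile once the constify plugin has marked the member const; the open/close pair is what lets it execute once the page itself is read-only.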
+diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
+index def0412..fed6529 100644
+--- a/drivers/video/nvidia/nvidia.c
++++ b/drivers/video/nvidia/nvidia.c
+@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
+ 	info->fix.line_length = (info->var.xres_virtual *
+ 				 info->var.bits_per_pixel) >> 3;
+ 	if (info->var.accel_flags) {
+-		info->fbops->fb_imageblit = nvidiafb_imageblit;
+-		info->fbops->fb_fillrect = nvidiafb_fillrect;
+-		info->fbops->fb_copyarea = nvidiafb_copyarea;
+-		info->fbops->fb_sync = nvidiafb_sync;
++		pax_open_kernel();
++		*(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
++		*(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
++		*(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
++		*(void **)&info->fbops->fb_sync = nvidiafb_sync;
++		pax_close_kernel();
+ 		info->pixmap.scan_align = 4;
+ 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ 		info->flags |= FBINFO_READS_FAST;
+ 		NVResetGraphics(info);
+ 	} else {
+-		info->fbops->fb_imageblit = cfb_imageblit;
+-		info->fbops->fb_fillrect = cfb_fillrect;
+-		info->fbops->fb_copyarea = cfb_copyarea;
+-		info->fbops->fb_sync = NULL;
++		pax_open_kernel();
++		*(void **)&info->fbops->fb_imageblit = cfb_imageblit;
++		*(void **)&info->fbops->fb_fillrect = cfb_fillrect;
++		*(void **)&info->fbops->fb_copyarea = cfb_copyarea;
++		*(void **)&info->fbops->fb_sync = NULL;
++		pax_close_kernel();
+ 		info->pixmap.scan_align = 1;
+ 		info->flags |= FBINFO_HWACCEL_DISABLED;
+ 		info->flags &= ~FBINFO_READS_FAST;
+@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
+ 	info->pixmap.size = 8 * 1024;
+ 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ 
+-	if (!hwcur)
+-		info->fbops->fb_cursor = NULL;
++	if (!hwcur) {
++		pax_open_kernel();
++		*(void **)&info->fbops->fb_cursor = NULL;
++		pax_close_kernel();
++	}
+ 
+ 	info->var.accel_flags = (!noaccel);
+ 
+diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
+index 669a81f..e216d76 100644
+--- a/drivers/video/omap2/dss/display.c
++++ b/drivers/video/omap2/dss/display.c
+@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
+ 	snprintf(dssdev->alias, sizeof(dssdev->alias),
+ 			"display%d", disp_num_counter++);
+ 
++	pax_open_kernel();
+ 	if (drv && drv->get_resolution == NULL)
+-		drv->get_resolution = omapdss_default_get_resolution;
++		*(void **)&drv->get_resolution = omapdss_default_get_resolution;
+ 	if (drv && drv->get_recommended_bpp == NULL)
+-		drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
++		*(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
+ 	if (drv && drv->get_timings == NULL)
+-		drv->get_timings = omapdss_default_get_timings;
++		*(void **)&drv->get_timings = omapdss_default_get_timings;
++	pax_close_kernel();
+ 
+ 	mutex_lock(&panel_list_mutex);
+ 	list_add_tail(&dssdev->panel_list, &panel_list);
+diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
+index 83433cb..71e9b98 100644
+--- a/drivers/video/s1d13xxxfb.c
++++ b/drivers/video/s1d13xxxfb.c
+@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
+ 
+ 	switch(prod_id) {
+ 	case S1D13506_PROD_ID:	/* activate acceleration */
+-		s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+-		s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++		pax_open_kernel();
++		*(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
++		*(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++		pax_close_kernel();
+ 		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+ 			FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
+ 		break;
+diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
+index 2bcc84a..29dd1ea 100644
+--- a/drivers/video/sh_mobile_lcdcfb.c
++++ b/drivers/video/sh_mobile_lcdcfb.c
+@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
+ }
+ 
+ static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
+-	lcdc_sys_write_index,
+-	lcdc_sys_write_data,
+-	lcdc_sys_read_data,
++	.write_index = lcdc_sys_write_index,
++	.write_data = lcdc_sys_write_data,
++	.read_data = lcdc_sys_read_data,
+ };
+ 
+ static int sh_mobile_lcdc_sginit(struct fb_info *info,
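The sh_mobile_lcdcfb hunk above is the same conversion that was applied to the matrox drivers: an ops table moves from positional to designated initializers. Positional initializers bind handlers by field order, which no longer holds once grsecurity's structure-layout randomization is in effect; designated initializers bind by name and survive any layout. A toy illustration (example_ops and its handlers are invented for the example, not taken from the patch):

    /* A toy ops table; with randomized struct layouts the field order
     * written here need not be the order used at run time. */
    struct example_ops {
            void (*write_index)(void *handle, unsigned long index);
            void (*write_data)(void *handle, unsigned long data);
    };

    static void example_write_index(void *handle, unsigned long index) { }
    static void example_write_data(void *handle, unsigned long data) { }

    /* Fragile form: { example_write_index, example_write_data } binds by
     * position.  The designated form below binds by name. */
    static struct example_ops example_bus_ops = {
            .write_index = example_write_index,
            .write_data  = example_write_data,
    };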
+diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
+index d513ed6..90b0de9 100644
+--- a/drivers/video/smscufx.c
++++ b/drivers/video/smscufx.c
+@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
+ 		fb_deferred_io_cleanup(info);
+ 		kfree(info->fbdefio);
+ 		info->fbdefio = NULL;
+-		info->fbops->fb_mmap = ufx_ops_mmap;
++		pax_open_kernel();
++		*(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
++		pax_close_kernel();
+ 	}
+ 
+ 	pr_debug("released /dev/fb%d user=%d count=%d",
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 77b890e..458e666 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
+ 		dlfb_urb_completion(urb);
+ 
+ error:
+-	atomic_add(bytes_sent, &dev->bytes_sent);
+-	atomic_add(bytes_identical, &dev->bytes_identical);
+-	atomic_add(width*height*2, &dev->bytes_rendered);
++	atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++	atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++	atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
+ 	end_cycles = get_cycles();
+-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
++	atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ 		    >> 10)), /* Kcycles */
+ 		   &dev->cpu_kcycles_used);
+ 
+@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
+ 		dlfb_urb_completion(urb);
+ 
+ error:
+-	atomic_add(bytes_sent, &dev->bytes_sent);
+-	atomic_add(bytes_identical, &dev->bytes_identical);
+-	atomic_add(bytes_rendered, &dev->bytes_rendered);
++	atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++	atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++	atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
+ 	end_cycles = get_cycles();
+-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
++	atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ 		    >> 10)), /* Kcycles */
+ 		   &dev->cpu_kcycles_used);
+ }
+@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
+ 		fb_deferred_io_cleanup(info);
+ 		kfree(info->fbdefio);
+ 		info->fbdefio = NULL;
+-		info->fbops->fb_mmap = dlfb_ops_mmap;
++		pax_open_kernel();
++		*(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
++		pax_close_kernel();
+ 	}
+ 
+ 	pr_warn("released /dev/fb%d user=%d count=%d\n",
+@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
+ 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ 	struct dlfb_data *dev = fb_info->par;
+ 	return snprintf(buf, PAGE_SIZE, "%u\n",
+-			atomic_read(&dev->bytes_rendered));
++			atomic_read_unchecked(&dev->bytes_rendered));
+ }
+ 
+ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+ 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ 	struct dlfb_data *dev = fb_info->par;
+ 	return snprintf(buf, PAGE_SIZE, "%u\n",
+-			atomic_read(&dev->bytes_identical));
++			atomic_read_unchecked(&dev->bytes_identical));
+ }
+ 
+ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+ 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ 	struct dlfb_data *dev = fb_info->par;
+ 	return snprintf(buf, PAGE_SIZE, "%u\n",
+-			atomic_read(&dev->bytes_sent));
++			atomic_read_unchecked(&dev->bytes_sent));
+ }
+ 
+ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+ 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ 	struct dlfb_data *dev = fb_info->par;
+ 	return snprintf(buf, PAGE_SIZE, "%u\n",
+-			atomic_read(&dev->cpu_kcycles_used));
++			atomic_read_unchecked(&dev->cpu_kcycles_used));
+ }
+ 
+ static ssize_t edid_show(
+@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
+ 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ 	struct dlfb_data *dev = fb_info->par;
+ 
+-	atomic_set(&dev->bytes_rendered, 0);
+-	atomic_set(&dev->bytes_identical, 0);
+-	atomic_set(&dev->bytes_sent, 0);
+-	atomic_set(&dev->cpu_kcycles_used, 0);
++	atomic_set_unchecked(&dev->bytes_rendered, 0);
++	atomic_set_unchecked(&dev->bytes_identical, 0);
++	atomic_set_unchecked(&dev->bytes_sent, 0);
++	atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
+ 
+ 	return count;
+ }
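The udlfb hunk above moves the driver's statistics counters to the unchecked atomic variants that this patch introduces. Under PaX's reference-count overflow protection, a plain atomic_t that wraps around is treated as an exploit attempt; counters that may legitimately wrap, like these byte and cycle tallies, are switched to atomic_unchecked_t so they stay exempt. A rough sketch of the split, with invented counter names, assuming ATOMIC_INIT also covers the unchecked type as it does elsewhere in the patch:

    #include <linux/atomic.h>

    static atomic_t           open_count = ATOMIC_INIT(0); /* refcount: overflow must trap */
    static atomic_unchecked_t bytes_sent = ATOMIC_INIT(0); /* statistic: free to wrap */

    static void account(unsigned int n)
    {
            atomic_inc(&open_count);              /* checked by PaX REFCOUNT */
            atomic_add_unchecked(n, &bytes_sent); /* wraparound is harmless here */
    }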
dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + return snprintf(buf, PAGE_SIZE, "%u\n", +- atomic_read(&dev->cpu_kcycles_used)); ++ atomic_read_unchecked(&dev->cpu_kcycles_used)); + } + + static ssize_t edid_show( +@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev, + struct fb_info *fb_info = dev_get_drvdata(fbdev); + struct dlfb_data *dev = fb_info->par; + +- atomic_set(&dev->bytes_rendered, 0); +- atomic_set(&dev->bytes_identical, 0); +- atomic_set(&dev->bytes_sent, 0); +- atomic_set(&dev->cpu_kcycles_used, 0); ++ atomic_set_unchecked(&dev->bytes_rendered, 0); ++ atomic_set_unchecked(&dev->bytes_identical, 0); ++ atomic_set_unchecked(&dev->bytes_sent, 0); ++ atomic_set_unchecked(&dev->cpu_kcycles_used, 0); + + return count; + } +diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c +index 256fba7..6e75516 100644 +--- a/drivers/video/uvesafb.c ++++ b/drivers/video/uvesafb.c +@@ -19,6 +19,7 @@ + #include <linux/io.h> + #include <linux/mutex.h> + #include <linux/slab.h> ++#include <linux/moduleloader.h> + #include <video/edid.h> + #include <video/uvesafb.h> + #ifdef CONFIG_X86 +@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task, + if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) { + par->pmi_setpal = par->ypan = 0; + } else { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_MODULES ++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx); ++#endif ++ if (!par->pmi_code) { ++ par->pmi_setpal = par->ypan = 0; ++ return 0; ++ } ++#endif ++ + par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4) + + task->t.regs.edi); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pax_open_kernel(); ++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx); ++ pax_close_kernel(); ++ ++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]); ++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]); ++#else + par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1]; + par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2]; ++#endif ++ + printk(KERN_INFO "uvesafb: protected mode interface info at " + "%04x:%04x\n", + (u16)task->t.regs.es, (u16)task->t.regs.edi); +@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info) + par->ypan = ypan; + + if (par->pmi_setpal || par->ypan) { ++#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC) + if (__supported_pte_mask & _PAGE_NX) { + par->pmi_setpal = par->ypan = 0; + printk(KERN_WARNING "uvesafb: NX protection is active, " + "better not use the PMI.\n"); +- } else { ++ } else ++#endif + uvesafb_vbe_getpmi(task, par); +- } + } + #else + /* The protected mode interface is not available on non-x86. */ +@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode) + info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0; + + /* Disable blanking if the user requested so. */ +- if (!blank) +- info->fbops->fb_blank = NULL; ++ if (!blank) { ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_blank = NULL; ++ pax_close_kernel(); ++ } + + /* + * Find out how much IO memory is required for the mode with +@@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode) + info->flags = FBINFO_FLAG_DEFAULT | + (par->ypan ? 
FBINFO_HWACCEL_YPAN : 0); + +- if (!par->ypan) +- info->fbops->fb_pan_display = NULL; ++ if (!par->ypan) { ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_pan_display = NULL; ++ pax_close_kernel(); ++ } + } + + static void uvesafb_init_mtrr(struct fb_info *info) +@@ -1792,6 +1822,11 @@ out_mode: + out: + kfree(par->vbe_modes); + ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ if (par->pmi_code) ++ module_free_exec(NULL, par->pmi_code); ++#endif ++ + framebuffer_release(info); + return err; + } +@@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platform_device *dev) + kfree(par->vbe_modes); + kfree(par->vbe_state_orig); + kfree(par->vbe_state_saved); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ if (par->pmi_code) ++ module_free_exec(NULL, par->pmi_code); ++#endif ++ + } + + framebuffer_release(info); +diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c +index 1c7da3b..56ea0bd 100644 +--- a/drivers/video/vesafb.c ++++ b/drivers/video/vesafb.c +@@ -9,6 +9,7 @@ + */ + + #include <linux/module.h> ++#include <linux/moduleloader.h> + #include <linux/kernel.h> + #include <linux/errno.h> + #include <linux/string.h> +@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */ + static int vram_total; /* Set total amount of memory */ + static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */ + static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */ +-static void (*pmi_start)(void) __read_mostly; +-static void (*pmi_pal) (void) __read_mostly; ++static void (*pmi_start)(void) __read_only; ++static void (*pmi_pal) (void) __read_only; + static int depth __read_mostly; + static int vga_compat __read_mostly; + /* --------------------------------------------------------------------- */ +@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev) + unsigned int size_remap; + unsigned int size_total; + char *option = NULL; ++ void *pmi_code = NULL; + + /* ignore error return of fb_get_options */ + fb_get_options("vesafb", &option); +@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev) + size_remap = size_total; + vesafb_fix.smem_len = size_remap; + +-#ifndef __i386__ +- screen_info.vesapm_seg = 0; +-#endif +- + if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { + printk(KERN_WARNING + "vesafb: cannot reserve video memory at 0x%lx\n", +@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev) + printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", + vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); + ++#ifdef __i386__ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pmi_code = module_alloc_exec(screen_info.vesapm_size); ++ if (!pmi_code) ++#elif !defined(CONFIG_PAX_KERNEXEC) ++ if (0) ++#endif ++ ++#endif ++ screen_info.vesapm_seg = 0; ++ + if (screen_info.vesapm_seg) { +- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", +- screen_info.vesapm_seg,screen_info.vesapm_off); ++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n", ++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size); + } + + if (screen_info.vesapm_seg < 0xc000) +@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev) + + if (ypan || pmi_setpal) { + unsigned short *pmi_base; ++ + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + 
screen_info.vesapm_off); +- pmi_start = (void*)((char*)pmi_base + pmi_base[1]); +- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pax_open_kernel(); ++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size); ++#else ++ pmi_code = pmi_base; ++#endif ++ ++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]); ++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pmi_start = ktva_ktla(pmi_start); ++ pmi_pal = ktva_ktla(pmi_pal); ++ pax_close_kernel(); ++#endif ++ + printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); + if (pmi_base[3]) { + printk(KERN_INFO "vesafb: pmi: ports = "); +@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev) + info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE | + (ypan ? FBINFO_HWACCEL_YPAN : 0); + +- if (!ypan) +- info->fbops->fb_pan_display = NULL; ++ if (!ypan) { ++ pax_open_kernel(); ++ *(void **)&info->fbops->fb_pan_display = NULL; ++ pax_close_kernel(); ++ } + + if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { + err = -ENOMEM; +@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev) + fb_info(info, "%s frame buffer device\n", info->fix.id); + return 0; + err: ++ ++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ module_free_exec(NULL, pmi_code); ++#endif ++ + if (info->screen_base) + iounmap(info->screen_base); + framebuffer_release(info); +diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h +index 88714ae..16c2e11 100644 +--- a/drivers/video/via/via_clock.h ++++ b/drivers/video/via/via_clock.h +@@ -56,7 +56,7 @@ struct via_clock { + + void (*set_engine_pll_state)(u8 state); + void (*set_engine_pll)(struct via_pll_config config); +-}; ++} __no_const; + + + static inline u32 get_pll_internal_frequency(u32 ref_freq, +diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c +index fef20db..d28b1ab 100644 +--- a/drivers/xen/xenfs/xenstored.c ++++ b/drivers/xen/xenfs/xenstored.c +@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file) + static int xsd_kva_open(struct inode *inode, struct file *file) + { + file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL); ++#else + xen_store_interface); ++#endif ++ + if (!file->private_data) + return -ENOMEM; + return 0; +diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c +index c71e886..61d3d44b 100644 +--- a/fs/9p/vfs_addr.c ++++ b/fs/9p/vfs_addr.c +@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page) + + retval = v9fs_file_write_internal(inode, + v9inode->writeback_fid, +- (__force const char __user *)buffer, ++ (const char __force_user *)buffer, + len, &offset, 0); + if (retval > 0) + retval = 0; +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c +index bb7991c..481e21a 100644 +--- a/fs/9p/vfs_inode.c ++++ b/fs/9p/vfs_inode.c +@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) + void + v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + + p9_debug(P9_DEBUG_VFS, " %s %s\n", + dentry->d_name.name, IS_ERR(s) ? 
"<error>" : s); +diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt +index 370b24c..ff0be7b 100644 +--- a/fs/Kconfig.binfmt ++++ b/fs/Kconfig.binfmt +@@ -103,7 +103,7 @@ config HAVE_AOUT + + config BINFMT_AOUT + tristate "Kernel support for a.out and ECOFF binaries" +- depends on HAVE_AOUT ++ depends on HAVE_AOUT && BROKEN + ---help--- + A.out (Assembler.OUTput) is a set of formats for libraries and + executables used in the earliest versions of UNIX. Linux used +diff --git a/fs/afs/inode.c b/fs/afs/inode.c +index ce25d75..dc09eeb 100644 +--- a/fs/afs/inode.c ++++ b/fs/afs/inode.c +@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name, + struct afs_vnode *vnode; + struct super_block *sb; + struct inode *inode; +- static atomic_t afs_autocell_ino; ++ static atomic_unchecked_t afs_autocell_ino; + + _enter("{%x:%u},%*.*s,", + AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode, +@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name, + data.fid.unique = 0; + data.fid.vnode = 0; + +- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino), ++ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino), + afs_iget5_autocell_test, afs_iget5_set, + &data); + if (!inode) { +diff --git a/fs/aio.c b/fs/aio.c +index 6d68e01..6bc8e9a 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -380,7 +380,7 @@ static int aio_setup_ring(struct kioctx *ctx) + size += sizeof(struct io_event) * nr_events; + + nr_pages = PFN_UP(size); +- if (nr_pages < 0) ++ if (nr_pages <= 0) + return -EINVAL; + + file = aio_private_file(ctx, nr_pages); +@@ -1065,6 +1065,12 @@ static long aio_read_events_ring(struct kioctx *ctx, + tail = ring->tail; + kunmap_atomic(ring); + ++ /* ++ * Ensure that once we've read the current tail pointer, that ++ * we also see the events that were stored up to the tail. 
++ */ ++ smp_rmb(); ++ + pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); + + if (head == tail) +diff --git a/fs/attr.c b/fs/attr.c +index 6530ced..4a827e2 100644 +--- a/fs/attr.c ++++ b/fs/attr.c +@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset) + unsigned long limit; + + limit = rlimit(RLIMIT_FSIZE); ++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1); + if (limit != RLIM_INFINITY && offset > limit) + goto out_sig; + if (offset > inode->i_sb->s_maxbytes) +diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c +index 116fd38..c04182da 100644 +--- a/fs/autofs4/waitq.c ++++ b/fs/autofs4/waitq.c +@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi, + { + unsigned long sigpipe, flags; + mm_segment_t fs; +- const char *data = (const char *)addr; ++ const char __user *data = (const char __force_user *)addr; + ssize_t wr = 0; + + sigpipe = sigismember(¤t->pending.signal, SIGPIPE); +@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait, + return 1; + } + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0); ++#endif ++ + int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, + enum autofs_notify notify) + { +@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, + + /* If this is a direct mount request create a dummy name */ + if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ /* this name does get written to userland via autofs4_write() */ ++ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id)); ++#else + qstr.len = sprintf(name, "%p", dentry); ++#endif + else { + qstr.len = autofs4_getpath(sbi, dentry, &name); + if (!qstr.len) { +diff --git a/fs/befs/endian.h b/fs/befs/endian.h +index 2722387..56059b5 100644 +--- a/fs/befs/endian.h ++++ b/fs/befs/endian.h +@@ -11,7 +11,7 @@ + + #include <asm/byteorder.h> + +-static inline u64 ++static inline u64 __intentional_overflow(-1) + fs64_to_cpu(const struct super_block *sb, fs64 n) + { + if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) +@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n) + return (__force fs64)cpu_to_be64(n); + } + +-static inline u32 ++static inline u32 __intentional_overflow(-1) + fs32_to_cpu(const struct super_block *sb, fs32 n) + { + if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) +@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n) + return (__force fs32)cpu_to_be32(n); + } + +-static inline u16 ++static inline u16 __intentional_overflow(-1) + fs16_to_cpu(const struct super_block *sb, fs16 n) + { + if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) +diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c +index ca0ba15..0fa3257 100644 +--- a/fs/binfmt_aout.c ++++ b/fs/binfmt_aout.c +@@ -16,6 +16,7 @@ + #include <linux/string.h> + #include <linux/fs.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/stat.h> + #include <linux/fcntl.h> + #include <linux/ptrace.h> +@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm) + #endif + # define START_STACK(u) ((void __user *)u.start_stack) + ++ memset(&dump, 0, sizeof(dump)); ++ + fs = get_fs(); + set_fs(KERNEL_DS); + has_dumped = 1; +@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm) + + /* If the size of the dump file exceeds the rlimit, then see what would happen + if we wrote the stack, but not the data area. 
*/ ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1); + if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit) + dump.u_dsize = 0; + + /* Make sure we have enough room to write the stack and data areas. */ ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1); + if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) + dump.u_ssize = 0; + +@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm) + rlim = rlimit(RLIMIT_DATA); + if (rlim >= RLIM_INFINITY) + rlim = ~0; ++ ++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1); + if (ex.a_data + ex.a_bss > rlim) + return -ENOMEM; + +@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm) + + install_exec_creds(bprm); + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ current->mm->pax_flags = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) { ++ current->mm->pax_flags |= MF_PAX_PAGEEXEC; ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP) ++ current->mm->pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT)) ++ current->mm->pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++ } ++#endif ++ + if (N_MAGIC(ex) == OMAGIC) { + unsigned long text_addr, map_size; + loff_t pos; +@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm) + } + + error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, +- PROT_READ | PROT_WRITE | PROT_EXEC, ++ PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, + fd_offset + ex.a_text); + if (error != N_DATADDR(ex)) { +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c +index 67be295..83e2f86 100644 +--- a/fs/binfmt_elf.c ++++ b/fs/binfmt_elf.c +@@ -34,6 +34,7 @@ + #include <linux/utsname.h> + #include <linux/coredump.h> + #include <linux/sched.h> ++#include <linux/xattr.h> + #include <asm/uaccess.h> + #include <asm/param.h> + #include <asm/page.h> +@@ -48,7 +49,7 @@ + static int load_elf_binary(struct linux_binprm *bprm); + static int load_elf_library(struct file *); + static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, +- int, int, unsigned long); ++ int, int, unsigned long) __intentional_overflow(-1); + + /* + * If we don't support core dumping, then supply a NULL so we +@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm); + #define elf_core_dump NULL + #endif + ++#ifdef CONFIG_PAX_MPROTECT ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++static void elf_handle_mmap(struct file *file); ++#endif ++ + #if ELF_EXEC_PAGESIZE > PAGE_SIZE + #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE + #else +@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = { + .load_binary = load_elf_binary, + .load_shlib = load_elf_library, + .core_dump = elf_core_dump, ++ ++#ifdef CONFIG_PAX_MPROTECT ++ .handle_mprotect= elf_handle_mprotect, ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ .handle_mmap = elf_handle_mmap, ++#endif ++ + .min_coredump = ELF_EXEC_PAGESIZE, + }; + +@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = { + + static int set_brk(unsigned long start, unsigned long end) + { ++ unsigned long e = end; ++ + start = ELF_PAGEALIGN(start); + end = ELF_PAGEALIGN(end); + if (end > start) { +@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end) + if (BAD_ADDR(addr)) + return addr; + } +- 
current->mm->start_brk = current->mm->brk = end; ++ current->mm->start_brk = current->mm->brk = e; + return 0; + } + +@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + elf_addr_t __user *u_rand_bytes; + const char *k_platform = ELF_PLATFORM; + const char *k_base_platform = ELF_BASE_PLATFORM; +- unsigned char k_rand_bytes[16]; ++ u32 k_rand_bytes[4]; + int items; + elf_addr_t *elf_info; + int ei_index = 0; + const struct cred *cred = current_cred(); + struct vm_area_struct *vma; ++ unsigned long saved_auxv[AT_VECTOR_SIZE]; + + /* + * In some cases (e.g. Hyper-Threading), we want to avoid L1 +@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + * Generate 16 random bytes for userspace PRNG seeding. + */ + get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); +- u_rand_bytes = (elf_addr_t __user *) +- STACK_ALLOC(p, sizeof(k_rand_bytes)); ++ prandom_seed(k_rand_bytes[0] ^ prandom_u32()); ++ prandom_seed(k_rand_bytes[1] ^ prandom_u32()); ++ prandom_seed(k_rand_bytes[2] ^ prandom_u32()); ++ prandom_seed(k_rand_bytes[3] ^ prandom_u32()); ++ p = STACK_ROUND(p, sizeof(k_rand_bytes)); ++ u_rand_bytes = (elf_addr_t __user *) p; + if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) + return -EFAULT; + +@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, + return -EFAULT; + current->mm->env_end = p; + ++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t)); ++ + /* Put the elf_info on the stack in the right place. */ + sp = (elf_addr_t __user *)envp + 1; +- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t))) ++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t))) + return -EFAULT; + return 0; + } +@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr) + an ELF header */ + + static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, +- struct file *interpreter, unsigned long *interp_map_addr, +- unsigned long no_base) ++ struct file *interpreter, unsigned long no_base) + { + struct elf_phdr *elf_phdata; + struct elf_phdr *eppnt; +- unsigned long load_addr = 0; ++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE; + int load_addr_set = 0; + unsigned long last_bss = 0, elf_bss = 0; +- unsigned long error = ~0UL; ++ unsigned long error = -EINVAL; + unsigned long total_size; + int retval, i, size; + +@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + goto out_close; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ + eppnt = elf_phdata; + for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { + if (eppnt->p_type == PT_LOAD) { +@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + map_addr = elf_map(interpreter, load_addr + vaddr, + eppnt, elf_prot, elf_type, total_size); + total_size = 0; +- if (!*interp_map_addr) +- *interp_map_addr = map_addr; + error = map_addr; + if (BAD_ADDR(map_addr)) + goto out_close; +@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, + k = load_addr + eppnt->p_vaddr; + if (BAD_ADDR(k) || + eppnt->p_filesz > eppnt->p_memsz || +- eppnt->p_memsz > TASK_SIZE || +- TASK_SIZE - eppnt->p_memsz < k) { ++ eppnt->p_memsz > pax_task_size || ++ pax_task_size - eppnt->p_memsz < k) { + error = -ENOMEM; + goto out_close; + } +@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr 
*interp_elf_ex, + elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); + + /* Map the last of the bss segment */ +- error = vm_brk(elf_bss, last_bss - elf_bss); +- if (BAD_ADDR(error)) +- goto out_close; ++ if (last_bss > elf_bss) { ++ error = vm_brk(elf_bss, last_bss - elf_bss); ++ if (BAD_ADDR(error)) ++ goto out_close; ++ } + } + + error = load_addr; +@@ -538,6 +569,336 @@ out: + return error; + } + ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++#ifdef CONFIG_PAX_SOFTMODE ++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (elf_phdata->p_flags & PF_PAGEEXEC) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (elf_phdata->p_flags & PF_SEGMEXEC) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (elf_phdata->p_flags & PF_MPROTECT) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(elf_phdata->p_flags & PF_NOMPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++#ifdef CONFIG_PAX_SOFTMODE ++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (pax_flags_softmode & MF_PAX_PAGEEXEC) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_flags_softmode & MF_PAX_SEGMEXEC) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (pax_flags_softmode & MF_PAX_MPROTECT) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (!(pax_flags_hardmode & 
MF_PAX_EMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static unsigned long pax_parse_defaults(void) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ return pax_flags; ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (randomize_va_space) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++ ++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex) ++{ ++ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK; ++ ++#ifdef CONFIG_PAX_EI_PAX ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ return pax_flags; ++#endif ++ ++ pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++#endif ++ ++ return pax_flags; ++ ++} ++ ++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata) ++{ ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ unsigned long i; ++ ++ for (i = 0UL; i < elf_ex->e_phnum; i++) ++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) { ++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) || ++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) || ++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) || ++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) || ++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP))) ++ return PAX_PARSE_FLAGS_FALLBACK; ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ return pax_parse_pt_pax_softmode(&elf_phdata[i]); ++ else ++#endif ++ ++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]); ++ break; ++ } ++#endif ++ ++ return PAX_PARSE_FLAGS_FALLBACK; ++} ++ ++static unsigned long pax_parse_xattr_pax(struct file * const file) ++{ ++ ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++ ssize_t xattr_size, i; ++ unsigned char xattr_value[sizeof("pemrs") - 1]; ++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL; ++ ++ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value); ++ if (xattr_size < 0 || xattr_size > sizeof xattr_value) ++ return PAX_PARSE_FLAGS_FALLBACK; ++ ++ for (i = 0; i < xattr_size; i++) ++ 
switch (xattr_value[i]) { ++ default: ++ return PAX_PARSE_FLAGS_FALLBACK; ++ ++#define parse_flag(option1, option2, flag) \ ++ case option1: \ ++ if (pax_flags_hardmode & MF_PAX_##flag) \ ++ return PAX_PARSE_FLAGS_FALLBACK;\ ++ pax_flags_hardmode |= MF_PAX_##flag; \ ++ break; \ ++ case option2: \ ++ if (pax_flags_softmode & MF_PAX_##flag) \ ++ return PAX_PARSE_FLAGS_FALLBACK;\ ++ pax_flags_softmode |= MF_PAX_##flag; \ ++ break; ++ ++ parse_flag('p', 'P', PAGEEXEC); ++ parse_flag('e', 'E', EMUTRAMP); ++ parse_flag('m', 'M', MPROTECT); ++ parse_flag('r', 'R', RANDMMAP); ++ parse_flag('s', 'S', SEGMEXEC); ++ ++#undef parse_flag ++ } ++ ++ if (pax_flags_hardmode & pax_flags_softmode) ++ return PAX_PARSE_FLAGS_FALLBACK; ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ return pax_parse_xattr_pax_softmode(pax_flags_softmode); ++ else ++#endif ++ ++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode); ++#else ++ return PAX_PARSE_FLAGS_FALLBACK; ++#endif ++ ++} ++ ++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file) ++{ ++ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags; ++ ++ pax_flags = pax_parse_defaults(); ++ ei_pax_flags = pax_parse_ei_pax(elf_ex); ++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata); ++ xattr_pax_flags = pax_parse_xattr_pax(file); ++ ++ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK && ++ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK && ++ pt_pax_flags != xattr_pax_flags) ++ return -EINVAL; ++ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK) ++ pax_flags = xattr_pax_flags; ++ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK) ++ pax_flags = pt_pax_flags; ++ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK) ++ pax_flags = ei_pax_flags; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++ if (0 > pax_check_flags(&pax_flags)) ++ return -EINVAL; ++ ++ current->mm->pax_flags = pax_flags; ++ return 0; ++} ++#endif ++ + /* + * These are the functions used to load ELF style executables and shared + * libraries. There is no binary dependent code anywhere else. 
+@@ -551,6 +912,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top) + { + unsigned int random_variable = 0; + ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) ++ return stack_top - current->mm->delta_stack; ++#endif ++ + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { + random_variable = get_random_int() & STACK_RND_MASK; +@@ -569,7 +935,7 @@ static int load_elf_binary(struct linux_binprm *bprm) + unsigned long load_addr = 0, load_bias = 0; + int load_addr_set = 0; + char * elf_interpreter = NULL; +- unsigned long error; ++ unsigned long error = 0; + struct elf_phdr *elf_ppnt, *elf_phdata; + unsigned long elf_bss, elf_brk; + int retval, i; +@@ -579,12 +945,12 @@ static int load_elf_binary(struct linux_binprm *bprm) + unsigned long start_code, end_code, start_data, end_data; + unsigned long reloc_func_desc __maybe_unused = 0; + int executable_stack = EXSTACK_DEFAULT; +- unsigned long def_flags = 0; + struct pt_regs *regs = current_pt_regs(); + struct { + struct elfhdr elf_ex; + struct elfhdr interp_elf_ex; + } *loc; ++ unsigned long pax_task_size; + + loc = kmalloc(sizeof(*loc), GFP_KERNEL); + if (!loc) { +@@ -720,11 +1086,82 @@ static int load_elf_binary(struct linux_binprm *bprm) + goto out_free_dentry; + + /* OK, This is the point of no return */ +- current->mm->def_flags = def_flags; ++ current->mm->def_flags = 0; + + /* Do this immediately, since STACK_TOP as used in setup_arg_pages + may depend on the personality. */ + SET_PERSONALITY(loc->elf_ex); ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ current->mm->pax_flags = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ current->mm->call_dl_resolve = 0UL; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) ++ current->mm->call_syscall = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ current->mm->delta_mmap = 0UL; ++ current->mm->delta_stack = 0UL; ++#endif ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) { ++ send_sig(SIGKILL, current, 0); ++ goto out_free_dentry; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++ pax_set_initial_flags(bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++ if (pax_set_initial_flags_func) ++ (pax_set_initial_flags_func)(bprm); ++#endif ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) { ++ current->mm->context.user_cs_limit = PAGE_SIZE; ++ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE; ++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE; ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++ current->mm->def_flags |= VM_NOHUGEPAGE; ++ } else ++#endif ++ ++ pax_task_size = TASK_SIZE; ++ ++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC) ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu()); ++ put_cpu(); ++ } ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) { ++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT; ++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT; ++ } ++#endif ++ 
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ executable_stack = EXSTACK_DISABLE_X; ++ current->personality &= ~READ_IMPLIES_EXEC; ++ } else ++#endif ++ + if (elf_read_implies_exec(loc->elf_ex, executable_stack)) + current->personality |= READ_IMPLIES_EXEC; + +@@ -814,6 +1251,20 @@ static int load_elf_binary(struct linux_binprm *bprm) + #else + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ /* PaX: randomize base address at the default exe base if requested */ ++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) { ++#ifdef CONFIG_SPARC64 ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1); ++#else ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT; ++#endif ++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias); ++ elf_flags |= MAP_FIXED; ++ } ++#endif ++ + } + + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, +@@ -846,9 +1297,9 @@ static int load_elf_binary(struct linux_binprm *bprm) + * allowed task size. Note that p_filesz must always be + * <= p_memsz so it is only necessary to check p_memsz. + */ +- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || +- elf_ppnt->p_memsz > TASK_SIZE || +- TASK_SIZE - elf_ppnt->p_memsz < k) { ++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz || ++ elf_ppnt->p_memsz > pax_task_size || ++ pax_task_size - elf_ppnt->p_memsz < k) { + /* set_brk can never work. Avoid overflows. */ + send_sig(SIGKILL, current, 0); + retval = -EINVAL; +@@ -887,17 +1338,45 @@ static int load_elf_binary(struct linux_binprm *bprm) + goto out_free_dentry; + } + if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { +- send_sig(SIGSEGV, current, 0); +- retval = -EFAULT; /* Nobody gets to see this, but.. */ +- goto out_free_dentry; ++ /* ++ * This bss-zeroing can fail if the ELF ++ * file specifies odd protections. So ++ * we don't check the return value ++ */ + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) { ++ unsigned long start, size, flags; ++ vm_flags_t vm_flags; ++ ++ start = ELF_PAGEALIGN(elf_brk); ++ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4); ++ flags = MAP_FIXED | MAP_PRIVATE; ++ vm_flags = VM_DONTEXPAND | VM_DONTDUMP; ++ ++ down_write(¤t->mm->mmap_sem); ++ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags); ++ retval = -ENOMEM; ++ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) { ++// if (current->personality & ADDR_NO_RANDOMIZE) ++// vm_flags |= VM_READ | VM_MAYREAD; ++ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0); ++ retval = IS_ERR_VALUE(start) ? start : 0; ++ } ++ up_write(¤t->mm->mmap_sem); ++ if (retval == 0) ++ retval = set_brk(start + size, start + size + PAGE_SIZE); ++ if (retval < 0) { ++ send_sig(SIGKILL, current, 0); ++ goto out_free_dentry; ++ } ++ } ++#endif ++ + if (elf_interpreter) { +- unsigned long interp_map_addr = 0; +- + elf_entry = load_elf_interp(&loc->interp_elf_ex, + interpreter, +- &interp_map_addr, + load_bias); + if (!IS_ERR((void *)elf_entry)) { + /* +@@ -1119,7 +1598,7 @@ static bool always_dump_vma(struct vm_area_struct *vma) + * Decide what to dump of a segment, part, all or none. 
+ */ + static unsigned long vma_dump_size(struct vm_area_struct *vma, +- unsigned long mm_flags) ++ unsigned long mm_flags, long signr) + { + #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) + +@@ -1157,7 +1636,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, + if (vma->vm_file == NULL) + return 0; + +- if (FILTER(MAPPED_PRIVATE)) ++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE)) + goto whole; + + /* +@@ -1364,9 +1843,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) + { + elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; + int i = 0; +- do ++ do { + i += 2; +- while (auxv[i - 2] != AT_NULL); ++ } while (auxv[i - 2] != AT_NULL); + fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); + } + +@@ -1375,7 +1854,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, + { + mm_segment_t old_fs = get_fs(); + set_fs(KERNEL_DS); +- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo); ++ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo); + set_fs(old_fs); + fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata); + } +@@ -1999,14 +2478,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, + } + + static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma, +- unsigned long mm_flags) ++ struct coredump_params *cprm) + { + struct vm_area_struct *vma; + size_t size = 0; + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) +- size += vma_dump_size(vma, mm_flags); ++ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo); + return size; + } + +@@ -2097,7 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm) + + dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); + +- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags); ++ offset += elf_core_vma_data_size(gate_vma, cprm); + offset += elf_core_extra_data_size(); + e_shoff = offset; + +@@ -2125,7 +2604,7 @@ static int elf_core_dump(struct coredump_params *cprm) + phdr.p_offset = offset; + phdr.p_vaddr = vma->vm_start; + phdr.p_paddr = 0; +- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags); ++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo); + phdr.p_memsz = vma->vm_end - vma->vm_start; + offset += phdr.p_filesz; + phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; +@@ -2158,7 +2637,7 @@ static int elf_core_dump(struct coredump_params *cprm) + unsigned long addr; + unsigned long end; + +- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags); ++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo); + + for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { + struct page *page; +@@ -2199,6 +2678,167 @@ out: + + #endif /* CONFIG_ELF_CORE */ + ++#ifdef CONFIG_PAX_MPROTECT ++/* PaX: non-PIC ELF libraries need relocations on their executable segments ++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly ++ * we'll remove VM_MAYWRITE for good on RELRO segments. ++ * ++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment ++ * basis because we want to allow the common case and not the special ones. 
++ */ ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags) ++{ ++ struct elfhdr elf_h; ++ struct elf_phdr elf_p; ++ unsigned long i; ++ unsigned long oldflags; ++ bool is_textrel_rw, is_textrel_rx, is_relro; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file) ++ return; ++ ++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ); ++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ; ++ ++#ifdef CONFIG_PAX_ELFRELOCS ++ /* possible TEXTREL */ ++ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ); ++ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ); ++#else ++ is_textrel_rw = false; ++ is_textrel_rx = false; ++#endif ++ ++ /* possible RELRO */ ++ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ); ++ ++ if (!is_textrel_rw && !is_textrel_rx && !is_relro) ++ return; ++ ++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) || ++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || ++ ++#ifdef CONFIG_PAX_ETEXECRELOCS ++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || ++#else ++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) || ++#endif ++ ++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || ++ !elf_check_arch(&elf_h) || ++ elf_h.e_phentsize != sizeof(struct elf_phdr) || ++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr)) ++ return; ++ ++ for (i = 0UL; i < elf_h.e_phnum; i++) { ++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p))) ++ return; ++ switch (elf_p.p_type) { ++ case PT_DYNAMIC: ++ if (!is_textrel_rw && !is_textrel_rx) ++ continue; ++ i = 0UL; ++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) { ++ elf_dyn dyn; ++ ++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn))) ++ break; ++ if (dyn.d_tag == DT_NULL) ++ break; ++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) { ++ gr_log_textrel(vma); ++ if (is_textrel_rw) ++ vma->vm_flags |= VM_MAYWRITE; ++ else ++ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... 
*/ ++ vma->vm_flags &= ~VM_MAYWRITE; ++ break; ++ } ++ i++; ++ } ++ is_textrel_rw = false; ++ is_textrel_rx = false; ++ continue; ++ ++ case PT_GNU_RELRO: ++ if (!is_relro) ++ continue; ++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start) ++ vma->vm_flags &= ~VM_MAYWRITE; ++ is_relro = false; ++ continue; ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ case PT_PAX_FLAGS: { ++ const char *msg_mprotect = "", *msg_emutramp = ""; ++ char *buffer_lib, *buffer_exe; ++ ++ if (elf_p.p_flags & PF_NOMPROTECT) ++ msg_mprotect = "MPROTECT disabled"; ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP)) ++ msg_emutramp = "EMUTRAMP enabled"; ++#endif ++ ++ if (!msg_mprotect[0] && !msg_emutramp[0]) ++ continue; ++ ++ if (!printk_ratelimit()) ++ continue; ++ ++ buffer_lib = (char *)__get_free_page(GFP_KERNEL); ++ buffer_exe = (char *)__get_free_page(GFP_KERNEL); ++ if (buffer_lib && buffer_exe) { ++ char *path_lib, *path_exe; ++ ++ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE); ++ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE); ++ ++ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect, ++ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe); ++ ++ } ++ free_page((unsigned long)buffer_exe); ++ free_page((unsigned long)buffer_lib); ++ continue; ++ } ++#endif ++ ++ } ++ } ++} ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ ++extern int grsec_enable_log_rwxmaps; ++ ++static void elf_handle_mmap(struct file *file) ++{ ++ struct elfhdr elf_h; ++ struct elf_phdr elf_p; ++ unsigned long i; ++ ++ if (!grsec_enable_log_rwxmaps) ++ return; ++ ++ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) || ++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || ++ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) || ++ elf_h.e_phentsize != sizeof(struct elf_phdr) || ++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr)) ++ return; ++ ++ for (i = 0UL; i < elf_h.e_phnum; i++) { ++ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p))) ++ return; ++ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X)) ++ gr_log_ptgnustack(file); ++ } ++} ++#endif ++ + static int __init init_elf_binfmt(void) + { + register_binfmt(&elf_format); +diff --git a/fs/bio.c b/fs/bio.c +index 8754e7b..0669094 100644 +--- a/fs/bio.c ++++ b/fs/bio.c +@@ -1145,7 +1145,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, + /* + * Overflow, abort + */ +- if (end < start) ++ if (end < start || end - start > INT_MAX - nr_pages) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; +@@ -1279,7 +1279,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, + /* + * Overflow, abort + */ +- if (end < start) ++ if (end < start || end - start > INT_MAX - nr_pages) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; +@@ -1541,7 +1541,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err) + const int read = bio_data_dir(bio) == READ; + struct bio_map_data *bmd = bio->bi_private; + int i; +- char *p = bmd->sgvecs[0].iov_base; ++ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base; + + bio_for_each_segment_all(bvec, bio, i) { + char *addr = page_address(bvec->bv_page); +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 1e86823..8e34695 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -637,7 +637,7 @@ static bool 
bd_may_claim(struct block_device *bdev, struct block_device *whole, + else if (bdev->bd_contains == bdev) + return true; /* is a whole device which isn't held */ + +- else if (whole->bd_holder == bd_may_claim) ++ else if (whole->bd_holder == (void *)bd_may_claim) + return true; /* is a partition of a device that is being partitioned */ + else if (whole->bd_holder != NULL) + return false; /* is a partition of a held device */ +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index cbd3a7d6f..c6a2881 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -1216,9 +1216,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, + free_extent_buffer(buf); + add_root_to_dirty_list(root); + } else { +- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) +- parent_start = parent->start; +- else ++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { ++ if (parent) ++ parent_start = parent->start; ++ else ++ parent_start = 0; ++ } else + parent_start = 0; + + WARN_ON(trans->transid != btrfs_header_generation(parent)); +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 451b00c..a2cccee 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, + + static void finish_one_item(struct btrfs_delayed_root *delayed_root) + { +- int seq = atomic_inc_return(&delayed_root->items_seq); ++ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq); + if ((atomic_dec_return(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) && + waitqueue_active(&delayed_root->wait)) +@@ -1409,7 +1409,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root) + + static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq) + { +- int val = atomic_read(&delayed_root->items_seq); ++ int val = atomic_read_unchecked(&delayed_root->items_seq); + + if (val < seq || val >= seq + BTRFS_DELAYED_BATCH) + return 1; +@@ -1433,7 +1433,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root) + int seq; + int ret; + +- seq = atomic_read(&delayed_root->items_seq); ++ seq = atomic_read_unchecked(&delayed_root->items_seq); + + ret = btrfs_wq_run_delayed_node(delayed_root, root, 0); + if (ret) +diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h +index f70119f..ab5894d 100644 +--- a/fs/btrfs/delayed-inode.h ++++ b/fs/btrfs/delayed-inode.h +@@ -43,7 +43,7 @@ struct btrfs_delayed_root { + */ + struct list_head prepare_list; + atomic_t items; /* for delayed items */ +- atomic_t items_seq; /* for delayed items */ ++ atomic_unchecked_t items_seq; /* for delayed items */ + int nodes; /* for delayed nodes */ + wait_queue_head_t wait; + }; +@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root( + struct btrfs_delayed_root *delayed_root) + { + atomic_set(&delayed_root->items, 0); +- atomic_set(&delayed_root->items_seq, 0); ++ atomic_set_unchecked(&delayed_root->items_seq, 0); + delayed_root->nodes = 0; + spin_lock_init(&delayed_root->lock); + init_waitqueue_head(&delayed_root->wait); +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index a6d8efa..2f062cf 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -3491,9 +3491,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) + for (i = 0; i < num_types; i++) { + struct btrfs_space_info *tmp; + ++ /* Don't copy in more than we allocated */ + if (!slot_count) + break; + ++ slot_count--; ++ + info = NULL; + 
rcu_read_lock(); + list_for_each_entry_rcu(tmp, &root->fs_info->space_info, +@@ -3515,10 +3518,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) + memcpy(dest, &space, sizeof(space)); + dest++; + space_args.total_spaces++; +- slot_count--; + } +- if (!slot_count) +- break; + } + up_read(&info->groups_sem); + } +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index d04db81..96e54f1 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -268,7 +268,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, + function, line, errstr); + return; + } +- ACCESS_ONCE(trans->transaction->aborted) = errno; ++ ACCESS_ONCE_RW(trans->transaction->aborted) = errno; + /* Wake up anybody who may be waiting on this transaction */ + wake_up(&root->fs_info->transaction_wait); + wake_up(&root->fs_info->transaction_blocked_wait); +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c +index ff286f3..8153a14 100644 +--- a/fs/btrfs/sysfs.c ++++ b/fs/btrfs/sysfs.c +@@ -437,7 +437,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add) + for (set = 0; set < FEAT_MAX; set++) { + int i; + struct attribute *attrs[2]; +- struct attribute_group agroup = { ++ attribute_group_no_const agroup = { + .name = "features", + .attrs = attrs, + }; +diff --git a/fs/buffer.c b/fs/buffer.c +index 27265a8..289f488 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -3428,7 +3428,7 @@ void __init buffer_init(void) + bh_cachep = kmem_cache_create("buffer_head", + sizeof(struct buffer_head), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| +- SLAB_MEM_SPREAD), ++ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE), + NULL); + + /* +diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c +index 622f469..e8d2d55 100644 +--- a/fs/cachefiles/bind.c ++++ b/fs/cachefiles/bind.c +@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) + args); + + /* start by checking things over */ +- ASSERT(cache->fstop_percent >= 0 && +- cache->fstop_percent < cache->fcull_percent && ++ ASSERT(cache->fstop_percent < cache->fcull_percent && + cache->fcull_percent < cache->frun_percent && + cache->frun_percent < 100); + +- ASSERT(cache->bstop_percent >= 0 && +- cache->bstop_percent < cache->bcull_percent && ++ ASSERT(cache->bstop_percent < cache->bcull_percent && + cache->bcull_percent < cache->brun_percent && + cache->brun_percent < 100); + +diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c +index 0a1467b..6a53245 100644 +--- a/fs/cachefiles/daemon.c ++++ b/fs/cachefiles/daemon.c +@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer, + if (n > buflen) + return -EMSGSIZE; + +- if (copy_to_user(_buffer, buffer, n) != 0) ++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0) + return -EFAULT; + + return n; +@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file, + if (test_bit(CACHEFILES_DEAD, &cache->flags)) + return -EIO; + +- if (datalen < 0 || datalen > PAGE_SIZE - 1) ++ if (datalen > PAGE_SIZE - 1) + return -EOPNOTSUPP; + + /* drag the command string into the kernel so we can parse it */ +@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (fstop < 0 || fstop >= cache->fcull_percent) ++ if (fstop >= cache->fcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->fstop_percent = fstop; +@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct 
cachefiles_cache *cache, char *args) + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (bstop < 0 || bstop >= cache->bcull_percent) ++ if (bstop >= cache->bcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->bstop_percent = bstop; +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h +index 5349473..d6c0b93 100644 +--- a/fs/cachefiles/internal.h ++++ b/fs/cachefiles/internal.h +@@ -59,7 +59,7 @@ struct cachefiles_cache { + wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */ + struct rb_root active_nodes; /* active nodes (can't be culled) */ + rwlock_t active_lock; /* lock for active_nodes */ +- atomic_t gravecounter; /* graveyard uniquifier */ ++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */ + unsigned frun_percent; /* when to stop culling (% files) */ + unsigned fcull_percent; /* when to start culling (% files) */ + unsigned fstop_percent; /* when to stop allocating (% files) */ +@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache, + * proc.c + */ + #ifdef CONFIG_CACHEFILES_HISTOGRAM +-extern atomic_t cachefiles_lookup_histogram[HZ]; +-extern atomic_t cachefiles_mkdir_histogram[HZ]; +-extern atomic_t cachefiles_create_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; ++extern atomic_unchecked_t cachefiles_create_histogram[HZ]; + + extern int __init cachefiles_proc_init(void); + extern void cachefiles_proc_cleanup(void); + static inline +-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif) ++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif) + { + unsigned long jif = jiffies - start_jif; + if (jif >= HZ) + jif = HZ - 1; +- atomic_inc(&histogram[jif]); ++ atomic_inc_unchecked(&histogram[jif]); + } + + #else +diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c +index ca65f39..48921e3 100644 +--- a/fs/cachefiles/namei.c ++++ b/fs/cachefiles/namei.c +@@ -317,7 +317,7 @@ try_again: + /* first step is to make up a grave dentry in the graveyard */ + sprintf(nbuffer, "%08x%08x", + (uint32_t) get_seconds(), +- (uint32_t) atomic_inc_return(&cache->gravecounter)); ++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter)); + + /* do the multiway lock magic */ + trap = lock_rename(cache->graveyard, dir); +diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c +index eccd339..4c1d995 100644 +--- a/fs/cachefiles/proc.c ++++ b/fs/cachefiles/proc.c +@@ -14,9 +14,9 @@ + #include <linux/seq_file.h> + #include "internal.h" + +-atomic_t cachefiles_lookup_histogram[HZ]; +-atomic_t cachefiles_mkdir_histogram[HZ]; +-atomic_t cachefiles_create_histogram[HZ]; ++atomic_unchecked_t cachefiles_lookup_histogram[HZ]; ++atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; ++atomic_unchecked_t cachefiles_create_histogram[HZ]; + + /* + * display the latency histogram +@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v) + return 0; + default: + index = (unsigned long) v - 3; +- x = atomic_read(&cachefiles_lookup_histogram[index]); +- y = atomic_read(&cachefiles_mkdir_histogram[index]); +- z = atomic_read(&cachefiles_create_histogram[index]); ++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]); ++ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]); ++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]); + if (x == 0 && y == 0 && z == 0) + return 0; + +diff --git a/fs/cachefiles/rdwr.c 
b/fs/cachefiles/rdwr.c +index ebaff36..7e3ea26 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = file->f_op->write( +- file, (const void __user *) data, len, &pos); ++ file, (const void __force_user *) data, len, &pos); + set_fs(old_fs); + kunmap(page); + file_end_write(file); +diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c +index 5e0982a..ca18377 100644 +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@ -128,6 +128,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx) + struct dentry *dentry, *last; + struct ceph_dentry_info *di; + int err = 0; ++ char d_name[DNAME_INLINE_LEN]; ++ const unsigned char *name; + + /* claim ref on last dentry we returned */ + last = fi->dentry; +@@ -183,7 +185,12 @@ more: + dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos, + dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); + ctx->pos = di->offset; +- if (!dir_emit(ctx, dentry->d_name.name, ++ name = dentry->d_name.name; ++ if (name == dentry->d_iname) { ++ memcpy(d_name, name, dentry->d_name.len); ++ name = d_name; ++ } ++ if (!dir_emit(ctx, name, + dentry->d_name.len, + ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino), + dentry->d_inode->i_mode >> 12)) { +@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) + struct ceph_fs_client *fsc = ceph_inode_to_client(inode); + struct ceph_mds_client *mdsc = fsc->mdsc; + unsigned frag = fpos_frag(ctx->pos); +- int off = fpos_off(ctx->pos); ++ unsigned int off = fpos_off(ctx->pos); + int err; + u32 ftype; + struct ceph_mds_reply_info_parsed *rinfo; +diff --git a/fs/ceph/super.c b/fs/ceph/super.c +index 10a4ccb..92dbc5e 100644 +--- a/fs/ceph/super.c ++++ b/fs/ceph/super.c +@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data) + /* + * construct our own bdi so we can control readahead, etc. 
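The bdi_seq conversion in the ceph hunk that follows is representative of the many atomic_t to atomic_unchecked_t changes in this patch (the cachefiles gravecounter and histogram hunks above follow the same pattern): under CONFIG_PAX_REFCOUNT the plain atomic increment traps on signed overflow so a leaked reference count cannot wrap back to a live-looking value, and counters that are legitimately allowed to wrap have to be migrated to the _unchecked variants. A minimal userspace model of the two semantics (checked_inc() and unchecked_inc() are names invented for this sketch; PaX implements the real check inside the per-architecture atomic primitives):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* models atomic_inc() under CONFIG_PAX_REFCOUNT: overflow is fatal */
static int checked_inc(int *v)
{
	int next;

	if (__builtin_add_overflow(*v, 1, &next)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* the kernel raises its REFCOUNT report instead */
	}
	return *v = next;
}

/* models atomic_inc_unchecked(): wraparound is expected and harmless */
static int unchecked_inc(int *v)
{
	unsigned int u = (unsigned int)*v;	/* unsigned arithmetic wraps cleanly */

	return *v = (int)(u + 1u);	/* two's-complement wrap on mainstream compilers */
}

int main(void)
{
	int seq = INT_MAX, ref = INT_MAX;

	printf("sequence wraps to %d\n", unchecked_inc(&seq));
	checked_inc(&ref);	/* aborts rather than wrapping */
	return 0;
}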
+ */ +-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); ++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0); + + static int ceph_register_bdi(struct super_block *sb, + struct ceph_fs_client *fsc) +@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb, + default_backing_dev_info.ra_pages; + + err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", +- atomic_long_inc_return(&bdi_seq)); ++ atomic_long_inc_return_unchecked(&bdi_seq)); + if (!err) + sb->s_bdi = &fsc->backing_dev_info; + return err; +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index f3ac415..3d2420c 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, + + if (c == '1' || c == 'y' || c == 'Y' || c == '0') { + #ifdef CONFIG_CIFS_STATS2 +- atomic_set(&totBufAllocCount, 0); +- atomic_set(&totSmBufAllocCount, 0); ++ atomic_set_unchecked(&totBufAllocCount, 0); ++ atomic_set_unchecked(&totSmBufAllocCount, 0); + #endif /* CONFIG_CIFS_STATS2 */ + spin_lock(&cifs_tcp_ses_lock); + list_for_each(tmp1, &cifs_tcp_ses_list) { +@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file, + tcon = list_entry(tmp3, + struct cifs_tcon, + tcon_list); +- atomic_set(&tcon->num_smbs_sent, 0); ++ atomic_set_unchecked(&tcon->num_smbs_sent, 0); + if (server->ops->clear_stats) + server->ops->clear_stats(tcon); + } +@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) + smBufAllocCount.counter, cifs_min_small); + #ifdef CONFIG_CIFS_STATS2 + seq_printf(m, "Total Large %d Small %d Allocations\n", +- atomic_read(&totBufAllocCount), +- atomic_read(&totSmBufAllocCount)); ++ atomic_read_unchecked(&totBufAllocCount), ++ atomic_read_unchecked(&totSmBufAllocCount)); + #endif /* CONFIG_CIFS_STATS2 */ + + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); +@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) + if (tcon->need_reconnect) + seq_puts(m, "\tDISCONNECTED "); + seq_printf(m, "\nSMBs: %d", +- atomic_read(&tcon->num_smbs_sent)); ++ atomic_read_unchecked(&tcon->num_smbs_sent)); + if (server->ops->print_stats) + server->ops->print_stats(m, tcon); + } +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 7c6b73c..a8f0db2 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -1068,7 +1068,7 @@ cifs_init_request_bufs(void) + */ + cifs_req_cachep = kmem_cache_create("cifs_request", + CIFSMaxBufSize + max_hdr_size, 0, +- SLAB_HWCACHE_ALIGN, NULL); ++ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL); + if (cifs_req_cachep == NULL) + return -ENOMEM; + +@@ -1095,7 +1095,7 @@ cifs_init_request_bufs(void) + efficient to alloc 1 per page off the slab compared to 17K (5page) + alloc of large cifs buffers even when page debugging is on */ + cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", +- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, ++ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, + NULL); + if (cifs_sm_req_cachep == NULL) { + mempool_destroy(cifs_req_poolp); +@@ -1180,8 +1180,8 @@ init_cifs(void) + atomic_set(&bufAllocCount, 0); + atomic_set(&smBufAllocCount, 0); + #ifdef CONFIG_CIFS_STATS2 +- atomic_set(&totBufAllocCount, 0); +- atomic_set(&totSmBufAllocCount, 0); ++ atomic_set_unchecked(&totBufAllocCount, 0); ++ atomic_set_unchecked(&totSmBufAllocCount, 0); + #endif /* CONFIG_CIFS_STATS2 */ + + atomic_set(&midCount, 0); +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 30f6e92..e915ba5 100644 +--- 
a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -806,35 +806,35 @@ struct cifs_tcon { + __u16 Flags; /* optional support bits */ + enum statusEnum tidStatus; + #ifdef CONFIG_CIFS_STATS +- atomic_t num_smbs_sent; ++ atomic_unchecked_t num_smbs_sent; + union { + struct { +- atomic_t num_writes; +- atomic_t num_reads; +- atomic_t num_flushes; +- atomic_t num_oplock_brks; +- atomic_t num_opens; +- atomic_t num_closes; +- atomic_t num_deletes; +- atomic_t num_mkdirs; +- atomic_t num_posixopens; +- atomic_t num_posixmkdirs; +- atomic_t num_rmdirs; +- atomic_t num_renames; +- atomic_t num_t2renames; +- atomic_t num_ffirst; +- atomic_t num_fnext; +- atomic_t num_fclose; +- atomic_t num_hardlinks; +- atomic_t num_symlinks; +- atomic_t num_locks; +- atomic_t num_acl_get; +- atomic_t num_acl_set; ++ atomic_unchecked_t num_writes; ++ atomic_unchecked_t num_reads; ++ atomic_unchecked_t num_flushes; ++ atomic_unchecked_t num_oplock_brks; ++ atomic_unchecked_t num_opens; ++ atomic_unchecked_t num_closes; ++ atomic_unchecked_t num_deletes; ++ atomic_unchecked_t num_mkdirs; ++ atomic_unchecked_t num_posixopens; ++ atomic_unchecked_t num_posixmkdirs; ++ atomic_unchecked_t num_rmdirs; ++ atomic_unchecked_t num_renames; ++ atomic_unchecked_t num_t2renames; ++ atomic_unchecked_t num_ffirst; ++ atomic_unchecked_t num_fnext; ++ atomic_unchecked_t num_fclose; ++ atomic_unchecked_t num_hardlinks; ++ atomic_unchecked_t num_symlinks; ++ atomic_unchecked_t num_locks; ++ atomic_unchecked_t num_acl_get; ++ atomic_unchecked_t num_acl_set; + } cifs_stats; + #ifdef CONFIG_CIFS_SMB2 + struct { +- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS]; +- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS]; ++ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS]; ++ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS]; + } smb2_stats; + #endif /* CONFIG_CIFS_SMB2 */ + } stats; +@@ -1170,7 +1170,7 @@ convert_delimiter(char *path, char delim) + } + + #ifdef CONFIG_CIFS_STATS +-#define cifs_stats_inc atomic_inc ++#define cifs_stats_inc atomic_inc_unchecked + + static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon, + unsigned int bytes) +@@ -1536,8 +1536,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount; + /* Various Debug counters */ + GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ + #ifdef CONFIG_CIFS_STATS2 +-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ +-GLOBAL_EXTERN atomic_t totSmBufAllocCount; ++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */ ++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount; + #endif + GLOBAL_EXTERN atomic_t smBufAllocCount; + GLOBAL_EXTERN atomic_t midCount; +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 87c4dd0..a90f115 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping, + index = mapping->writeback_index; /* Start from prev offset */ + end = -1; + } else { +- index = wbc->range_start >> PAGE_CACHE_SHIFT; +- end = wbc->range_end >> PAGE_CACHE_SHIFT; +- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) ++ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { + range_whole = true; ++ index = 0; ++ end = ULONG_MAX; ++ } else { ++ index = wbc->range_start >> PAGE_CACHE_SHIFT; ++ end = wbc->range_end >> PAGE_CACHE_SHIFT; ++ } + scanned = true; + } + retry: +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c +index 3b0c62e..f7d090c 100644 +--- a/fs/cifs/misc.c ++++ b/fs/cifs/misc.c +@@ -170,7 
+170,7 @@ cifs_buf_get(void) + memset(ret_buf, 0, buf_size + 3); + atomic_inc(&bufAllocCount); + #ifdef CONFIG_CIFS_STATS2 +- atomic_inc(&totBufAllocCount); ++ atomic_inc_unchecked(&totBufAllocCount); + #endif /* CONFIG_CIFS_STATS2 */ + } + +@@ -205,7 +205,7 @@ cifs_small_buf_get(void) + /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ + atomic_inc(&smBufAllocCount); + #ifdef CONFIG_CIFS_STATS2 +- atomic_inc(&totSmBufAllocCount); ++ atomic_inc_unchecked(&totSmBufAllocCount); + #endif /* CONFIG_CIFS_STATS2 */ + + } +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index d1fdfa8..94558f8 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -626,27 +626,27 @@ static void + cifs_clear_stats(struct cifs_tcon *tcon) + { + #ifdef CONFIG_CIFS_STATS +- atomic_set(&tcon->stats.cifs_stats.num_writes, 0); +- atomic_set(&tcon->stats.cifs_stats.num_reads, 0); +- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0); +- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0); +- atomic_set(&tcon->stats.cifs_stats.num_opens, 0); +- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0); +- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0); +- atomic_set(&tcon->stats.cifs_stats.num_closes, 0); +- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0); +- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0); +- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0); +- atomic_set(&tcon->stats.cifs_stats.num_renames, 0); +- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0); +- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0); +- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0); +- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0); +- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0); +- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0); +- atomic_set(&tcon->stats.cifs_stats.num_locks, 0); +- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0); +- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0); ++ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0); + #endif + } + +@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon) + { + #ifdef CONFIG_CIFS_STATS + seq_printf(m, " Oplocks breaks: %d", +- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks)); ++ 
atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks)); + seq_printf(m, "\nReads: %d Bytes: %llu", +- atomic_read(&tcon->stats.cifs_stats.num_reads), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads), + (long long)(tcon->bytes_read)); + seq_printf(m, "\nWrites: %d Bytes: %llu", +- atomic_read(&tcon->stats.cifs_stats.num_writes), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes), + (long long)(tcon->bytes_written)); + seq_printf(m, "\nFlushes: %d", +- atomic_read(&tcon->stats.cifs_stats.num_flushes)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes)); + seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d", +- atomic_read(&tcon->stats.cifs_stats.num_locks), +- atomic_read(&tcon->stats.cifs_stats.num_hardlinks), +- atomic_read(&tcon->stats.cifs_stats.num_symlinks)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks)); + seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d", +- atomic_read(&tcon->stats.cifs_stats.num_opens), +- atomic_read(&tcon->stats.cifs_stats.num_closes), +- atomic_read(&tcon->stats.cifs_stats.num_deletes)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes)); + seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d", +- atomic_read(&tcon->stats.cifs_stats.num_posixopens), +- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs)); + seq_printf(m, "\nMkdirs: %d Rmdirs: %d", +- atomic_read(&tcon->stats.cifs_stats.num_mkdirs), +- atomic_read(&tcon->stats.cifs_stats.num_rmdirs)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs)); + seq_printf(m, "\nRenames: %d T2 Renames %d", +- atomic_read(&tcon->stats.cifs_stats.num_renames), +- atomic_read(&tcon->stats.cifs_stats.num_t2renames)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames)); + seq_printf(m, "\nFindFirst: %d FNext %d FClose %d", +- atomic_read(&tcon->stats.cifs_stats.num_ffirst), +- atomic_read(&tcon->stats.cifs_stats.num_fnext), +- atomic_read(&tcon->stats.cifs_stats.num_fclose)); ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext), ++ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose)); + #endif + } + +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 35ddc3e..563e809 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon) + #ifdef CONFIG_CIFS_STATS + int i; + for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) { +- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0); +- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0); ++ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0); ++ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0); + } + #endif + } +@@ -405,65 +405,65 @@ static void + smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon) + { + #ifdef CONFIG_CIFS_STATS +- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent; +- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed; ++ atomic_unchecked_t *sent = 
tcon->stats.smb2_stats.smb2_com_sent; ++ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed; + seq_printf(m, "\nNegotiates: %d sent %d failed", +- atomic_read(&sent[SMB2_NEGOTIATE_HE]), +- atomic_read(&failed[SMB2_NEGOTIATE_HE])); ++ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]), ++ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE])); + seq_printf(m, "\nSessionSetups: %d sent %d failed", +- atomic_read(&sent[SMB2_SESSION_SETUP_HE]), +- atomic_read(&failed[SMB2_SESSION_SETUP_HE])); ++ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]), ++ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE])); + seq_printf(m, "\nLogoffs: %d sent %d failed", +- atomic_read(&sent[SMB2_LOGOFF_HE]), +- atomic_read(&failed[SMB2_LOGOFF_HE])); ++ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]), ++ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE])); + seq_printf(m, "\nTreeConnects: %d sent %d failed", +- atomic_read(&sent[SMB2_TREE_CONNECT_HE]), +- atomic_read(&failed[SMB2_TREE_CONNECT_HE])); ++ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]), ++ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE])); + seq_printf(m, "\nTreeDisconnects: %d sent %d failed", +- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]), +- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE])); ++ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]), ++ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE])); + seq_printf(m, "\nCreates: %d sent %d failed", +- atomic_read(&sent[SMB2_CREATE_HE]), +- atomic_read(&failed[SMB2_CREATE_HE])); ++ atomic_read_unchecked(&sent[SMB2_CREATE_HE]), ++ atomic_read_unchecked(&failed[SMB2_CREATE_HE])); + seq_printf(m, "\nCloses: %d sent %d failed", +- atomic_read(&sent[SMB2_CLOSE_HE]), +- atomic_read(&failed[SMB2_CLOSE_HE])); ++ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]), ++ atomic_read_unchecked(&failed[SMB2_CLOSE_HE])); + seq_printf(m, "\nFlushes: %d sent %d failed", +- atomic_read(&sent[SMB2_FLUSH_HE]), +- atomic_read(&failed[SMB2_FLUSH_HE])); ++ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]), ++ atomic_read_unchecked(&failed[SMB2_FLUSH_HE])); + seq_printf(m, "\nReads: %d sent %d failed", +- atomic_read(&sent[SMB2_READ_HE]), +- atomic_read(&failed[SMB2_READ_HE])); ++ atomic_read_unchecked(&sent[SMB2_READ_HE]), ++ atomic_read_unchecked(&failed[SMB2_READ_HE])); + seq_printf(m, "\nWrites: %d sent %d failed", +- atomic_read(&sent[SMB2_WRITE_HE]), +- atomic_read(&failed[SMB2_WRITE_HE])); ++ atomic_read_unchecked(&sent[SMB2_WRITE_HE]), ++ atomic_read_unchecked(&failed[SMB2_WRITE_HE])); + seq_printf(m, "\nLocks: %d sent %d failed", +- atomic_read(&sent[SMB2_LOCK_HE]), +- atomic_read(&failed[SMB2_LOCK_HE])); ++ atomic_read_unchecked(&sent[SMB2_LOCK_HE]), ++ atomic_read_unchecked(&failed[SMB2_LOCK_HE])); + seq_printf(m, "\nIOCTLs: %d sent %d failed", +- atomic_read(&sent[SMB2_IOCTL_HE]), +- atomic_read(&failed[SMB2_IOCTL_HE])); ++ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]), ++ atomic_read_unchecked(&failed[SMB2_IOCTL_HE])); + seq_printf(m, "\nCancels: %d sent %d failed", +- atomic_read(&sent[SMB2_CANCEL_HE]), +- atomic_read(&failed[SMB2_CANCEL_HE])); ++ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]), ++ atomic_read_unchecked(&failed[SMB2_CANCEL_HE])); + seq_printf(m, "\nEchos: %d sent %d failed", +- atomic_read(&sent[SMB2_ECHO_HE]), +- atomic_read(&failed[SMB2_ECHO_HE])); ++ atomic_read_unchecked(&sent[SMB2_ECHO_HE]), ++ atomic_read_unchecked(&failed[SMB2_ECHO_HE])); + seq_printf(m, "\nQueryDirectories: %d sent %d failed", +- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]), +- 
atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE])); ++ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]), ++ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE])); + seq_printf(m, "\nChangeNotifies: %d sent %d failed", +- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]), +- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE])); ++ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]), ++ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE])); + seq_printf(m, "\nQueryInfos: %d sent %d failed", +- atomic_read(&sent[SMB2_QUERY_INFO_HE]), +- atomic_read(&failed[SMB2_QUERY_INFO_HE])); ++ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]), ++ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE])); + seq_printf(m, "\nSetInfos: %d sent %d failed", +- atomic_read(&sent[SMB2_SET_INFO_HE]), +- atomic_read(&failed[SMB2_SET_INFO_HE])); ++ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]), ++ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE])); + seq_printf(m, "\nOplockBreaks: %d sent %d failed", +- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]), +- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE])); ++ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]), ++ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE])); + #endif + } + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 049a3f2..0f41305 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2099,8 +2099,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, + default: + cifs_dbg(VFS, "info level %u isn't supported\n", + srch_inf->info_level); +- rc = -EINVAL; +- goto qdir_exit; ++ return -EINVAL; + } + + req->FileIndex = cpu_to_le32(index); +diff --git a/fs/coda/cache.c b/fs/coda/cache.c +index 1da168c..8bc7ff6 100644 +--- a/fs/coda/cache.c ++++ b/fs/coda/cache.c +@@ -24,7 +24,7 @@ + #include "coda_linux.h" + #include "coda_cache.h" + +-static atomic_t permission_epoch = ATOMIC_INIT(0); ++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0); + + /* replace or extend an acl cache hit */ + void coda_cache_enter(struct inode *inode, int mask) +@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask) + struct coda_inode_info *cii = ITOC(inode); + + spin_lock(&cii->c_lock); +- cii->c_cached_epoch = atomic_read(&permission_epoch); ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch); + if (!uid_eq(cii->c_uid, current_fsuid())) { + cii->c_uid = current_fsuid(); + cii->c_cached_perm = mask; +@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode) + { + struct coda_inode_info *cii = ITOC(inode); + spin_lock(&cii->c_lock); +- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1; ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1; + spin_unlock(&cii->c_lock); + } + + /* remove all acl caches */ + void coda_cache_clear_all(struct super_block *sb) + { +- atomic_inc(&permission_epoch); ++ atomic_inc_unchecked(&permission_epoch); + } + + +@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask) + spin_lock(&cii->c_lock); + hit = (mask & cii->c_cached_perm) == mask && + uid_eq(cii->c_uid, current_fsuid()) && +- cii->c_cached_epoch == atomic_read(&permission_epoch); ++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch); + spin_unlock(&cii->c_lock); + + return hit; +diff --git a/fs/compat.c b/fs/compat.c +index 6af20de..fec3fbb 100644 +--- a/fs/compat.c ++++ b/fs/compat.c +@@ -54,7 +54,7 @@ + #include <asm/ioctls.h> + #include "internal.h" + +-int compat_log = 1; ++int compat_log = 0; + + int compat_printk(const char *fmt, ...) 
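The fs/coda/cache.c hunks above stay correct after this conversion because permission_epoch is only ever compared for equality: coda_cache_clear_all() bumps the global epoch and every entry cached under an older value simply stops matching, so overflow of the counter is harmless by design. A reduced sketch of that O(1) invalidation idiom, with the spinlock and the fsuid check stripped out and all names local to this example:

#include <assert.h>
#include <stdbool.h>

static unsigned int permission_epoch;	/* global; bumped to invalidate all entries */

struct cached_perm {
	unsigned int epoch;	/* epoch the entry was filled under */
	int mask;		/* cached permission bits */
};

static void cache_enter(struct cached_perm *c, int mask)
{
	c->epoch = permission_epoch;
	c->mask = mask;
}

static void cache_clear_all(void)
{
	permission_epoch++;	/* every cached entry is now stale */
}

static bool cache_check(const struct cached_perm *c, int mask)
{
	return (c->mask & mask) == mask && c->epoch == permission_epoch;
}

int main(void)
{
	struct cached_perm c;

	cache_enter(&c, 4);
	assert(cache_check(&c, 4));
	cache_clear_all();
	assert(!cache_check(&c, 4));
	return 0;
}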
+ { +@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p) + + set_fs(KERNEL_DS); + /* The __user pointer cast is valid because of the set_fs() */ +- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64); ++ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64); + set_fs(oldfs); + /* truncating is ok because it's a user address */ + if (!ret) +@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type, + goto out; + + ret = -EINVAL; +- if (nr_segs > UIO_MAXIOV || nr_segs < 0) ++ if (nr_segs > UIO_MAXIOV) + goto out; + if (nr_segs > fast_segs) { + ret = -ENOMEM; +@@ -834,6 +834,7 @@ struct compat_old_linux_dirent { + struct compat_readdir_callback { + struct dir_context ctx; + struct compat_old_linux_dirent __user *dirent; ++ struct file * file; + int result; + }; + +@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen, + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, + if (!f.file) + return -EBADF; + ++ buf.file = f.file; + error = iterate_dir(f.file, &buf.ctx); + if (buf.result) + error = buf.result; +@@ -901,6 +907,7 @@ struct compat_getdents_callback { + struct dir_context ctx; + struct compat_linux_dirent __user *current_dir; + struct compat_linux_dirent __user *previous; ++ struct file * file; + int count; + int error; + }; +@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen, + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, + if (!f.file) + return -EBADF; + ++ buf.file = f.file; + error = iterate_dir(f.file, &buf.ctx); + if (error >= 0) + error = buf.error; +@@ -987,6 +999,7 @@ struct compat_getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 __user *current_dir; + struct linux_dirent64 __user *previous; ++ struct file * file; + int count; + int error; + }; +@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + + if (dirent) { +@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, + if (!f.file) + return -EBADF; + ++ buf.file = f.file; + error = iterate_dir(f.file, &buf.ctx); + if (error >= 0) + error = buf.error; +diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c +index a81147e..20bf2b5 100644 +--- a/fs/compat_binfmt_elf.c ++++ b/fs/compat_binfmt_elf.c +@@ -30,11 +30,13 @@ + #undef elf_phdr + #undef elf_shdr + #undef elf_note ++#undef elf_dyn + #undef elf_addr_t + #define elfhdr elf32_hdr + #define elf_phdr elf32_phdr + #define elf_shdr elf32_shdr + #define elf_note elf32_note ++#define elf_dyn Elf32_Dyn + #define elf_addr_t Elf32_Addr + + /* +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c +index 3881610..d4599d0 100644 +--- a/fs/compat_ioctl.c ++++ b/fs/compat_ioctl.c +@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, + return -EFAULT; + if (__get_user(udata, &ss32->iomem_base)) + return -EFAULT; +- ss.iomem_base = compat_ptr(udata); ++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata); + if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || + __get_user(ss.port_high, &ss32->port_high)) + return -EFAULT; +@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd, + for (i = 0; i < nmsgs; i++) { + if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16))) + return -EFAULT; +- if (get_user(datap, &umsgs[i].buf) || +- put_user(compat_ptr(datap), &tmsgs[i].buf)) ++ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) || ++ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf)) + return -EFAULT; + } + return sys_ioctl(fd, cmd, (unsigned long)tdata); +@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file, + copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) || + copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) || + copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || +- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) ++ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32))) + return -EFAULT; + + return ioctl_preallocate(file, p); +@@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, + static int __init init_sys32_ioctl_cmp(const void *p, const void *q) + { + unsigned int a, b; +- a = *(unsigned int *)p; +- b = *(unsigned int *)q; ++ a = *(const unsigned int *)p; ++ b = *(const unsigned int *)q; + if (a > b) + return 1; + if (a < b) +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index e081acb..911df21 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) + } + for (p = q->next; p != &parent_sd->s_children; p = p->next) { + struct configfs_dirent *next; +- const char *name; ++ const unsigned char * name; ++ char d_name[sizeof(next->s_dentry->d_iname)]; + int len; + struct inode *inode = NULL; + +@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) + continue; + + name = configfs_get_name(next); +- len = strlen(name); ++ if (next->s_dentry && name == next->s_dentry->d_iname) { ++ len = next->s_dentry->d_name.len; ++ memcpy(d_name, name, len); ++ name = d_name; ++ } else ++ len = strlen(name); + + /* + * We'll have a dentry and an inode for +diff --git a/fs/coredump.c b/fs/coredump.c +index a93f7e6..d58bcbe 
100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file) + struct pipe_inode_info *pipe = file->private_data; + + pipe_lock(pipe); +- pipe->readers++; +- pipe->writers--; ++ atomic_inc(&pipe->readers); ++ atomic_dec(&pipe->writers); + wake_up_interruptible_sync(&pipe->wait); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + pipe_unlock(pipe); +@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file) + * We actually want wait_event_freezable() but then we need + * to clear TIF_SIGPENDING and improve dump_interrupted(). + */ +- wait_event_interruptible(pipe->wait, pipe->readers == 1); ++ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1); + + pipe_lock(pipe); +- pipe->readers--; +- pipe->writers++; ++ atomic_dec(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe_unlock(pipe); + } + +@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo) + struct files_struct *displaced; + bool need_nonrelative = false; + bool core_dumped = false; +- static atomic_t core_dump_count = ATOMIC_INIT(0); ++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0); ++ long signr = siginfo->si_signo; ++ int dumpable; + struct coredump_params cprm = { + .siginfo = siginfo, + .regs = signal_pt_regs(), +@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo) + .mm_flags = mm->flags, + }; + +- audit_core_dumps(siginfo->si_signo); ++ audit_core_dumps(signr); ++ ++ dumpable = __get_dumpable(cprm.mm_flags); ++ ++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) ++ gr_handle_brute_attach(dumpable); + + binfmt = mm->binfmt; + if (!binfmt || !binfmt->core_dump) + goto fail; +- if (!__get_dumpable(cprm.mm_flags)) ++ if (!dumpable) + goto fail; + + cred = prepare_creds(); +@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo) + need_nonrelative = true; + } + +- retval = coredump_wait(siginfo->si_signo, &core_state); ++ retval = coredump_wait(signr, &core_state); + if (retval < 0) + goto fail_creds; + +@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo) + } + cprm.limit = RLIM_INFINITY; + +- dump_count = atomic_inc_return(&core_dump_count); ++ dump_count = atomic_inc_return_unchecked(&core_dump_count); + if (core_pipe_limit && (core_pipe_limit < dump_count)) { + printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", + task_tgid_vnr(current), current->comm); +@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo) + } else { + struct inode *inode; + ++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); ++ + if (cprm.limit < binfmt->min_coredump) + goto fail_unlock; + +@@ -673,7 +682,7 @@ close_fail: + filp_close(cprm.file, NULL); + fail_dropcount: + if (ispipe) +- atomic_dec(&core_dump_count); ++ atomic_dec_unchecked(&core_dump_count); + fail_unlock: + kfree(cn.corename); + coredump_finish(mm, core_dumped); +@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr) + struct file *file = cprm->file; + loff_t pos = file->f_pos; + ssize_t n; ++ ++ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1); + if (cprm->written + nr > cprm->limit) + return 0; + while (nr) { +diff --git a/fs/dcache.c b/fs/dcache.c +index 7f3b400..9c911f2 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) + */ + dentry->d_iname[DNAME_INLINE_LEN-1] = 0; + if (name->len > DNAME_INLINE_LEN-1) { +- dname = kmalloc(name->len + 1, 
GFP_KERNEL); ++ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL); + if (!dname) { + kmem_cache_free(dentry_cache, dentry); + return NULL; +@@ -3430,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages) + mempages -= reserve; + + names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, +- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY| ++ SLAB_NO_SANITIZE, NULL); + + dcache_init(); + inode_init(); +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c +index 1576195..49a19ae 100644 +--- a/fs/debugfs/inode.c ++++ b/fs/debugfs/inode.c +@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file); + */ + struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) + { ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ return __create_file(name, S_IFDIR | S_IRWXU, ++#else + return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, ++#endif + parent, NULL, NULL); + } + EXPORT_SYMBOL_GPL(debugfs_create_dir); +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c +index b167ca4..a224e19 100644 +--- a/fs/ecryptfs/inode.c ++++ b/fs/ecryptfs/inode.c +@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz) + old_fs = get_fs(); + set_fs(get_ds()); + rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, +- (char __user *)lower_buf, ++ (char __force_user *)lower_buf, + PATH_MAX); + set_fs(old_fs); + if (rc < 0) +diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c +index e4141f2..d8263e8 100644 +--- a/fs/ecryptfs/miscdev.c ++++ b/fs/ecryptfs/miscdev.c +@@ -304,7 +304,7 @@ check_list: + goto out_unlock_msg_ctx; + i = PKT_TYPE_SIZE + PKT_CTR_SIZE; + if (msg_ctx->msg) { +- if (copy_to_user(&buf[i], packet_length, packet_length_size)) ++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size)) + goto out_unlock_msg_ctx; + i += packet_length_size; + if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size)) +diff --git a/fs/exec.c b/fs/exec.c +index 31e46b1..88754df 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -55,8 +55,20 @@ + #include <linux/pipe_fs_i.h> + #include <linux/oom.h> + #include <linux/compat.h> ++#include <linux/random.h> ++#include <linux/seq_file.h> ++#include <linux/coredump.h> ++#include <linux/mman.h> ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#include <linux/kallsyms.h> ++#include <linux/kdebug.h> ++#endif ++ ++#include <trace/events/fs.h> + + #include <asm/uaccess.h> ++#include <asm/sections.h> + #include <asm/mmu_context.h> + #include <asm/tlb.h> + +@@ -65,19 +77,34 @@ + + #include <trace/events/sched.h> + ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++void __weak pax_set_initial_flags(struct linux_binprm *bprm) ++{ ++ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n"); ++} ++#endif ++ ++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS ++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++EXPORT_SYMBOL(pax_set_initial_flags_func); ++#endif ++ + int suid_dumpable = 0; + + static LIST_HEAD(formats); + static DEFINE_RWLOCK(binfmt_lock); + ++extern int gr_process_kernel_exec_ban(void); ++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm); ++ + void __register_binfmt(struct linux_binfmt * fmt, int insert) + { + BUG_ON(!fmt); + if (WARN_ON(!fmt->load_binary)) + return; + write_lock(&binfmt_lock); +- insert ? list_add(&fmt->lh, &formats) : +- list_add_tail(&fmt->lh, &formats); ++ insert ? 
pax_list_add((struct list_head *)&fmt->lh, &formats) : ++ pax_list_add_tail((struct list_head *)&fmt->lh, &formats); + write_unlock(&binfmt_lock); + } + +@@ -86,7 +113,7 @@ EXPORT_SYMBOL(__register_binfmt); + void unregister_binfmt(struct linux_binfmt * fmt) + { + write_lock(&binfmt_lock); +- list_del(&fmt->lh); ++ pax_list_del((struct list_head *)&fmt->lh); + write_unlock(&binfmt_lock); + } + +@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + int write) + { + struct page *page; +- int ret; + +-#ifdef CONFIG_STACK_GROWSUP +- if (write) { +- ret = expand_downwards(bprm->vma, pos); +- if (ret < 0) +- return NULL; +- } +-#endif +- ret = get_user_pages(current, bprm->mm, pos, +- 1, write, 1, &page, NULL); +- if (ret <= 0) ++ if (0 > expand_downwards(bprm->vma, pos)) ++ return NULL; ++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL)) + return NULL; + + if (write) { +@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + if (size <= ARG_MAX) + return page; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ // only allow 512KB for argv+env on suid/sgid binaries ++ // to prevent easy ASLR exhaustion ++ if (((!uid_eq(bprm->cred->euid, current_euid())) || ++ (!gid_eq(bprm->cred->egid, current_egid()))) && ++ (size > (512 * 1024))) { ++ put_page(page); ++ return NULL; ++ } ++#endif ++ + /* + * Limit to 1/4-th the stack size for the argv+env strings. + * This ensures that: +@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) + vma->vm_end = STACK_TOP_MAX; + vma->vm_start = vma->vm_end - PAGE_SIZE; + vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + INIT_LIST_HEAD(&vma->anon_vma_chain); + +@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) + mm->stack_vm = mm->total_vm = 1; + up_write(&mm->mmap_sem); + bprm->p = vma->vm_end - sizeof(void *); ++ ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (randomize_va_space) ++ bprm->p ^= prandom_u32() & ~PAGE_MASK; ++#endif ++ + return 0; + err: + up_write(&mm->mmap_sem); +@@ -396,7 +437,7 @@ struct user_arg_ptr { + } ptr; + }; + +-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) ++const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) + { + const char __user *native; + +@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) + compat_uptr_t compat; + + if (get_user(compat, argv.ptr.compat + nr)) +- return ERR_PTR(-EFAULT); ++ return (const char __force_user *)ERR_PTR(-EFAULT); + + return compat_ptr(compat); + } + #endif + + if (get_user(native, argv.ptr.native + nr)) +- return ERR_PTR(-EFAULT); ++ return (const char __force_user *)ERR_PTR(-EFAULT); + + return native; + } +@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max) + if (!p) + break; + +- if (IS_ERR(p)) ++ if (IS_ERR((const char __force_kernel *)p)) + return -EFAULT; + + if (i >= max) +@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, + + ret = -EFAULT; + str = get_user_arg_ptr(argv, argc); +- if (IS_ERR(str)) ++ if (IS_ERR((const char __force_kernel *)str)) + goto out; + + len = strnlen_user(str, MAX_ARG_STRLEN); +@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, + int r; + mm_segment_t oldfs = get_fs(); + struct 
user_arg_ptr argv = { +- .ptr.native = (const char __user *const __user *)__argv, ++ .ptr.native = (const char __user * const __force_user *)__argv, + }; + + set_fs(KERNEL_DS); +@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) + unsigned long new_end = old_end - shift; + struct mmu_gather tlb; + +- BUG_ON(new_start > new_end); ++ if (new_start >= new_end || new_start < mmap_min_addr) ++ return -ENOMEM; + + /* + * ensure there are no vmas between where we want to go +@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) + if (vma != find_vma(mm, new_start)) + return -EFAULT; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ BUG_ON(pax_find_mirror_vma(vma)); ++#endif ++ + /* + * cover the whole range: [new_start, old_end) + */ +@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm, + stack_top = arch_align_stack(stack_top); + stack_top = PAGE_ALIGN(stack_top); + +- if (unlikely(stack_top < mmap_min_addr) || +- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) +- return -ENOMEM; +- + stack_shift = vma->vm_end - stack_top; + + bprm->p -= stack_shift; +@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm, + bprm->exec -= stack_shift; + + down_write(&mm->mmap_sem); ++ ++ /* Move stack pages down in memory. */ ++ if (stack_shift) { ++ ret = shift_arg_pages(vma, stack_shift); ++ if (ret) ++ goto out_unlock; ++ } ++ + vm_flags = VM_STACK_FLAGS; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + /* + * Adjust stack execute permissions; explicitly enable for + * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone +@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm, + goto out_unlock; + BUG_ON(prev != vma); + +- /* Move stack pages down in memory. 
*/ +- if (stack_shift) { +- ret = shift_arg_pages(vma, stack_shift); +- if (ret) +- goto out_unlock; +- } +- + /* mprotect_fixup is overkill to remove the temporary stack flags */ + vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; + +@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm, + #endif + current->mm->start_stack = bprm->p; + ret = expand_stack(vma, stack_base); ++ ++#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP) ++ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) { ++ unsigned long size; ++ vm_flags_t vm_flags; ++ ++ size = STACK_TOP - vma->vm_end; ++ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP; ++ ++ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0); ++ ++#ifdef CONFIG_X86 ++ if (!ret) { ++ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT)); ++ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0); ++ } ++#endif ++ ++ } ++#endif ++ + if (ret) + ret = -EFAULT; + +@@ -772,6 +848,8 @@ static struct file *do_open_exec(struct filename *name) + + fsnotify_open(file); + ++ trace_open_exec(name->name); ++ + err = deny_write_access(file); + if (err) + goto exit; +@@ -801,7 +879,7 @@ int kernel_read(struct file *file, loff_t offset, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- result = vfs_read(file, (void __user *)addr, count, &pos); ++ result = vfs_read(file, (void __force_user *)addr, count, &pos); + set_fs(old_fs); + return result; + } +@@ -846,6 +924,7 @@ static int exec_mmap(struct mm_struct *mm) + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); ++ populate_stack(); + task_unlock(tsk); + if (old_mm) { + up_read(&old_mm->mmap_sem); +@@ -1258,7 +1337,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm) + } + rcu_read_unlock(); + +- if (p->fs->users > n_fs) ++ if (atomic_read(&p->fs->users) > n_fs) + bprm->unsafe |= LSM_UNSAFE_SHARE; + else + p->fs->in_exec = 1; +@@ -1434,6 +1513,31 @@ static int exec_binprm(struct linux_binprm *bprm) + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++static DEFINE_PER_CPU(u64, exec_counter); ++static int __init init_exec_counters(void) ++{ ++ unsigned int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ per_cpu(exec_counter, cpu) = (u64)cpu; ++ } ++ ++ return 0; ++} ++early_initcall(init_exec_counters); ++static inline void increment_exec_counter(void) ++{ ++ BUILD_BUG_ON(NR_CPUS > (1 << 16)); ++ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16); ++} ++#else ++static inline void increment_exec_counter(void) {} ++#endif ++ ++extern void gr_handle_exec_args(struct linux_binprm *bprm, ++ struct user_arg_ptr argv); ++ + /* + * sys_execve() executes a new program. 
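The do_execve_common() hunk that follows wraps the added grsecurity checks in a snapshot-and-rollback: current->acl, the whole rlimit array and exec_file are saved before any policy is applied, every new failure path jumps to out_fail, and out_fail restores all three so a rejected execve leaves the task exactly as it found it. The shape of that idiom, reduced to two fields (struct task, apply_exec_policy() and exec_with_rollback() are invented for this sketch):

#include <string.h>

struct limits {
	long stack_cur;
};

struct task {
	const void *acl;
	struct limits rlim;
};

/* stand-in for the gr_* checks; pretend one of them rejects the exec */
static int apply_exec_policy(struct task *t)
{
	(void)t;
	return -1;
}

static int exec_with_rollback(struct task *t)
{
	const void *old_acl = t->acl;	/* snapshot before any change */
	struct limits old_rlim;
	int err;

	memcpy(&old_rlim, &t->rlim, sizeof(old_rlim));
	t->rlim.stack_cur = 8 * 1024 * 1024;	/* e.g. clamp a suid stack */

	err = apply_exec_policy(t);
	if (err) {	/* out_fail: undo everything */
		t->acl = old_acl;
		memcpy(&t->rlim, &old_rlim, sizeof(old_rlim));
	}
	return err;
}

int main(void)
{
	struct task t = { "old acl", { 1L << 30 } };

	exec_with_rollback(&t);	/* fails, so both fields are restored */
	return t.rlim.stack_cur == (1L << 30) ? 0 : 1;
}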
+ */ +@@ -1441,6 +1545,11 @@ static int do_execve_common(struct filename *filename, + struct user_arg_ptr argv, + struct user_arg_ptr envp) + { ++#ifdef CONFIG_GRKERNSEC ++ struct file *old_exec_file; ++ struct acl_subject_label *old_acl; ++ struct rlimit old_rlim[RLIM_NLIMITS]; ++#endif + struct linux_binprm *bprm; + struct file *file; + struct files_struct *displaced; +@@ -1449,6 +1558,8 @@ static int do_execve_common(struct filename *filename, + if (IS_ERR(filename)) + return PTR_ERR(filename); + ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1); ++ + /* + * We move the actual failure in case of RLIMIT_NPROC excess from + * set*uid() to execve() because too many poorly written programs +@@ -1486,11 +1597,21 @@ static int do_execve_common(struct filename *filename, + if (IS_ERR(file)) + goto out_unmark; + ++ if (gr_ptrace_readexec(file, bprm->unsafe)) { ++ retval = -EPERM; ++ goto out_unmark; ++ } ++ + sched_exec(); + + bprm->file = file; + bprm->filename = bprm->interp = filename->name; + ++ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) { ++ retval = -EACCES; ++ goto out_unmark; ++ } ++ + retval = bprm_mm_init(bprm); + if (retval) + goto out_unmark; +@@ -1507,24 +1628,70 @@ static int do_execve_common(struct filename *filename, + if (retval < 0) + goto out; + ++#ifdef CONFIG_GRKERNSEC ++ old_acl = current->acl; ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); ++ old_exec_file = current->exec_file; ++ get_file(file); ++ current->exec_file = file; ++#endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* limit suid stack to 8MB ++ * we saved the old limits above and will restore them if this exec fails ++ */ ++ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) && ++ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024))) ++ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024; ++#endif ++ ++ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) { ++ retval = -EPERM; ++ goto out_fail; ++ } ++ ++ if (!gr_tpe_allow(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ if (gr_check_crash_exec(file)) { ++ retval = -EACCES; ++ goto out_fail; ++ } ++ ++ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt, ++ bprm->unsafe); ++ if (retval < 0) ++ goto out_fail; ++ + retval = copy_strings_kernel(1, &bprm->filename, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + bprm->exec = bprm->p; + retval = copy_strings(bprm->envc, envp, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; + + retval = copy_strings(bprm->argc, argv, bprm); + if (retval < 0) +- goto out; ++ goto out_fail; ++ ++ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt); ++ ++ gr_handle_exec_args(bprm, argv); + + retval = exec_binprm(bprm); + if (retval < 0) +- goto out; ++ goto out_fail; ++#ifdef CONFIG_GRKERNSEC ++ if (old_exec_file) ++ fput(old_exec_file); ++#endif + + /* execve succeeded */ ++ ++ increment_exec_counter(); + current->fs->in_exec = 0; + current->in_execve = 0; + acct_update_integrals(current); +@@ -1535,6 +1702,14 @@ static int do_execve_common(struct filename *filename, + put_files_struct(displaced); + return retval; + ++out_fail: ++#ifdef CONFIG_GRKERNSEC ++ current->acl = old_acl; ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); ++ fput(current->exec_file); ++ current->exec_file = old_exec_file; ++#endif ++ + out: + if (bprm->mm) { + acct_arg_size(bprm, 0); +@@ -1626,3 +1801,312 @@ asmlinkage long compat_sys_execve(const char __user *
filename, + return compat_do_execve(getname(filename), argv, envp); + } + #endif ++ ++int pax_check_flags(unsigned long *flags) ++{ ++ int retval = 0; ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC) ++ if (*flags & MF_PAX_SEGMEXEC) ++ { ++ *flags &= ~MF_PAX_SEGMEXEC; ++ retval = -EINVAL; ++ } ++#endif ++ ++ if ((*flags & MF_PAX_PAGEEXEC) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ && (*flags & MF_PAX_SEGMEXEC) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_PAGEEXEC; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_MPROTECT) ++ ++#ifdef CONFIG_PAX_MPROTECT ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_MPROTECT; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_EMUTRAMP) ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_EMUTRAMP; ++ retval = -EINVAL; ++ } ++ ++ return retval; ++} ++ ++EXPORT_SYMBOL(pax_check_flags); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++char *pax_get_path(const struct path *path, char *buf, int buflen) ++{ ++ char *pathname = d_path(path, buf, buflen); ++ ++ if (IS_ERR(pathname)) ++ goto toolong; ++ ++ pathname = mangle_path(buf, pathname, "\t\n\\"); ++ if (!pathname) ++ goto toolong; ++ ++ *pathname = 0; ++ return buf; ++ ++toolong: ++ return "<path too long>"; ++} ++EXPORT_SYMBOL(pax_get_path); ++ ++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) ++{ ++ struct task_struct *tsk = current; ++ struct mm_struct *mm = current->mm; ++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL); ++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL); ++ char *path_exec = NULL; ++ char *path_fault = NULL; ++ unsigned long start = 0UL, end = 0UL, offset = 0UL; ++ siginfo_t info = { }; ++ ++ if (buffer_exec && buffer_fault) { ++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL; ++ ++ down_read(&mm->mmap_sem); ++ vma = mm->mmap; ++ while (vma && (!vma_exec || !vma_fault)) { ++ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC)) ++ vma_exec = vma; ++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) ++ vma_fault = vma; ++ vma = vma->vm_next; ++ } ++ if (vma_exec) ++ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE); ++ if (vma_fault) { ++ start = vma_fault->vm_start; ++ end = vma_fault->vm_end; ++ offset = vma_fault->vm_pgoff << PAGE_SHIFT; ++ if (vma_fault->vm_file) ++ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE); ++ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk) ++ path_fault = "<heap>"; ++ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ++ path_fault = "<stack>"; ++ else ++ path_fault = "<anonymous mapping>"; ++ } ++ up_read(&mm->mmap_sem); ++ } ++ if (tsk->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset); ++ else ++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); ++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk), ++ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp); ++ free_page((unsigned long)buffer_exec); ++ free_page((unsigned long)buffer_fault); ++ pax_report_insns(regs, pc, sp); ++ info.si_signo = SIGKILL; ++ info.si_errno = 0; 
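The interval logic of check_stack_object(), defined a little further below, is worth isolating: a usercopy span is accepted only when it lies entirely within the current task's stack pages, rejected when it wraps or straddles either boundary, and the x86 frame-pointer walk then narrows "inside the stack" down to "inside the live frames". A standalone model of the coarse classification (the frame walk and its return value 2 are omitted; classify_stack_object() is a name invented here):

#include <stdint.h>

/* 0: object entirely off-stack, 1: fully inside, -1: partial overlap */
static int classify_stack_object(uintptr_t stack, uintptr_t stackend,
				 uintptr_t obj, uintptr_t len)
{
	if (obj + len < obj)	/* address arithmetic wrapped: reject */
		return -1;
	if (obj + len <= stack || stackend <= obj)
		return 0;	/* no overlap with the stack at all */
	if (obj < stack || stackend < obj + len)
		return -1;	/* straddles one end of the stack */
	return 1;		/* fully contained */
}

int main(void)
{
	/* a 16KiB stack at 0x4000; an 8-byte object at its base is inside */
	return classify_stack_object(0x4000, 0x8000, 0x4000, 8) == 1 ? 0 : 1;
}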
++ info.si_code = SI_KERNEL; ++ info.si_pid = 0; ++ info.si_uid = 0; ++ do_coredump(&info); ++} ++#endif ++ ++#ifdef CONFIG_PAX_REFCOUNT ++void pax_report_refcount_overflow(struct pt_regs *regs) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); ++ else ++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current), ++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); ++ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); ++ preempt_disable(); ++ show_regs(regs); ++ preempt_enable(); ++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current); ++} ++#endif ++ ++#ifdef CONFIG_PAX_USERCOPY ++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */ ++static noinline int check_stack_object(const void *obj, unsigned long len) ++{ ++ const void * const stack = task_stack_page(current); ++ const void * const stackend = stack + THREAD_SIZE; ++ ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ const void *frame = NULL; ++ const void *oldframe; ++#endif ++ ++ if (obj + len < obj) ++ return -1; ++ ++ if (obj + len <= stack || stackend <= obj) ++ return 0; ++ ++ if (obj < stack || stackend < obj + len) ++ return -1; ++ ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ oldframe = __builtin_frame_address(1); ++ if (oldframe) ++ frame = __builtin_frame_address(2); ++ /* ++ low ----------------------------------------------> high ++ [saved bp][saved ip][args][local vars][saved bp][saved ip] ++ ^----------------^ ++ allow copies only within here ++ */ ++ while (stack <= frame && frame < stackend) { ++ /* if obj + len extends past the last frame, this ++ check won't pass and the next frame will be 0, ++ causing us to bail out and correctly report ++ the copy as invalid ++ */ ++ if (obj + len <= frame) ++ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1; ++ oldframe = frame; ++ frame = *(const void * const *)frame; ++ } ++ return -1; ++#else ++ return 1; ++#endif ++} ++ ++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", ++ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len); ++ else ++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", ++ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ?
: "unknown", len); ++ dump_stack(); ++ gr_handle_kernel_exploit(); ++ do_group_exit(SIGKILL); ++} ++#endif ++ ++#ifdef CONFIG_PAX_USERCOPY ++ ++static inline bool check_kernel_text_object(unsigned long low, unsigned long high) ++{ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ unsigned long textlow = ktla_ktva((unsigned long)_stext); ++#ifdef CONFIG_MODULES ++ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR; ++#else ++ unsigned long texthigh = ktla_ktva((unsigned long)_etext); ++#endif ++ ++#else ++ unsigned long textlow = (unsigned long)_stext; ++ unsigned long texthigh = (unsigned long)_etext; ++ ++#ifdef CONFIG_X86_64 ++ /* check against linear mapping as well */ ++ if (high > (unsigned long)__va(__pa(textlow)) && ++ low < (unsigned long)__va(__pa(texthigh))) ++ return true; ++#endif ++ ++#endif ++ ++ if (high <= textlow || low >= texthigh) ++ return false; ++ else ++ return true; ++} ++#endif ++ ++void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size) ++{ ++#ifdef CONFIG_PAX_USERCOPY ++ const char *type; ++#endif ++ ++#ifndef CONFIG_STACK_GROWSUP ++ unsigned long stackstart = (unsigned long)task_stack_page(current); ++ unsigned long currentsp = (unsigned long)&stackstart; ++ if (unlikely((currentsp < stackstart + 512 || ++ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt())) ++ BUG(); ++#endif ++ ++#ifndef CONFIG_PAX_USERCOPY_DEBUG ++ if (const_size) ++ return; ++#endif ++ ++#ifdef CONFIG_PAX_USERCOPY ++ if (!n) ++ return; ++ ++ type = check_heap_object(ptr, n); ++ if (!type) { ++ int ret = check_stack_object(ptr, n); ++ if (ret == 1 || ret == 2) ++ return; ++ if (ret == 0) { ++ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n)) ++ type = "<kernel text>"; ++ else ++ return; ++ } else ++ type = "<process stack>"; ++ } ++ ++ pax_report_usercopy(ptr, n, to_user, type); ++#endif ++ ++} ++EXPORT_SYMBOL(__check_object_size); ++ ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++void pax_track_stack(void) ++{ ++ unsigned long sp = (unsigned long)&sp; ++ if (sp < current_thread_info()->lowest_stack && ++ sp > (unsigned long)task_stack_page(current)) ++ current_thread_info()->lowest_stack = sp; ++ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16))) ++ BUG(); ++} ++EXPORT_SYMBOL(pax_track_stack); ++#endif ++ ++#ifdef CONFIG_PAX_SIZE_OVERFLOW ++void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name) ++{ ++ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name); ++ dump_stack(); ++ do_group_exit(SIGKILL); ++} ++EXPORT_SYMBOL(report_size_overflow); ++#endif +diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c +index 9f9992b..8b59411 100644 +--- a/fs/ext2/balloc.c ++++ b/fs/ext2/balloc.c +@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi) + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && + !uid_eq(sbi->s_resuid, current_fsuid()) && + (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) || +- !in_group_p (sbi->s_resgid))) { ++ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) { + return 0; + } + return 1; +diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c +index 9142614..97484fa 100644 +--- a/fs/ext2/xattr.c ++++ b/fs/ext2/xattr.c +@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, 
size_t buffer_size) + struct buffer_head *bh = NULL; + struct ext2_xattr_entry *entry; + char *end; +- size_t rest = buffer_size; ++ size_t rest = buffer_size, total_size = 0; + int error; + + ea_idebug(inode, "buffer=%p, buffer_size=%ld", +@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list", + buffer += size; + } + rest -= size; ++ total_size += size; + } + } +- error = buffer_size - rest; /* total size */ ++ error = total_size; + + cleanup: + brelse(bh); +diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c +index 22548f5..41521d8 100644 +--- a/fs/ext3/balloc.c ++++ b/fs/ext3/balloc.c +@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation) + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && + !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) && + (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) || +- !in_group_p (sbi->s_resgid))) { ++ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) { + return 0; + } + return 1; +diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c +index c6874be..f8a6ae8 100644 +--- a/fs/ext3/xattr.c ++++ b/fs/ext3/xattr.c +@@ -330,7 +330,7 @@ static int + ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry, + char *buffer, size_t buffer_size) + { +- size_t rest = buffer_size; ++ size_t rest = buffer_size, total_size = 0; + + for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) { + const struct xattr_handler *handler = +@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry, + buffer += size; + } + rest -= size; ++ total_size += size; + } + } +- return buffer_size - rest; ++ return total_size; + } + + static int +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index 6ea7b14..8fa16d9 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, + /* Hm, nope. Are (enough) root reserved clusters available? 
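
The ext2, ext3, and ext4 balloc hunks in this range all make the same two-part change: the CAP_SYS_RESOURCE test moves to the end of the short-circuited condition and switches to grsecurity's capable_nolog() variant. My reading of the intent, offered as an assumption rather than the patch author's statement, is that capable() has side effects (it can mark the task as having used privilege and, under grsecurity, emit capability-use log entries), so it should run only after the cheap reserved-uid/gid comparisons have failed, and without logging for this routine policy test. A runnable toy of the ordering idea, with all helper names hypothetical:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins: the uid/gid tests are cheap and silent; capable_nolog()
   models the capability check, which in the kernel can mark the task
   privileged and (under grsecurity) produce log traffic. */
static bool resuid_matches(void) { return false; }
static bool resgid_matches(void) { return false; }
static bool capable_nolog(void)  { puts("  capability check evaluated"); return true; }

static bool may_use_reserved_blocks(void)
{
        /* || short-circuits left to right, so the noisy check runs only
           when every cheap identity test has already failed */
        return resuid_matches() || resgid_matches() || capable_nolog();
}

int main(void)
{
        printf("allowed: %d\n", may_use_reserved_blocks());
        return 0;
}
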
*/ + if (uid_eq(sbi->s_resuid, current_fsuid()) || + (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || +- capable(CAP_SYS_RESOURCE) || +- (flags & EXT4_MB_USE_ROOT_BLOCKS)) { ++ (flags & EXT4_MB_USE_ROOT_BLOCKS) || ++ capable_nolog(CAP_SYS_RESOURCE)) { + + if (free_clusters >= (nclusters + dirty_clusters + + resv_clusters)) +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 62f024c..a6a1a61 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -1269,19 +1269,19 @@ struct ext4_sb_info { + unsigned long s_mb_last_start; + + /* stats for buddy allocator */ +- atomic_t s_bal_reqs; /* number of reqs with len > 1 */ +- atomic_t s_bal_success; /* we found long enough chunks */ +- atomic_t s_bal_allocated; /* in blocks */ +- atomic_t s_bal_ex_scanned; /* total extents scanned */ +- atomic_t s_bal_goals; /* goal hits */ +- atomic_t s_bal_breaks; /* too long searches */ +- atomic_t s_bal_2orders; /* 2^order hits */ ++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */ ++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */ ++ atomic_unchecked_t s_bal_allocated; /* in blocks */ ++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */ ++ atomic_unchecked_t s_bal_goals; /* goal hits */ ++ atomic_unchecked_t s_bal_breaks; /* too long searches */ ++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */ + spinlock_t s_bal_lock; + unsigned long s_mb_buddies_generated; + unsigned long long s_mb_generation_time; +- atomic_t s_mb_lost_chunks; +- atomic_t s_mb_preallocated; +- atomic_t s_mb_discarded; ++ atomic_unchecked_t s_mb_lost_chunks; ++ atomic_unchecked_t s_mb_preallocated; ++ atomic_unchecked_t s_mb_discarded; + atomic_t s_lock_busy; + + /* locality groups */ +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c +index e6574d7..c30cbe2 100644 +--- a/fs/ext4/indirect.c ++++ b/fs/ext4/indirect.c +@@ -1345,8 +1345,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, + if (level == 0 || + (bh && all_zeroes((__le32 *)bh->b_data, + (__le32 *)bh->b_data + addr_per_block))) { +- ext4_free_data(handle, inode, parent_bh, &blk, &blk+1); +- *i_data = 0; ++ ext4_free_data(handle, inode, parent_bh, ++ i_data, i_data + 1); + } + brelse(bh); + bh = NULL; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 242226a..f3eb6c1 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1882,7 +1882,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); + + if (EXT4_SB(sb)->s_mb_stats) +- atomic_inc(&EXT4_SB(sb)->s_bal_2orders); ++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders); + + break; + } +@@ -2191,7 +2191,7 @@ repeat: + ac->ac_status = AC_STATUS_CONTINUE; + ac->ac_flags |= EXT4_MB_HINT_FIRST; + cr = 3; +- atomic_inc(&sbi->s_mb_lost_chunks); ++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks); + goto repeat; + } + } +@@ -2699,25 +2699,25 @@ int ext4_mb_release(struct super_block *sb) + if (sbi->s_mb_stats) { + ext4_msg(sb, KERN_INFO, + "mballoc: %u blocks %u reqs (%u success)", +- atomic_read(&sbi->s_bal_allocated), +- atomic_read(&sbi->s_bal_reqs), +- atomic_read(&sbi->s_bal_success)); ++ atomic_read_unchecked(&sbi->s_bal_allocated), ++ atomic_read_unchecked(&sbi->s_bal_reqs), ++ atomic_read_unchecked(&sbi->s_bal_success)); + ext4_msg(sb, KERN_INFO, + "mballoc: %u extents scanned, %u goal hits, " + "%u 2^N hits, %u breaks, %u lost", +- atomic_read(&sbi->s_bal_ex_scanned), +- atomic_read(&sbi->s_bal_goals), +- atomic_read(&sbi->s_bal_2orders), +- 
atomic_read(&sbi->s_bal_breaks), +- atomic_read(&sbi->s_mb_lost_chunks)); ++ atomic_read_unchecked(&sbi->s_bal_ex_scanned), ++ atomic_read_unchecked(&sbi->s_bal_goals), ++ atomic_read_unchecked(&sbi->s_bal_2orders), ++ atomic_read_unchecked(&sbi->s_bal_breaks), ++ atomic_read_unchecked(&sbi->s_mb_lost_chunks)); + ext4_msg(sb, KERN_INFO, + "mballoc: %lu generated and it took %Lu", + sbi->s_mb_buddies_generated, + sbi->s_mb_generation_time); + ext4_msg(sb, KERN_INFO, + "mballoc: %u preallocated, %u discarded", +- atomic_read(&sbi->s_mb_preallocated), +- atomic_read(&sbi->s_mb_discarded)); ++ atomic_read_unchecked(&sbi->s_mb_preallocated), ++ atomic_read_unchecked(&sbi->s_mb_discarded)); + } + + free_percpu(sbi->s_locality_groups); +@@ -3171,16 +3171,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); + + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { +- atomic_inc(&sbi->s_bal_reqs); +- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); ++ atomic_inc_unchecked(&sbi->s_bal_reqs); ++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); + if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) +- atomic_inc(&sbi->s_bal_success); +- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); ++ atomic_inc_unchecked(&sbi->s_bal_success); ++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned); + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) +- atomic_inc(&sbi->s_bal_goals); ++ atomic_inc_unchecked(&sbi->s_bal_goals); + if (ac->ac_found > sbi->s_mb_max_to_scan) +- atomic_inc(&sbi->s_bal_breaks); ++ atomic_inc_unchecked(&sbi->s_bal_breaks); + } + + if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) +@@ -3607,7 +3607,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) + trace_ext4_mb_new_inode_pa(ac, pa); + + ext4_mb_use_inode_pa(ac, pa); +- atomic_add(pa->pa_free, &sbi->s_mb_preallocated); ++ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated); + + ei = EXT4_I(ac->ac_inode); + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); +@@ -3667,7 +3667,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) + trace_ext4_mb_new_group_pa(ac, pa); + + ext4_mb_use_group_pa(ac, pa); +- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); ++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); + + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + lg = ac->ac_lg; +@@ -3756,7 +3756,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, + * from the bitmap and continue. 
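
The long run of mballoc changes above and below is mechanical: every counter that is pure statistics (the s_bal_* and s_mb_* fields) moves from atomic_t to atomic_unchecked_t, with matching atomic_*_unchecked() call sites. The reason is CONFIG_PAX_REFCOUNT, seen at the start of this range: checked atomic_t operations trap on signed overflow to stop reference-count wraps, so statistics that may legitimately wrap must opt out. A minimal userspace model of the unchecked type; the real definitions are per-architecture assembly elsewhere in this patch:

#include <stdio.h>

/* Same layout as atomic_t, but a distinct type, so checked and unchecked
   uses cannot be mixed up silently (model only) */
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);   /* wraps silently, by design */
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return *(volatile const int *)&v->counter;
}

int main(void)
{
        atomic_unchecked_t stat = { 0x7fffffff };
        atomic_inc_unchecked(&stat);    /* a checked atomic_t would trap here */
        printf("stat wrapped to %d\n", atomic_read_unchecked(&stat));
        return 0;
}
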
+ */ + } +- atomic_add(free, &sbi->s_mb_discarded); ++ atomic_add_unchecked(free, &sbi->s_mb_discarded); + + return err; + } +@@ -3774,7 +3774,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, + ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); + BUG_ON(group != e4b->bd_group && pa->pa_len != 0); + mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); +- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); ++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); + trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); + + return 0; +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c +index 04434ad..6404663 100644 +--- a/fs/ext4/mmp.c ++++ b/fs/ext4/mmp.c +@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, + void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp, + const char *function, unsigned int line, const char *msg) + { +- __ext4_warning(sb, function, line, msg); ++ __ext4_warning(sb, function, line, "%s", msg); + __ext4_warning(sb, function, line, + "MMP failure info: last update time: %llu, last update " + "node: %s, last update device: %s\n", +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index a46030d..1477295 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data) + } + + #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) +-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" ++static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" + "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n"; + + #ifdef CONFIG_QUOTA +@@ -2448,7 +2448,7 @@ struct ext4_attr { + int offset; + int deprecated_val; + } u; +-}; ++} __do_const; + + static int parse_strtoull(const char *buf, + unsigned long long max, unsigned long long *value) +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 55e611c..cfad16d 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -381,7 +381,7 @@ static int + ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry, + char *buffer, size_t buffer_size) + { +- size_t rest = buffer_size; ++ size_t rest = buffer_size, total_size = 0; + + for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { + const struct xattr_handler *handler = +@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry, + buffer += size; + } + rest -= size; ++ total_size += size; + } + } +- return buffer_size - rest; ++ return total_size; + } + + static int +diff --git a/fs/fcntl.c b/fs/fcntl.c +index ef68665..5deacdc 100644 +--- a/fs/fcntl.c ++++ b/fs/fcntl.c +@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, + if (err) + return err; + ++ if (gr_handle_chroot_fowner(pid, type)) ++ return -ENOENT; ++ if (gr_check_protected_task_fowner(pid, type)) ++ return -EACCES; ++ + f_modown(filp, pid, type, force); + return 0; + } +diff --git a/fs/fhandle.c b/fs/fhandle.c +index 999ff5c..ac037c9 100644 +--- a/fs/fhandle.c ++++ b/fs/fhandle.c +@@ -8,6 +8,7 @@ + #include <linux/fs_struct.h> + #include <linux/fsnotify.h> + #include <linux/personality.h> ++#include <linux/grsecurity.h> + #include <asm/uaccess.h> + #include "internal.h" + #include "mount.h" +@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path, + } else + retval = 0; + /* copy the mount id */ +- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id, +- sizeof(*mnt_id)) || ++ if 
(put_user(real_mount(path->mnt)->mnt_id, mnt_id) || + copy_to_user(ufh, handle, + sizeof(struct file_handle) + handle_bytes)) + retval = -EFAULT; +@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, + * the directory. Ideally we would like CAP_DAC_SEARCH. + * But we don't have that + */ +- if (!capable(CAP_DAC_READ_SEARCH)) { ++ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) { + retval = -EPERM; + goto out_err; + } +diff --git a/fs/file.c b/fs/file.c +index eb56a13..ccee850 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -16,6 +16,7 @@ + #include <linux/slab.h> + #include <linux/vmalloc.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/fdtable.h> + #include <linux/bitops.h> + #include <linux/interrupt.h> +@@ -141,7 +142,7 @@ out: + * Return <0 error code on error; 1 on successful completion. + * The files->file_lock should be held on entry, and will be held on exit. + */ +-static int expand_fdtable(struct files_struct *files, int nr) ++static int expand_fdtable(struct files_struct *files, unsigned int nr) + __releases(files->file_lock) + __acquires(files->file_lock) + { +@@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr) + * expanded and execution may have blocked. + * The files->file_lock should be held on entry, and will be held on exit. + */ +-static int expand_files(struct files_struct *files, int nr) ++static int expand_files(struct files_struct *files, unsigned int nr) + { + struct fdtable *fdt; + +@@ -807,6 +808,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags) + if (!file) + return __close_fd(files, fd); + ++ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0); + if (fd >= rlimit(RLIMIT_NOFILE)) + return -EBADF; + +@@ -833,6 +835,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) + if (unlikely(oldfd == newfd)) + return -EINVAL; + ++ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0); + if (newfd >= rlimit(RLIMIT_NOFILE)) + return -EBADF; + +@@ -888,6 +891,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes) + int f_dupfd(unsigned int from, struct file *file, unsigned flags) + { + int err; ++ gr_learn_resource(current, RLIMIT_NOFILE, from, 0); + if (from >= rlimit(RLIMIT_NOFILE)) + return -EINVAL; + err = alloc_fd(from, flags); +diff --git a/fs/filesystems.c b/fs/filesystems.c +index 92567d9..fcd8cbf 100644 +--- a/fs/filesystems.c ++++ b/fs/filesystems.c +@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name) + int len = dot ? 
dot - name : strlen(name); + + fs = __get_fs_type(name, len); ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0)) ++#else + if (!fs && (request_module("fs-%.*s", len, name) == 0)) ++#endif + fs = __get_fs_type(name, len); + + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { +diff --git a/fs/fs_struct.c b/fs/fs_struct.c +index 7dca743..543d620 100644 +--- a/fs/fs_struct.c ++++ b/fs/fs_struct.c +@@ -4,6 +4,7 @@ + #include <linux/path.h> + #include <linux/slab.h> + #include <linux/fs_struct.h> ++#include <linux/grsecurity.h> + #include "internal.h" + + /* +@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path) + write_seqcount_begin(&fs->seq); + old_root = fs->root; + fs->root = *path; ++ gr_set_chroot_entries(current, path); + write_seqcount_end(&fs->seq); + spin_unlock(&fs->lock); + if (old_root.dentry) +@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root) + int hits = 0; + spin_lock(&fs->lock); + write_seqcount_begin(&fs->seq); ++ /* this root replacement is only done by pivot_root, ++ leave grsec's chroot tagging alone for this task ++ so that a pivoted root isn't treated as a chroot ++ */ + hits += replace_path(&fs->root, old_root, new_root); + hits += replace_path(&fs->pwd, old_root, new_root); + write_seqcount_end(&fs->seq); +@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk) + task_lock(tsk); + spin_lock(&fs->lock); + tsk->fs = NULL; +- kill = !--fs->users; ++ gr_clear_chroot_entries(tsk); ++ kill = !atomic_dec_return(&fs->users); + spin_unlock(&fs->lock); + task_unlock(tsk); + if (kill) +@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) + struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); + /* We don't need to lock fs - think why ;-) */ + if (fs) { +- fs->users = 1; ++ atomic_set(&fs->users, 1); + fs->in_exec = 0; + spin_lock_init(&fs->lock); + seqcount_init(&fs->seq); +@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) + spin_lock(&old->lock); + fs->root = old->root; + path_get(&fs->root); ++ /* instead of calling gr_set_chroot_entries here, ++ we call it from every caller of this function ++ */ + fs->pwd = old->pwd; + path_get(&fs->pwd); + spin_unlock(&old->lock); +@@ -139,8 +149,9 @@ int unshare_fs_struct(void) + + task_lock(current); + spin_lock(&fs->lock); +- kill = !--fs->users; ++ kill = !atomic_dec_return(&fs->users); + current->fs = new_fs; ++ gr_set_chroot_entries(current, &new_fs->root); + spin_unlock(&fs->lock); + task_unlock(current); + +@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct); + + int current_umask(void) + { +- return current->fs->umask; ++ return current->fs->umask | gr_acl_umask(); + } + EXPORT_SYMBOL(current_umask); + + /* to be mentioned only in INIT_TASK */ + struct fs_struct init_fs = { +- .users = 1, ++ .users = ATOMIC_INIT(1), + .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), + .seq = SEQCNT_ZERO(init_fs.seq), + .umask = 0022, +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c +index 29d7feb..303644d 100644 +--- a/fs/fscache/cookie.c ++++ b/fs/fscache/cookie.c +@@ -19,7 +19,7 @@ + + struct kmem_cache *fscache_cookie_jar; + +-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0); ++static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0); + + static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie); + static int fscache_alloc_object(struct fscache_cache *cache, +@@ -69,11 +69,11 @@ struct 
fscache_cookie *__fscache_acquire_cookie( + parent ? (char *) parent->def->name : "<no-parent>", + def->name, netfs_data, enable); + +- fscache_stat(&fscache_n_acquires); ++ fscache_stat_unchecked(&fscache_n_acquires); + + /* if there's no parent cookie, then we don't create one here either */ + if (!parent) { +- fscache_stat(&fscache_n_acquires_null); ++ fscache_stat_unchecked(&fscache_n_acquires_null); + _leave(" [no parent]"); + return NULL; + } +@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie( + /* allocate and initialise a cookie */ + cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); + if (!cookie) { +- fscache_stat(&fscache_n_acquires_oom); ++ fscache_stat_unchecked(&fscache_n_acquires_oom); + _leave(" [ENOMEM]"); + return NULL; + } +@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie( + + switch (cookie->def->type) { + case FSCACHE_COOKIE_TYPE_INDEX: +- fscache_stat(&fscache_n_cookie_index); ++ fscache_stat_unchecked(&fscache_n_cookie_index); + break; + case FSCACHE_COOKIE_TYPE_DATAFILE: +- fscache_stat(&fscache_n_cookie_data); ++ fscache_stat_unchecked(&fscache_n_cookie_data); + break; + default: +- fscache_stat(&fscache_n_cookie_special); ++ fscache_stat_unchecked(&fscache_n_cookie_special); + break; + } + +@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie( + } else { + atomic_dec(&parent->n_children); + __fscache_cookie_put(cookie); +- fscache_stat(&fscache_n_acquires_nobufs); ++ fscache_stat_unchecked(&fscache_n_acquires_nobufs); + _leave(" = NULL"); + return NULL; + } +@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie( + } + } + +- fscache_stat(&fscache_n_acquires_ok); ++ fscache_stat_unchecked(&fscache_n_acquires_ok); + _leave(" = %p", cookie); + return cookie; + } +@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie) + cache = fscache_select_cache_for_object(cookie->parent); + if (!cache) { + up_read(&fscache_addremove_sem); +- fscache_stat(&fscache_n_acquires_no_cache); ++ fscache_stat_unchecked(&fscache_n_acquires_no_cache); + _leave(" = -ENOMEDIUM [no cache]"); + return -ENOMEDIUM; + } +@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache, + object = cache->ops->alloc_object(cache, cookie); + fscache_stat_d(&fscache_n_cop_alloc_object); + if (IS_ERR(object)) { +- fscache_stat(&fscache_n_object_no_alloc); ++ fscache_stat_unchecked(&fscache_n_object_no_alloc); + ret = PTR_ERR(object); + goto error; + } + +- fscache_stat(&fscache_n_object_alloc); ++ fscache_stat_unchecked(&fscache_n_object_alloc); + +- object->debug_id = atomic_inc_return(&fscache_object_debug_id); ++ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id); + + _debug("ALLOC OBJ%x: %s {%lx}", + object->debug_id, cookie->def->name, object->events); +@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie) + + _enter("{%s}", cookie->def->name); + +- fscache_stat(&fscache_n_invalidates); ++ fscache_stat_unchecked(&fscache_n_invalidates); + + /* Only permit invalidation of data files. 
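
For contrast with all the fscache_stat_unchecked() conversions in this area, it is worth sketching what the checked side does. Under PAX_REFCOUNT an ordinary atomic increment gains an overflow test; on x86 the shape is roughly increment, jump-if-no-overflow, undo, trap, with the trap handler ultimately reaching the pax_report_refcount_overflow() function at the top of this range. A simplified, compilable sketch, written from my understanding of the approach (the authoritative version is in the patch's arch/x86 atomic headers, not shown in this excerpt, and includes exception-table plumbing omitted here):

#include <stdio.h>

static inline void atomic_inc_checked(int *counter)
{
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"        /* no signed overflow: done */
                     "lock decl %0\n\t"  /* undo, so the count saturates */
                     "int $4\n\t"        /* raise #OF; in-kernel the handler
                                            reports the overflow and kills */
                     "0:"
                     : "+m" (*counter));
}

int main(void)
{
        int refs = 1;
        atomic_inc_checked(&refs);      /* normal path: no trap taken */
        printf("refs = %d\n", refs);    /* overflow would trap, not wrap */
        return 0;
}
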
Invalidating an index will + * require the caller to release all its attachments to the tree rooted +@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie) + { + struct fscache_object *object; + +- fscache_stat(&fscache_n_updates); ++ fscache_stat_unchecked(&fscache_n_updates); + + if (!cookie) { +- fscache_stat(&fscache_n_updates_null); ++ fscache_stat_unchecked(&fscache_n_updates_null); + _leave(" [no cookie]"); + return; + } +@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie); + */ + void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire) + { +- fscache_stat(&fscache_n_relinquishes); ++ fscache_stat_unchecked(&fscache_n_relinquishes); + if (retire) +- fscache_stat(&fscache_n_relinquishes_retire); ++ fscache_stat_unchecked(&fscache_n_relinquishes_retire); + + if (!cookie) { +- fscache_stat(&fscache_n_relinquishes_null); ++ fscache_stat_unchecked(&fscache_n_relinquishes_null); + _leave(" [no cookie]"); + return; + } +@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie) + if (test_bit(FSCACHE_IOERROR, &object->cache->flags)) + goto inconsistent; + +- op->debug_id = atomic_inc_return(&fscache_op_debug_id); ++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); + + __fscache_use_cookie(cookie); + if (fscache_submit_op(object, op) < 0) +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h +index 4226f66..0fb3f45 100644 +--- a/fs/fscache/internal.h ++++ b/fs/fscache/internal.h +@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *); + extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *); + extern int fscache_wait_for_operation_activation(struct fscache_object *, + struct fscache_operation *, +- atomic_t *, +- atomic_t *, ++ atomic_unchecked_t *, ++ atomic_unchecked_t *, + void (*)(struct fscache_operation *)); + extern void fscache_invalidate_writes(struct fscache_cookie *); + +@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void); + * stats.c + */ + #ifdef CONFIG_FSCACHE_STATS +-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; +-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; ++extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; ++extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; + +-extern atomic_t fscache_n_op_pend; +-extern atomic_t fscache_n_op_run; +-extern atomic_t fscache_n_op_enqueue; +-extern atomic_t fscache_n_op_deferred_release; +-extern atomic_t fscache_n_op_release; +-extern atomic_t fscache_n_op_gc; +-extern atomic_t fscache_n_op_cancelled; +-extern atomic_t fscache_n_op_rejected; ++extern atomic_unchecked_t fscache_n_op_pend; ++extern atomic_unchecked_t fscache_n_op_run; ++extern atomic_unchecked_t fscache_n_op_enqueue; ++extern atomic_unchecked_t fscache_n_op_deferred_release; ++extern atomic_unchecked_t fscache_n_op_release; ++extern atomic_unchecked_t fscache_n_op_gc; ++extern atomic_unchecked_t fscache_n_op_cancelled; ++extern atomic_unchecked_t fscache_n_op_rejected; + +-extern atomic_t fscache_n_attr_changed; +-extern atomic_t fscache_n_attr_changed_ok; +-extern atomic_t fscache_n_attr_changed_nobufs; +-extern atomic_t fscache_n_attr_changed_nomem; +-extern atomic_t fscache_n_attr_changed_calls; ++extern atomic_unchecked_t fscache_n_attr_changed; ++extern atomic_unchecked_t fscache_n_attr_changed_ok; ++extern atomic_unchecked_t fscache_n_attr_changed_nobufs; ++extern atomic_unchecked_t fscache_n_attr_changed_nomem; ++extern atomic_unchecked_t 
fscache_n_attr_changed_calls; + +-extern atomic_t fscache_n_allocs; +-extern atomic_t fscache_n_allocs_ok; +-extern atomic_t fscache_n_allocs_wait; +-extern atomic_t fscache_n_allocs_nobufs; +-extern atomic_t fscache_n_allocs_intr; +-extern atomic_t fscache_n_allocs_object_dead; +-extern atomic_t fscache_n_alloc_ops; +-extern atomic_t fscache_n_alloc_op_waits; ++extern atomic_unchecked_t fscache_n_allocs; ++extern atomic_unchecked_t fscache_n_allocs_ok; ++extern atomic_unchecked_t fscache_n_allocs_wait; ++extern atomic_unchecked_t fscache_n_allocs_nobufs; ++extern atomic_unchecked_t fscache_n_allocs_intr; ++extern atomic_unchecked_t fscache_n_allocs_object_dead; ++extern atomic_unchecked_t fscache_n_alloc_ops; ++extern atomic_unchecked_t fscache_n_alloc_op_waits; + +-extern atomic_t fscache_n_retrievals; +-extern atomic_t fscache_n_retrievals_ok; +-extern atomic_t fscache_n_retrievals_wait; +-extern atomic_t fscache_n_retrievals_nodata; +-extern atomic_t fscache_n_retrievals_nobufs; +-extern atomic_t fscache_n_retrievals_intr; +-extern atomic_t fscache_n_retrievals_nomem; +-extern atomic_t fscache_n_retrievals_object_dead; +-extern atomic_t fscache_n_retrieval_ops; +-extern atomic_t fscache_n_retrieval_op_waits; ++extern atomic_unchecked_t fscache_n_retrievals; ++extern atomic_unchecked_t fscache_n_retrievals_ok; ++extern atomic_unchecked_t fscache_n_retrievals_wait; ++extern atomic_unchecked_t fscache_n_retrievals_nodata; ++extern atomic_unchecked_t fscache_n_retrievals_nobufs; ++extern atomic_unchecked_t fscache_n_retrievals_intr; ++extern atomic_unchecked_t fscache_n_retrievals_nomem; ++extern atomic_unchecked_t fscache_n_retrievals_object_dead; ++extern atomic_unchecked_t fscache_n_retrieval_ops; ++extern atomic_unchecked_t fscache_n_retrieval_op_waits; + +-extern atomic_t fscache_n_stores; +-extern atomic_t fscache_n_stores_ok; +-extern atomic_t fscache_n_stores_again; +-extern atomic_t fscache_n_stores_nobufs; +-extern atomic_t fscache_n_stores_oom; +-extern atomic_t fscache_n_store_ops; +-extern atomic_t fscache_n_store_calls; +-extern atomic_t fscache_n_store_pages; +-extern atomic_t fscache_n_store_radix_deletes; +-extern atomic_t fscache_n_store_pages_over_limit; ++extern atomic_unchecked_t fscache_n_stores; ++extern atomic_unchecked_t fscache_n_stores_ok; ++extern atomic_unchecked_t fscache_n_stores_again; ++extern atomic_unchecked_t fscache_n_stores_nobufs; ++extern atomic_unchecked_t fscache_n_stores_oom; ++extern atomic_unchecked_t fscache_n_store_ops; ++extern atomic_unchecked_t fscache_n_store_calls; ++extern atomic_unchecked_t fscache_n_store_pages; ++extern atomic_unchecked_t fscache_n_store_radix_deletes; ++extern atomic_unchecked_t fscache_n_store_pages_over_limit; + +-extern atomic_t fscache_n_store_vmscan_not_storing; +-extern atomic_t fscache_n_store_vmscan_gone; +-extern atomic_t fscache_n_store_vmscan_busy; +-extern atomic_t fscache_n_store_vmscan_cancelled; +-extern atomic_t fscache_n_store_vmscan_wait; ++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing; ++extern atomic_unchecked_t fscache_n_store_vmscan_gone; ++extern atomic_unchecked_t fscache_n_store_vmscan_busy; ++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled; ++extern atomic_unchecked_t fscache_n_store_vmscan_wait; + +-extern atomic_t fscache_n_marks; +-extern atomic_t fscache_n_uncaches; ++extern atomic_unchecked_t fscache_n_marks; ++extern atomic_unchecked_t fscache_n_uncaches; + +-extern atomic_t fscache_n_acquires; +-extern atomic_t fscache_n_acquires_null; +-extern atomic_t 
fscache_n_acquires_no_cache; +-extern atomic_t fscache_n_acquires_ok; +-extern atomic_t fscache_n_acquires_nobufs; +-extern atomic_t fscache_n_acquires_oom; ++extern atomic_unchecked_t fscache_n_acquires; ++extern atomic_unchecked_t fscache_n_acquires_null; ++extern atomic_unchecked_t fscache_n_acquires_no_cache; ++extern atomic_unchecked_t fscache_n_acquires_ok; ++extern atomic_unchecked_t fscache_n_acquires_nobufs; ++extern atomic_unchecked_t fscache_n_acquires_oom; + +-extern atomic_t fscache_n_invalidates; +-extern atomic_t fscache_n_invalidates_run; ++extern atomic_unchecked_t fscache_n_invalidates; ++extern atomic_unchecked_t fscache_n_invalidates_run; + +-extern atomic_t fscache_n_updates; +-extern atomic_t fscache_n_updates_null; +-extern atomic_t fscache_n_updates_run; ++extern atomic_unchecked_t fscache_n_updates; ++extern atomic_unchecked_t fscache_n_updates_null; ++extern atomic_unchecked_t fscache_n_updates_run; + +-extern atomic_t fscache_n_relinquishes; +-extern atomic_t fscache_n_relinquishes_null; +-extern atomic_t fscache_n_relinquishes_waitcrt; +-extern atomic_t fscache_n_relinquishes_retire; ++extern atomic_unchecked_t fscache_n_relinquishes; ++extern atomic_unchecked_t fscache_n_relinquishes_null; ++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt; ++extern atomic_unchecked_t fscache_n_relinquishes_retire; + +-extern atomic_t fscache_n_cookie_index; +-extern atomic_t fscache_n_cookie_data; +-extern atomic_t fscache_n_cookie_special; ++extern atomic_unchecked_t fscache_n_cookie_index; ++extern atomic_unchecked_t fscache_n_cookie_data; ++extern atomic_unchecked_t fscache_n_cookie_special; + +-extern atomic_t fscache_n_object_alloc; +-extern atomic_t fscache_n_object_no_alloc; +-extern atomic_t fscache_n_object_lookups; +-extern atomic_t fscache_n_object_lookups_negative; +-extern atomic_t fscache_n_object_lookups_positive; +-extern atomic_t fscache_n_object_lookups_timed_out; +-extern atomic_t fscache_n_object_created; +-extern atomic_t fscache_n_object_avail; +-extern atomic_t fscache_n_object_dead; ++extern atomic_unchecked_t fscache_n_object_alloc; ++extern atomic_unchecked_t fscache_n_object_no_alloc; ++extern atomic_unchecked_t fscache_n_object_lookups; ++extern atomic_unchecked_t fscache_n_object_lookups_negative; ++extern atomic_unchecked_t fscache_n_object_lookups_positive; ++extern atomic_unchecked_t fscache_n_object_lookups_timed_out; ++extern atomic_unchecked_t fscache_n_object_created; ++extern atomic_unchecked_t fscache_n_object_avail; ++extern atomic_unchecked_t fscache_n_object_dead; + +-extern atomic_t fscache_n_checkaux_none; +-extern atomic_t fscache_n_checkaux_okay; +-extern atomic_t fscache_n_checkaux_update; +-extern atomic_t fscache_n_checkaux_obsolete; ++extern atomic_unchecked_t fscache_n_checkaux_none; ++extern atomic_unchecked_t fscache_n_checkaux_okay; ++extern atomic_unchecked_t fscache_n_checkaux_update; ++extern atomic_unchecked_t fscache_n_checkaux_obsolete; + + extern atomic_t fscache_n_cop_alloc_object; + extern atomic_t fscache_n_cop_lookup_object; +@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat) + atomic_inc(stat); + } + ++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat) ++{ ++ atomic_inc_unchecked(stat); ++} ++ + static inline void fscache_stat_d(atomic_t *stat) + { + atomic_dec(stat); +@@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops; + + #define __fscache_stat(stat) (NULL) + #define fscache_stat(stat) do {} while (0) ++#define fscache_stat_unchecked(stat) do {} 
while (0) + #define fscache_stat_d(stat) do {} while (0) + #endif + +diff --git a/fs/fscache/object.c b/fs/fscache/object.c +index d3b4539..ed0c659 100644 +--- a/fs/fscache/object.c ++++ b/fs/fscache/object.c +@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object + _debug("LOOKUP \"%s\" in \"%s\"", + cookie->def->name, object->cache->tag->name); + +- fscache_stat(&fscache_n_object_lookups); ++ fscache_stat_unchecked(&fscache_n_object_lookups); + fscache_stat(&fscache_n_cop_lookup_object); + ret = object->cache->ops->lookup_object(object); + fscache_stat_d(&fscache_n_cop_lookup_object); +@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object + if (ret == -ETIMEDOUT) { + /* probably stuck behind another object, so move this one to + * the back of the queue */ +- fscache_stat(&fscache_n_object_lookups_timed_out); ++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out); + _leave(" [timeout]"); + return NO_TRANSIT; + } +@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object) + _enter("{OBJ%x,%s}", object->debug_id, object->state->name); + + if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { +- fscache_stat(&fscache_n_object_lookups_negative); ++ fscache_stat_unchecked(&fscache_n_object_lookups_negative); + + /* Allow write requests to begin stacking up and read requests to begin + * returning ENODATA. +@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object) + /* if we were still looking up, then we must have a positive lookup + * result, in which case there may be data available */ + if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { +- fscache_stat(&fscache_n_object_lookups_positive); ++ fscache_stat_unchecked(&fscache_n_object_lookups_positive); + + /* We do (presumably) have data */ + clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); +@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object) + clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags); + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP); + } else { +- fscache_stat(&fscache_n_object_created); ++ fscache_stat_unchecked(&fscache_n_object_created); + } + + set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); +@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec + fscache_stat_d(&fscache_n_cop_lookup_complete); + + fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); +- fscache_stat(&fscache_n_object_avail); ++ fscache_stat_unchecked(&fscache_n_object_avail); + + _leave(""); + return transit_to(JUMPSTART_DEPS); +@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob + + /* this just shifts the object release to the work processor */ + fscache_put_object(object); +- fscache_stat(&fscache_n_object_dead); ++ fscache_stat_unchecked(&fscache_n_object_dead); + + _leave(""); + return transit_to(OBJECT_DEAD); +@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + enum fscache_checkaux result; + + if (!object->cookie->def->check_aux) { +- fscache_stat(&fscache_n_checkaux_none); ++ fscache_stat_unchecked(&fscache_n_checkaux_none); + return FSCACHE_CHECKAUX_OKAY; + } + +@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + switch (result) { + /* entry okay as is */ + case FSCACHE_CHECKAUX_OKAY: +- 
fscache_stat(&fscache_n_checkaux_okay); ++ fscache_stat_unchecked(&fscache_n_checkaux_okay); + break; + + /* entry requires update */ + case FSCACHE_CHECKAUX_NEEDS_UPDATE: +- fscache_stat(&fscache_n_checkaux_update); ++ fscache_stat_unchecked(&fscache_n_checkaux_update); + break; + + /* entry requires deletion */ + case FSCACHE_CHECKAUX_OBSOLETE: +- fscache_stat(&fscache_n_checkaux_obsolete); ++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete); + break; + + default: +@@ -992,7 +992,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje + { + const struct fscache_state *s; + +- fscache_stat(&fscache_n_invalidates_run); ++ fscache_stat_unchecked(&fscache_n_invalidates_run); + fscache_stat(&fscache_n_cop_invalidate_object); + s = _fscache_invalidate_object(object, event); + fscache_stat_d(&fscache_n_cop_invalidate_object); +@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object * + { + _enter("{OBJ%x},%d", object->debug_id, event); + +- fscache_stat(&fscache_n_updates_run); ++ fscache_stat_unchecked(&fscache_n_updates_run); + fscache_stat(&fscache_n_cop_update_object); + object->cache->ops->update_object(object); + fscache_stat_d(&fscache_n_cop_update_object); +diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c +index 318071a..379938b 100644 +--- a/fs/fscache/operation.c ++++ b/fs/fscache/operation.c +@@ -17,7 +17,7 @@ + #include <linux/slab.h> + #include "internal.h" + +-atomic_t fscache_op_debug_id; ++atomic_unchecked_t fscache_op_debug_id; + EXPORT_SYMBOL(fscache_op_debug_id); + + /** +@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) + ASSERTCMP(atomic_read(&op->usage), >, 0); + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); + +- fscache_stat(&fscache_n_op_enqueue); ++ fscache_stat_unchecked(&fscache_n_op_enqueue); + switch (op->flags & FSCACHE_OP_TYPE) { + case FSCACHE_OP_ASYNC: + _debug("queue async"); +@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object, + wake_up_bit(&op->flags, FSCACHE_OP_WAITING); + if (op->processor) + fscache_enqueue_operation(op); +- fscache_stat(&fscache_n_op_run); ++ fscache_stat_unchecked(&fscache_n_op_run); + } + + /* +@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object, + if (object->n_in_progress > 0) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + } else if (!list_empty(&object->pending_ops)) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + fscache_start_operations(object); + } else { + ASSERTCMP(object->n_in_progress, ==, 0); +@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, + object->n_exclusive++; /* reads and writes must wait */ + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + ret = 0; + } else { + /* If we're in any other state, there must have been an I/O +@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object, + if (object->n_exclusive > 0) { + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + } else if (!list_empty(&object->pending_ops)) { + 
atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + fscache_start_operations(object); + } else { + ASSERTCMP(object->n_exclusive, ==, 0); +@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object, + object->n_ops++; + atomic_inc(&op->usage); + list_add_tail(&op->pend_link, &object->pending_ops); +- fscache_stat(&fscache_n_op_pend); ++ fscache_stat_unchecked(&fscache_n_op_pend); + ret = 0; + } else if (fscache_object_is_dying(object)) { +- fscache_stat(&fscache_n_op_rejected); ++ fscache_stat_unchecked(&fscache_n_op_rejected); + op->state = FSCACHE_OP_ST_CANCELLED; + ret = -ENOBUFS; + } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { +@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op, + ret = -EBUSY; + if (op->state == FSCACHE_OP_ST_PENDING) { + ASSERT(!list_empty(&op->pend_link)); +- fscache_stat(&fscache_n_op_cancelled); ++ fscache_stat_unchecked(&fscache_n_op_cancelled); + list_del_init(&op->pend_link); + if (do_cancel) + do_cancel(op); +@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object) + while (!list_empty(&object->pending_ops)) { + op = list_entry(object->pending_ops.next, + struct fscache_operation, pend_link); +- fscache_stat(&fscache_n_op_cancelled); ++ fscache_stat_unchecked(&fscache_n_op_cancelled); + list_del_init(&op->pend_link); + + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); +@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op) + op->state, ==, FSCACHE_OP_ST_CANCELLED); + op->state = FSCACHE_OP_ST_DEAD; + +- fscache_stat(&fscache_n_op_release); ++ fscache_stat_unchecked(&fscache_n_op_release); + + if (op->release) { + op->release(op); +@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op) + * lock, and defer it otherwise */ + if (!spin_trylock(&object->lock)) { + _debug("defer put"); +- fscache_stat(&fscache_n_op_deferred_release); ++ fscache_stat_unchecked(&fscache_n_op_deferred_release); + + cache = object->cache; + spin_lock(&cache->op_gc_list_lock); +@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work) + + _debug("GC DEFERRED REL OBJ%x OP%x", + object->debug_id, op->debug_id); +- fscache_stat(&fscache_n_op_gc); ++ fscache_stat_unchecked(&fscache_n_op_gc); + + ASSERTCMP(atomic_read(&op->usage), ==, 0); + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD); +diff --git a/fs/fscache/page.c b/fs/fscache/page.c +index 7f5c658..6c1e164 100644 +--- a/fs/fscache/page.c ++++ b/fs/fscache/page.c +@@ -61,7 +61,7 @@ try_again: + val = radix_tree_lookup(&cookie->stores, page->index); + if (!val) { + rcu_read_unlock(); +- fscache_stat(&fscache_n_store_vmscan_not_storing); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing); + __fscache_uncache_page(cookie, page); + return true; + } +@@ -91,11 +91,11 @@ try_again: + spin_unlock(&cookie->stores_lock); + + if (xpage) { +- fscache_stat(&fscache_n_store_vmscan_cancelled); +- fscache_stat(&fscache_n_store_radix_deletes); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled); ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes); + ASSERTCMP(xpage, ==, page); + } else { +- fscache_stat(&fscache_n_store_vmscan_gone); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone); + } + + wake_up_bit(&cookie->flags, 0); +@@ -110,11 +110,11 @@ page_busy: + * sleeping on memory allocation, so we may need to impose a timeout + * too. 
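
The busy path just below encodes a general reclaim rule: fscache may only sleep waiting for a page's write to finish when the caller's gfp mask carries both __GFP_WAIT and __GFP_FS; an atomic caller, or reclaim re-entering filesystem code, must get an immediate refusal instead. The same shape reduced to a runnable toy, with all helpers and flag values hypothetical:

#include <stdio.h>
#include <stdbool.h>

#define GFP_WAIT 0x1   /* toy stand-ins for __GFP_WAIT / __GFP_FS */
#define GFP_FS   0x2

static bool page_busy = true;

static bool try_release(void)        { return !page_busy; }
static void wait_for_writeback(void) { page_busy = false; /* pretend we slept */ }

static bool release_page(unsigned gfp)
{
        if (try_release())
                return true;                    /* fast path, never sleeps */
        if (!(gfp & GFP_WAIT) || !(gfp & GFP_FS))
                return false;                   /* caller may not sleep: refuse */
        wait_for_writeback();                   /* sleeping is permitted here */
        return try_release();
}

int main(void)
{
        printf("atomic context: %d\n", release_page(0));
        printf("sleepable:      %d\n", release_page(GFP_WAIT | GFP_FS));
        return 0;
}
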
*/ + if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) { +- fscache_stat(&fscache_n_store_vmscan_busy); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy); + return false; + } + +- fscache_stat(&fscache_n_store_vmscan_wait); ++ fscache_stat_unchecked(&fscache_n_store_vmscan_wait); + __fscache_wait_on_page_write(cookie, page); + gfp &= ~__GFP_WAIT; + goto try_again; +@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object, + FSCACHE_COOKIE_STORING_TAG); + if (!radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG)) { +- fscache_stat(&fscache_n_store_radix_deletes); ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes); + xpage = radix_tree_delete(&cookie->stores, page->index); + } + spin_unlock(&cookie->stores_lock); +@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op) + + _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); + +- fscache_stat(&fscache_n_attr_changed_calls); ++ fscache_stat_unchecked(&fscache_n_attr_changed_calls); + + if (fscache_object_is_active(object)) { + fscache_stat(&fscache_n_cop_attr_changed); +@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + +- fscache_stat(&fscache_n_attr_changed); ++ fscache_stat_unchecked(&fscache_n_attr_changed); + + op = kzalloc(sizeof(*op), GFP_KERNEL); + if (!op) { +- fscache_stat(&fscache_n_attr_changed_nomem); ++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem); + _leave(" = -ENOMEM"); + return -ENOMEM; + } +@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) + if (fscache_submit_exclusive_op(object, op) < 0) + goto nobufs; + spin_unlock(&cookie->lock); +- fscache_stat(&fscache_n_attr_changed_ok); ++ fscache_stat_unchecked(&fscache_n_attr_changed_ok); + fscache_put_operation(op); + _leave(" = 0"); + return 0; +@@ -225,7 +225,7 @@ nobufs: + kfree(op); + if (wake_cookie) + __fscache_wake_unused_cookie(cookie); +- fscache_stat(&fscache_n_attr_changed_nobufs); ++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs); + _leave(" = %d", -ENOBUFS); + return -ENOBUFS; + } +@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( + /* allocate a retrieval operation and attempt to submit it */ + op = kzalloc(sizeof(*op), GFP_NOIO); + if (!op) { +- fscache_stat(&fscache_n_retrievals_nomem); ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem); + return NULL; + } + +@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) + return 0; + } + +- fscache_stat(&fscache_n_retrievals_wait); ++ fscache_stat_unchecked(&fscache_n_retrievals_wait); + + jif = jiffies; + if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) != 0) { +- fscache_stat(&fscache_n_retrievals_intr); ++ fscache_stat_unchecked(&fscache_n_retrievals_intr); + _leave(" = -ERESTARTSYS"); + return -ERESTARTSYS; + } +@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op) + */ + int fscache_wait_for_operation_activation(struct fscache_object *object, + struct fscache_operation *op, +- atomic_t *stat_op_waits, +- atomic_t *stat_object_dead, ++ atomic_unchecked_t *stat_op_waits, ++ atomic_unchecked_t *stat_object_dead, + void (*do_cancel)(struct fscache_operation *)) + { + int ret; +@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object, + + _debug(">>> WT"); + if (stat_op_waits) +- 
fscache_stat(stat_op_waits); ++ fscache_stat_unchecked(stat_op_waits); + if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) != 0) { +@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object, + check_if_dead: + if (op->state == FSCACHE_OP_ST_CANCELLED) { + if (stat_object_dead) +- fscache_stat(stat_object_dead); ++ fscache_stat_unchecked(stat_object_dead); + _leave(" = -ENOBUFS [cancelled]"); + return -ENOBUFS; + } +@@ -366,7 +366,7 @@ check_if_dead: + pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state); + fscache_cancel_op(op, do_cancel); + if (stat_object_dead) +- fscache_stat(stat_object_dead); ++ fscache_stat_unchecked(stat_object_dead); + return -ENOBUFS; + } + return 0; +@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, + + _enter("%p,%p,,,", cookie, page); + +- fscache_stat(&fscache_n_retrievals); ++ fscache_stat_unchecked(&fscache_n_retrievals); + + if (hlist_empty(&cookie->backing_objects)) + goto nobufs; +@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, + goto nobufs_unlock_dec; + spin_unlock(&cookie->lock); + +- fscache_stat(&fscache_n_retrieval_ops); ++ fscache_stat_unchecked(&fscache_n_retrieval_ops); + + /* pin the netfs read context in case we need to do the actual netfs + * read because we've encountered a cache read failure */ +@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, + + error: + if (ret == -ENOMEM) +- fscache_stat(&fscache_n_retrievals_nomem); ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem); + else if (ret == -ERESTARTSYS) +- fscache_stat(&fscache_n_retrievals_intr); ++ fscache_stat_unchecked(&fscache_n_retrievals_intr); + else if (ret == -ENODATA) +- fscache_stat(&fscache_n_retrievals_nodata); ++ fscache_stat_unchecked(&fscache_n_retrievals_nodata); + else if (ret < 0) +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + else +- fscache_stat(&fscache_n_retrievals_ok); ++ fscache_stat_unchecked(&fscache_n_retrievals_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); +@@ -490,7 +490,7 @@ nobufs_unlock: + __fscache_wake_unused_cookie(cookie); + kfree(op); + nobufs: +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + } +@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + + _enter("%p,,%d,,,", cookie, *nr_pages); + +- fscache_stat(&fscache_n_retrievals); ++ fscache_stat_unchecked(&fscache_n_retrievals); + + if (hlist_empty(&cookie->backing_objects)) + goto nobufs; +@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + goto nobufs_unlock_dec; + spin_unlock(&cookie->lock); + +- fscache_stat(&fscache_n_retrieval_ops); ++ fscache_stat_unchecked(&fscache_n_retrieval_ops); + + /* pin the netfs read context in case we need to do the actual netfs + * read because we've encountered a cache read failure */ +@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + + error: + if (ret == -ENOMEM) +- fscache_stat(&fscache_n_retrievals_nomem); ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem); + else if (ret == -ERESTARTSYS) +- fscache_stat(&fscache_n_retrievals_intr); ++ fscache_stat_unchecked(&fscache_n_retrievals_intr); + else if (ret == -ENODATA) +- fscache_stat(&fscache_n_retrievals_nodata); ++ 
fscache_stat_unchecked(&fscache_n_retrievals_nodata); + else if (ret < 0) +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + else +- fscache_stat(&fscache_n_retrievals_ok); ++ fscache_stat_unchecked(&fscache_n_retrievals_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); +@@ -621,7 +621,7 @@ nobufs_unlock: + if (wake_cookie) + __fscache_wake_unused_cookie(cookie); + nobufs: +- fscache_stat(&fscache_n_retrievals_nobufs); ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + } +@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, + + _enter("%p,%p,,,", cookie, page); + +- fscache_stat(&fscache_n_allocs); ++ fscache_stat_unchecked(&fscache_n_allocs); + + if (hlist_empty(&cookie->backing_objects)) + goto nobufs; +@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, + goto nobufs_unlock_dec; + spin_unlock(&cookie->lock); + +- fscache_stat(&fscache_n_alloc_ops); ++ fscache_stat_unchecked(&fscache_n_alloc_ops); + + ret = fscache_wait_for_operation_activation( + object, &op->op, +@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, + + error: + if (ret == -ERESTARTSYS) +- fscache_stat(&fscache_n_allocs_intr); ++ fscache_stat_unchecked(&fscache_n_allocs_intr); + else if (ret < 0) +- fscache_stat(&fscache_n_allocs_nobufs); ++ fscache_stat_unchecked(&fscache_n_allocs_nobufs); + else +- fscache_stat(&fscache_n_allocs_ok); ++ fscache_stat_unchecked(&fscache_n_allocs_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); +@@ -715,7 +715,7 @@ nobufs_unlock: + if (wake_cookie) + __fscache_wake_unused_cookie(cookie); + nobufs: +- fscache_stat(&fscache_n_allocs_nobufs); ++ fscache_stat_unchecked(&fscache_n_allocs_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + } +@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op) + + spin_lock(&cookie->stores_lock); + +- fscache_stat(&fscache_n_store_calls); ++ fscache_stat_unchecked(&fscache_n_store_calls); + + /* find a page to store */ + page = NULL; +@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op) + page = results[0]; + _debug("gang %d [%lx]", n, page->index); + if (page->index > op->store_limit) { +- fscache_stat(&fscache_n_store_pages_over_limit); ++ fscache_stat_unchecked(&fscache_n_store_pages_over_limit); + goto superseded; + } + +@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op) + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + +- fscache_stat(&fscache_n_store_pages); ++ fscache_stat_unchecked(&fscache_n_store_pages); + fscache_stat(&fscache_n_cop_write_page); + ret = object->cache->ops->write_page(op, page); + fscache_stat_d(&fscache_n_cop_write_page); +@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + ASSERT(PageFsCache(page)); + +- fscache_stat(&fscache_n_stores); ++ fscache_stat_unchecked(&fscache_n_stores); + + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { + _leave(" = -ENOBUFS [invalidating]"); +@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + +- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); ++ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); + op->store_limit = object->store_limit; + + __fscache_use_cookie(cookie); +@@ -986,8 
+986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie, + + spin_unlock(&cookie->lock); + radix_tree_preload_end(); +- fscache_stat(&fscache_n_store_ops); +- fscache_stat(&fscache_n_stores_ok); ++ fscache_stat_unchecked(&fscache_n_store_ops); ++ fscache_stat_unchecked(&fscache_n_stores_ok); + + /* the work queue now carries its own ref on the object */ + fscache_put_operation(&op->op); +@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie, + return 0; + + already_queued: +- fscache_stat(&fscache_n_stores_again); ++ fscache_stat_unchecked(&fscache_n_stores_again); + already_pending: + spin_unlock(&cookie->stores_lock); + spin_unlock(&object->lock); + spin_unlock(&cookie->lock); + radix_tree_preload_end(); + kfree(op); +- fscache_stat(&fscache_n_stores_ok); ++ fscache_stat_unchecked(&fscache_n_stores_ok); + _leave(" = 0"); + return 0; + +@@ -1024,14 +1024,14 @@ nobufs: + kfree(op); + if (wake_cookie) + __fscache_wake_unused_cookie(cookie); +- fscache_stat(&fscache_n_stores_nobufs); ++ fscache_stat_unchecked(&fscache_n_stores_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; + + nomem_free: + kfree(op); + nomem: +- fscache_stat(&fscache_n_stores_oom); ++ fscache_stat_unchecked(&fscache_n_stores_oom); + _leave(" = -ENOMEM"); + return -ENOMEM; + } +@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + ASSERTCMP(page, !=, NULL); + +- fscache_stat(&fscache_n_uncaches); ++ fscache_stat_unchecked(&fscache_n_uncaches); + + /* cache withdrawal may beat us to it */ + if (!PageFsCache(page)) +@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page) + struct fscache_cookie *cookie = op->op.object->cookie; + + #ifdef CONFIG_FSCACHE_STATS +- atomic_inc(&fscache_n_marks); ++ atomic_inc_unchecked(&fscache_n_marks); + #endif + + _debug("- mark %p{%lx}", page, page->index); +diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c +index 40d13c7..ddf52b9 100644 +--- a/fs/fscache/stats.c ++++ b/fs/fscache/stats.c +@@ -18,99 +18,99 @@ + /* + * operation counters + */ +-atomic_t fscache_n_op_pend; +-atomic_t fscache_n_op_run; +-atomic_t fscache_n_op_enqueue; +-atomic_t fscache_n_op_requeue; +-atomic_t fscache_n_op_deferred_release; +-atomic_t fscache_n_op_release; +-atomic_t fscache_n_op_gc; +-atomic_t fscache_n_op_cancelled; +-atomic_t fscache_n_op_rejected; ++atomic_unchecked_t fscache_n_op_pend; ++atomic_unchecked_t fscache_n_op_run; ++atomic_unchecked_t fscache_n_op_enqueue; ++atomic_unchecked_t fscache_n_op_requeue; ++atomic_unchecked_t fscache_n_op_deferred_release; ++atomic_unchecked_t fscache_n_op_release; ++atomic_unchecked_t fscache_n_op_gc; ++atomic_unchecked_t fscache_n_op_cancelled; ++atomic_unchecked_t fscache_n_op_rejected; + +-atomic_t fscache_n_attr_changed; +-atomic_t fscache_n_attr_changed_ok; +-atomic_t fscache_n_attr_changed_nobufs; +-atomic_t fscache_n_attr_changed_nomem; +-atomic_t fscache_n_attr_changed_calls; ++atomic_unchecked_t fscache_n_attr_changed; ++atomic_unchecked_t fscache_n_attr_changed_ok; ++atomic_unchecked_t fscache_n_attr_changed_nobufs; ++atomic_unchecked_t fscache_n_attr_changed_nomem; ++atomic_unchecked_t fscache_n_attr_changed_calls; + +-atomic_t fscache_n_allocs; +-atomic_t fscache_n_allocs_ok; +-atomic_t fscache_n_allocs_wait; +-atomic_t fscache_n_allocs_nobufs; +-atomic_t fscache_n_allocs_intr; +-atomic_t fscache_n_allocs_object_dead; +-atomic_t fscache_n_alloc_ops; +-atomic_t 
fscache_n_alloc_op_waits; ++atomic_unchecked_t fscache_n_allocs; ++atomic_unchecked_t fscache_n_allocs_ok; ++atomic_unchecked_t fscache_n_allocs_wait; ++atomic_unchecked_t fscache_n_allocs_nobufs; ++atomic_unchecked_t fscache_n_allocs_intr; ++atomic_unchecked_t fscache_n_allocs_object_dead; ++atomic_unchecked_t fscache_n_alloc_ops; ++atomic_unchecked_t fscache_n_alloc_op_waits; + +-atomic_t fscache_n_retrievals; +-atomic_t fscache_n_retrievals_ok; +-atomic_t fscache_n_retrievals_wait; +-atomic_t fscache_n_retrievals_nodata; +-atomic_t fscache_n_retrievals_nobufs; +-atomic_t fscache_n_retrievals_intr; +-atomic_t fscache_n_retrievals_nomem; +-atomic_t fscache_n_retrievals_object_dead; +-atomic_t fscache_n_retrieval_ops; +-atomic_t fscache_n_retrieval_op_waits; ++atomic_unchecked_t fscache_n_retrievals; ++atomic_unchecked_t fscache_n_retrievals_ok; ++atomic_unchecked_t fscache_n_retrievals_wait; ++atomic_unchecked_t fscache_n_retrievals_nodata; ++atomic_unchecked_t fscache_n_retrievals_nobufs; ++atomic_unchecked_t fscache_n_retrievals_intr; ++atomic_unchecked_t fscache_n_retrievals_nomem; ++atomic_unchecked_t fscache_n_retrievals_object_dead; ++atomic_unchecked_t fscache_n_retrieval_ops; ++atomic_unchecked_t fscache_n_retrieval_op_waits; + +-atomic_t fscache_n_stores; +-atomic_t fscache_n_stores_ok; +-atomic_t fscache_n_stores_again; +-atomic_t fscache_n_stores_nobufs; +-atomic_t fscache_n_stores_oom; +-atomic_t fscache_n_store_ops; +-atomic_t fscache_n_store_calls; +-atomic_t fscache_n_store_pages; +-atomic_t fscache_n_store_radix_deletes; +-atomic_t fscache_n_store_pages_over_limit; ++atomic_unchecked_t fscache_n_stores; ++atomic_unchecked_t fscache_n_stores_ok; ++atomic_unchecked_t fscache_n_stores_again; ++atomic_unchecked_t fscache_n_stores_nobufs; ++atomic_unchecked_t fscache_n_stores_oom; ++atomic_unchecked_t fscache_n_store_ops; ++atomic_unchecked_t fscache_n_store_calls; ++atomic_unchecked_t fscache_n_store_pages; ++atomic_unchecked_t fscache_n_store_radix_deletes; ++atomic_unchecked_t fscache_n_store_pages_over_limit; + +-atomic_t fscache_n_store_vmscan_not_storing; +-atomic_t fscache_n_store_vmscan_gone; +-atomic_t fscache_n_store_vmscan_busy; +-atomic_t fscache_n_store_vmscan_cancelled; +-atomic_t fscache_n_store_vmscan_wait; ++atomic_unchecked_t fscache_n_store_vmscan_not_storing; ++atomic_unchecked_t fscache_n_store_vmscan_gone; ++atomic_unchecked_t fscache_n_store_vmscan_busy; ++atomic_unchecked_t fscache_n_store_vmscan_cancelled; ++atomic_unchecked_t fscache_n_store_vmscan_wait; + +-atomic_t fscache_n_marks; +-atomic_t fscache_n_uncaches; ++atomic_unchecked_t fscache_n_marks; ++atomic_unchecked_t fscache_n_uncaches; + +-atomic_t fscache_n_acquires; +-atomic_t fscache_n_acquires_null; +-atomic_t fscache_n_acquires_no_cache; +-atomic_t fscache_n_acquires_ok; +-atomic_t fscache_n_acquires_nobufs; +-atomic_t fscache_n_acquires_oom; ++atomic_unchecked_t fscache_n_acquires; ++atomic_unchecked_t fscache_n_acquires_null; ++atomic_unchecked_t fscache_n_acquires_no_cache; ++atomic_unchecked_t fscache_n_acquires_ok; ++atomic_unchecked_t fscache_n_acquires_nobufs; ++atomic_unchecked_t fscache_n_acquires_oom; + +-atomic_t fscache_n_invalidates; +-atomic_t fscache_n_invalidates_run; ++atomic_unchecked_t fscache_n_invalidates; ++atomic_unchecked_t fscache_n_invalidates_run; + +-atomic_t fscache_n_updates; +-atomic_t fscache_n_updates_null; +-atomic_t fscache_n_updates_run; ++atomic_unchecked_t fscache_n_updates; ++atomic_unchecked_t fscache_n_updates_null; ++atomic_unchecked_t 
fscache_n_updates_run; + +-atomic_t fscache_n_relinquishes; +-atomic_t fscache_n_relinquishes_null; +-atomic_t fscache_n_relinquishes_waitcrt; +-atomic_t fscache_n_relinquishes_retire; ++atomic_unchecked_t fscache_n_relinquishes; ++atomic_unchecked_t fscache_n_relinquishes_null; ++atomic_unchecked_t fscache_n_relinquishes_waitcrt; ++atomic_unchecked_t fscache_n_relinquishes_retire; + +-atomic_t fscache_n_cookie_index; +-atomic_t fscache_n_cookie_data; +-atomic_t fscache_n_cookie_special; ++atomic_unchecked_t fscache_n_cookie_index; ++atomic_unchecked_t fscache_n_cookie_data; ++atomic_unchecked_t fscache_n_cookie_special; + +-atomic_t fscache_n_object_alloc; +-atomic_t fscache_n_object_no_alloc; +-atomic_t fscache_n_object_lookups; +-atomic_t fscache_n_object_lookups_negative; +-atomic_t fscache_n_object_lookups_positive; +-atomic_t fscache_n_object_lookups_timed_out; +-atomic_t fscache_n_object_created; +-atomic_t fscache_n_object_avail; +-atomic_t fscache_n_object_dead; ++atomic_unchecked_t fscache_n_object_alloc; ++atomic_unchecked_t fscache_n_object_no_alloc; ++atomic_unchecked_t fscache_n_object_lookups; ++atomic_unchecked_t fscache_n_object_lookups_negative; ++atomic_unchecked_t fscache_n_object_lookups_positive; ++atomic_unchecked_t fscache_n_object_lookups_timed_out; ++atomic_unchecked_t fscache_n_object_created; ++atomic_unchecked_t fscache_n_object_avail; ++atomic_unchecked_t fscache_n_object_dead; + +-atomic_t fscache_n_checkaux_none; +-atomic_t fscache_n_checkaux_okay; +-atomic_t fscache_n_checkaux_update; +-atomic_t fscache_n_checkaux_obsolete; ++atomic_unchecked_t fscache_n_checkaux_none; ++atomic_unchecked_t fscache_n_checkaux_okay; ++atomic_unchecked_t fscache_n_checkaux_update; ++atomic_unchecked_t fscache_n_checkaux_obsolete; + + atomic_t fscache_n_cop_alloc_object; + atomic_t fscache_n_cop_lookup_object; +@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v) + seq_puts(m, "FS-Cache statistics\n"); + + seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", +- atomic_read(&fscache_n_cookie_index), +- atomic_read(&fscache_n_cookie_data), +- atomic_read(&fscache_n_cookie_special)); ++ atomic_read_unchecked(&fscache_n_cookie_index), ++ atomic_read_unchecked(&fscache_n_cookie_data), ++ atomic_read_unchecked(&fscache_n_cookie_special)); + + seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", +- atomic_read(&fscache_n_object_alloc), +- atomic_read(&fscache_n_object_no_alloc), +- atomic_read(&fscache_n_object_avail), +- atomic_read(&fscache_n_object_dead)); ++ atomic_read_unchecked(&fscache_n_object_alloc), ++ atomic_read_unchecked(&fscache_n_object_no_alloc), ++ atomic_read_unchecked(&fscache_n_object_avail), ++ atomic_read_unchecked(&fscache_n_object_dead)); + seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", +- atomic_read(&fscache_n_checkaux_none), +- atomic_read(&fscache_n_checkaux_okay), +- atomic_read(&fscache_n_checkaux_update), +- atomic_read(&fscache_n_checkaux_obsolete)); ++ atomic_read_unchecked(&fscache_n_checkaux_none), ++ atomic_read_unchecked(&fscache_n_checkaux_okay), ++ atomic_read_unchecked(&fscache_n_checkaux_update), ++ atomic_read_unchecked(&fscache_n_checkaux_obsolete)); + + seq_printf(m, "Pages : mrk=%u unc=%u\n", +- atomic_read(&fscache_n_marks), +- atomic_read(&fscache_n_uncaches)); ++ atomic_read_unchecked(&fscache_n_marks), ++ atomic_read_unchecked(&fscache_n_uncaches)); + + seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" + " oom=%u\n", +- atomic_read(&fscache_n_acquires), +- 
atomic_read(&fscache_n_acquires_null), +- atomic_read(&fscache_n_acquires_no_cache), +- atomic_read(&fscache_n_acquires_ok), +- atomic_read(&fscache_n_acquires_nobufs), +- atomic_read(&fscache_n_acquires_oom)); ++ atomic_read_unchecked(&fscache_n_acquires), ++ atomic_read_unchecked(&fscache_n_acquires_null), ++ atomic_read_unchecked(&fscache_n_acquires_no_cache), ++ atomic_read_unchecked(&fscache_n_acquires_ok), ++ atomic_read_unchecked(&fscache_n_acquires_nobufs), ++ atomic_read_unchecked(&fscache_n_acquires_oom)); + + seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", +- atomic_read(&fscache_n_object_lookups), +- atomic_read(&fscache_n_object_lookups_negative), +- atomic_read(&fscache_n_object_lookups_positive), +- atomic_read(&fscache_n_object_created), +- atomic_read(&fscache_n_object_lookups_timed_out)); ++ atomic_read_unchecked(&fscache_n_object_lookups), ++ atomic_read_unchecked(&fscache_n_object_lookups_negative), ++ atomic_read_unchecked(&fscache_n_object_lookups_positive), ++ atomic_read_unchecked(&fscache_n_object_created), ++ atomic_read_unchecked(&fscache_n_object_lookups_timed_out)); + + seq_printf(m, "Invals : n=%u run=%u\n", +- atomic_read(&fscache_n_invalidates), +- atomic_read(&fscache_n_invalidates_run)); ++ atomic_read_unchecked(&fscache_n_invalidates), ++ atomic_read_unchecked(&fscache_n_invalidates_run)); + + seq_printf(m, "Updates: n=%u nul=%u run=%u\n", +- atomic_read(&fscache_n_updates), +- atomic_read(&fscache_n_updates_null), +- atomic_read(&fscache_n_updates_run)); ++ atomic_read_unchecked(&fscache_n_updates), ++ atomic_read_unchecked(&fscache_n_updates_null), ++ atomic_read_unchecked(&fscache_n_updates_run)); + + seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", +- atomic_read(&fscache_n_relinquishes), +- atomic_read(&fscache_n_relinquishes_null), +- atomic_read(&fscache_n_relinquishes_waitcrt), +- atomic_read(&fscache_n_relinquishes_retire)); ++ atomic_read_unchecked(&fscache_n_relinquishes), ++ atomic_read_unchecked(&fscache_n_relinquishes_null), ++ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt), ++ atomic_read_unchecked(&fscache_n_relinquishes_retire)); + + seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", +- atomic_read(&fscache_n_attr_changed), +- atomic_read(&fscache_n_attr_changed_ok), +- atomic_read(&fscache_n_attr_changed_nobufs), +- atomic_read(&fscache_n_attr_changed_nomem), +- atomic_read(&fscache_n_attr_changed_calls)); ++ atomic_read_unchecked(&fscache_n_attr_changed), ++ atomic_read_unchecked(&fscache_n_attr_changed_ok), ++ atomic_read_unchecked(&fscache_n_attr_changed_nobufs), ++ atomic_read_unchecked(&fscache_n_attr_changed_nomem), ++ atomic_read_unchecked(&fscache_n_attr_changed_calls)); + + seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", +- atomic_read(&fscache_n_allocs), +- atomic_read(&fscache_n_allocs_ok), +- atomic_read(&fscache_n_allocs_wait), +- atomic_read(&fscache_n_allocs_nobufs), +- atomic_read(&fscache_n_allocs_intr)); ++ atomic_read_unchecked(&fscache_n_allocs), ++ atomic_read_unchecked(&fscache_n_allocs_ok), ++ atomic_read_unchecked(&fscache_n_allocs_wait), ++ atomic_read_unchecked(&fscache_n_allocs_nobufs), ++ atomic_read_unchecked(&fscache_n_allocs_intr)); + seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", +- atomic_read(&fscache_n_alloc_ops), +- atomic_read(&fscache_n_alloc_op_waits), +- atomic_read(&fscache_n_allocs_object_dead)); ++ atomic_read_unchecked(&fscache_n_alloc_ops), ++ atomic_read_unchecked(&fscache_n_alloc_op_waits), ++ 
atomic_read_unchecked(&fscache_n_allocs_object_dead)); + + seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" + " int=%u oom=%u\n", +- atomic_read(&fscache_n_retrievals), +- atomic_read(&fscache_n_retrievals_ok), +- atomic_read(&fscache_n_retrievals_wait), +- atomic_read(&fscache_n_retrievals_nodata), +- atomic_read(&fscache_n_retrievals_nobufs), +- atomic_read(&fscache_n_retrievals_intr), +- atomic_read(&fscache_n_retrievals_nomem)); ++ atomic_read_unchecked(&fscache_n_retrievals), ++ atomic_read_unchecked(&fscache_n_retrievals_ok), ++ atomic_read_unchecked(&fscache_n_retrievals_wait), ++ atomic_read_unchecked(&fscache_n_retrievals_nodata), ++ atomic_read_unchecked(&fscache_n_retrievals_nobufs), ++ atomic_read_unchecked(&fscache_n_retrievals_intr), ++ atomic_read_unchecked(&fscache_n_retrievals_nomem)); + seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", +- atomic_read(&fscache_n_retrieval_ops), +- atomic_read(&fscache_n_retrieval_op_waits), +- atomic_read(&fscache_n_retrievals_object_dead)); ++ atomic_read_unchecked(&fscache_n_retrieval_ops), ++ atomic_read_unchecked(&fscache_n_retrieval_op_waits), ++ atomic_read_unchecked(&fscache_n_retrievals_object_dead)); + + seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", +- atomic_read(&fscache_n_stores), +- atomic_read(&fscache_n_stores_ok), +- atomic_read(&fscache_n_stores_again), +- atomic_read(&fscache_n_stores_nobufs), +- atomic_read(&fscache_n_stores_oom)); ++ atomic_read_unchecked(&fscache_n_stores), ++ atomic_read_unchecked(&fscache_n_stores_ok), ++ atomic_read_unchecked(&fscache_n_stores_again), ++ atomic_read_unchecked(&fscache_n_stores_nobufs), ++ atomic_read_unchecked(&fscache_n_stores_oom)); + seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", +- atomic_read(&fscache_n_store_ops), +- atomic_read(&fscache_n_store_calls), +- atomic_read(&fscache_n_store_pages), +- atomic_read(&fscache_n_store_radix_deletes), +- atomic_read(&fscache_n_store_pages_over_limit)); ++ atomic_read_unchecked(&fscache_n_store_ops), ++ atomic_read_unchecked(&fscache_n_store_calls), ++ atomic_read_unchecked(&fscache_n_store_pages), ++ atomic_read_unchecked(&fscache_n_store_radix_deletes), ++ atomic_read_unchecked(&fscache_n_store_pages_over_limit)); + + seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n", +- atomic_read(&fscache_n_store_vmscan_not_storing), +- atomic_read(&fscache_n_store_vmscan_gone), +- atomic_read(&fscache_n_store_vmscan_busy), +- atomic_read(&fscache_n_store_vmscan_cancelled), +- atomic_read(&fscache_n_store_vmscan_wait)); ++ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing), ++ atomic_read_unchecked(&fscache_n_store_vmscan_gone), ++ atomic_read_unchecked(&fscache_n_store_vmscan_busy), ++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled), ++ atomic_read_unchecked(&fscache_n_store_vmscan_wait)); + + seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n", +- atomic_read(&fscache_n_op_pend), +- atomic_read(&fscache_n_op_run), +- atomic_read(&fscache_n_op_enqueue), +- atomic_read(&fscache_n_op_cancelled), +- atomic_read(&fscache_n_op_rejected)); ++ atomic_read_unchecked(&fscache_n_op_pend), ++ atomic_read_unchecked(&fscache_n_op_run), ++ atomic_read_unchecked(&fscache_n_op_enqueue), ++ atomic_read_unchecked(&fscache_n_op_cancelled), ++ atomic_read_unchecked(&fscache_n_op_rejected)); + seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", +- atomic_read(&fscache_n_op_deferred_release), +- atomic_read(&fscache_n_op_release), +- atomic_read(&fscache_n_op_gc)); ++ 
atomic_read_unchecked(&fscache_n_op_deferred_release), ++ atomic_read_unchecked(&fscache_n_op_release), ++ atomic_read_unchecked(&fscache_n_op_gc)); + + seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", + atomic_read(&fscache_n_cop_alloc_object), +diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c +index b96a49b..9bfdc47 100644 +--- a/fs/fuse/cuse.c ++++ b/fs/fuse/cuse.c +@@ -606,10 +606,12 @@ static int __init cuse_init(void) + INIT_LIST_HEAD(&cuse_conntbl[i]); + + /* inherit and extend fuse_dev_operations */ +- cuse_channel_fops = fuse_dev_operations; +- cuse_channel_fops.owner = THIS_MODULE; +- cuse_channel_fops.open = cuse_channel_open; +- cuse_channel_fops.release = cuse_channel_release; ++ pax_open_kernel(); ++ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations)); ++ *(void **)&cuse_channel_fops.owner = THIS_MODULE; ++ *(void **)&cuse_channel_fops.open = cuse_channel_open; ++ *(void **)&cuse_channel_fops.release = cuse_channel_release; ++ pax_close_kernel(); + + cuse_class = class_create(THIS_MODULE, "cuse"); + if (IS_ERR(cuse_class)) +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 0a648bb..8d463f1 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -1323,7 +1323,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, + ret = 0; + pipe_lock(pipe); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -1352,7 +1352,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, + page_nr++; + ret += buf->len; + +- if (pipe->files) ++ if (atomic_read(&pipe->files)) + do_wakeup = 1; + } + +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index 342f0239..d67794c 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1419,7 +1419,7 @@ static char *read_link(struct dentry *dentry) + return link; + } + +-static void free_link(char *link) ++static void free_link(const char *link) + { + if (!IS_ERR(link)) + free_page((unsigned long) link); +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c +index fe649d3..c679164 100644 +--- a/fs/hostfs/hostfs_kern.c ++++ b/fs/hostfs/hostfs_kern.c +@@ -898,7 +898,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd) + + static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); + } +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index d19b30a..ef89c36 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct hstate *h = hstate_file(file); ++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); + struct vm_unmapped_area_info info; + + if (len & ~huge_page_mask(h)) +@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + return addr; + } + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ 
info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; +@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = { + }; + MODULE_ALIAS_FS("hugetlbfs"); + +-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; ++struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; + + static int can_do_hugetlb_shm(void) + { +diff --git a/fs/inode.c b/fs/inode.c +index e846a32..bb06bd0 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -839,16 +839,20 @@ unsigned int get_next_ino(void) + unsigned int *p = &get_cpu_var(last_ino); + unsigned int res = *p; + ++start: ++ + #ifdef CONFIG_SMP + if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { +- static atomic_t shared_last_ino; +- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino); ++ static atomic_unchecked_t shared_last_ino; ++ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino); + + res = next - LAST_INO_BATCH; + } + #endif + +- *p = ++res; ++ if (unlikely(!++res)) ++ goto start; /* never zero */ ++ *p = res; + put_cpu_var(last_ino); + return res; + } +diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c +index 4a6cf28..d3a29d3 100644 +--- a/fs/jffs2/erase.c ++++ b/fs/jffs2/erase.c +@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb + struct jffs2_unknown_node marker = { + .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), +- .totlen = cpu_to_je32(c->cleanmarker_size) ++ .totlen = cpu_to_je32(c->cleanmarker_size), ++ .hdr_crc = cpu_to_je32(0) + }; + + jffs2_prealloc_raw_node_refs(c, jeb, 1); +diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c +index a6597d6..41b30ec 100644 +--- a/fs/jffs2/wbuf.c ++++ b/fs/jffs2/wbuf.c +@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker = + { + .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), +- .totlen = constant_cpu_to_je32(8) ++ .totlen = constant_cpu_to_je32(8), ++ .hdr_crc = constant_cpu_to_je32(0) + }; + + /* +diff --git a/fs/jfs/super.c b/fs/jfs/super.c +index e2b7483..855bca3 100644 +--- a/fs/jfs/super.c ++++ b/fs/jfs/super.c +@@ -884,7 +884,7 @@ static int __init init_jfs_fs(void) + + jfs_inode_cachep = + kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, +- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, ++ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY, + init_once); + if (jfs_inode_cachep == NULL) + return -ENOMEM; +diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c +index 39c0143..d54fad4 100644 +--- a/fs/kernfs/dir.c ++++ b/fs/kernfs/dir.c +@@ -28,7 +28,7 @@ DEFINE_MUTEX(kernfs_mutex); + * + * Returns 31 bit hash of ns + name (so it fits in an off_t ) + */ +-static unsigned int kernfs_name_hash(const char *name, const void *ns) ++static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns) + { + unsigned long hash = init_name_hash(); + unsigned int len = strlen(name); +diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c +index d29640b..32d2b6b 100644 +--- a/fs/kernfs/file.c ++++ b/fs/kernfs/file.c +@@ -33,7 +33,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex); + + struct kernfs_open_node { + atomic_t refcnt; +- atomic_t event; ++ atomic_unchecked_t event; + wait_queue_head_t poll; + struct list_head files; /* goes through kernfs_open_file.list */ + }; +@@ -149,7 +149,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v) + { + struct kernfs_open_file *of = 
sf->private; + +- of->event = atomic_read(&of->kn->attr.open->event); ++ of->event = atomic_read_unchecked(&of->kn->attr.open->event); + + return of->kn->attr.ops->seq_show(sf, v); + } +@@ -353,12 +353,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma, + return ret; + } + +-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, +- void *buf, int len, int write) ++static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, ++ void *buf, size_t len, int write) + { + struct file *file = vma->vm_file; + struct kernfs_open_file *of = kernfs_of(file); +- int ret; ++ ssize_t ret; + + if (!of->vm_ops) + return -EINVAL; +@@ -559,7 +559,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn, + return -ENOMEM; + + atomic_set(&new_on->refcnt, 0); +- atomic_set(&new_on->event, 1); ++ atomic_set_unchecked(&new_on->event, 1); + init_waitqueue_head(&new_on->poll); + INIT_LIST_HEAD(&new_on->files); + goto retry; +@@ -756,7 +756,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait) + + kernfs_put_active(kn); + +- if (of->event != atomic_read(&on->event)) ++ if (of->event != atomic_read_unchecked(&on->event)) + goto trigger; + + return DEFAULT_POLLMASK; +@@ -781,7 +781,7 @@ void kernfs_notify(struct kernfs_node *kn) + if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) { + on = kn->attr.open; + if (on) { +- atomic_inc(&on->event); ++ atomic_inc_unchecked(&on->event); + wake_up_interruptible(&on->poll); + } + } +diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c +index 4d45705..b35e0bd 100644 +--- a/fs/kernfs/symlink.c ++++ b/fs/kernfs/symlink.c +@@ -132,7 +132,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd) + static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) + { +- char *page = nd_get_link(nd); ++ const char *page = nd_get_link(nd); + if (!IS_ERR(page)) + free_page((unsigned long)page); + } +diff --git a/fs/libfs.c b/fs/libfs.c +index a184424..944ddce 100644 +--- a/fs/libfs.c ++++ b/fs/libfs.c +@@ -159,6 +159,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) + + for (p = q->next; p != &dentry->d_subdirs; p = p->next) { + struct dentry *next = list_entry(p, struct dentry, d_u.d_child); ++ char d_name[sizeof(next->d_iname)]; ++ const unsigned char *name; ++ + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (!simple_positive(next)) { + spin_unlock(&next->d_lock); +@@ -167,7 +170,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) + + spin_unlock(&next->d_lock); + spin_unlock(&dentry->d_lock); +- if (!dir_emit(ctx, next->d_name.name, next->d_name.len, ++ name = next->d_name.name; ++ if (name == next->d_iname) { ++ memcpy(d_name, name, next->d_name.len); ++ name = d_name; ++ } ++ if (!dir_emit(ctx, name, next->d_name.len, + next->d_inode->i_ino, dt_type(next->d_inode))) + return 0; + spin_lock(&dentry->d_lock); +@@ -999,7 +1007,7 @@ EXPORT_SYMBOL(noop_fsync); + void kfree_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + kfree(s); + } +diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c +index acd3947..1f896e2 100644 +--- a/fs/lockd/clntproc.c ++++ b/fs/lockd/clntproc.c +@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops; + /* + * Cookie counter for NLM requests + */ +-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); ++static atomic_unchecked_t nlm_cookie = 
ATOMIC_INIT(0x1234); + + void nlmclnt_next_cookie(struct nlm_cookie *c) + { +- u32 cookie = atomic_inc_return(&nlm_cookie); ++ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie); + + memcpy(c->data, &cookie, 4); + c->len=4; +diff --git a/fs/locks.c b/fs/locks.c +index 4dd39b9..12d6aaf 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -2218,16 +2218,16 @@ void locks_remove_flock(struct file *filp) + return; + + if (filp->f_op->flock) { +- struct file_lock fl = { ++ struct file_lock flock = { + .fl_pid = current->tgid, + .fl_file = filp, + .fl_flags = FL_FLOCK, + .fl_type = F_UNLCK, + .fl_end = OFFSET_MAX, + }; +- filp->f_op->flock(filp, F_SETLKW, &fl); +- if (fl.fl_ops && fl.fl_ops->fl_release_private) +- fl.fl_ops->fl_release_private(&fl); ++ filp->f_op->flock(filp, F_SETLKW, &flock); ++ if (flock.fl_ops && flock.fl_ops->fl_release_private) ++ flock.fl_ops->fl_release_private(&flock); + } + + spin_lock(&inode->i_lock); +diff --git a/fs/mount.h b/fs/mount.h +index b29e42f..5ea7fdf 100644 +--- a/fs/mount.h ++++ b/fs/mount.h +@@ -11,7 +11,7 @@ struct mnt_namespace { + u64 seq; /* Sequence number to prevent loops */ + wait_queue_head_t poll; + int event; +-}; ++} __randomize_layout; + + struct mnt_pcp { + int mnt_count; +@@ -57,7 +57,7 @@ struct mount { + int mnt_expiry_mark; /* true if marked for expiry */ + int mnt_pinned; + struct path mnt_ex_mountpoint; +-}; ++} __randomize_layout; + + #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ + +diff --git a/fs/namei.c b/fs/namei.c +index bdea109..e242796 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -330,17 +330,34 @@ int generic_permission(struct inode *inode, int mask) + if (ret != -EACCES) + return ret; + ++#ifdef CONFIG_GRKERNSEC ++ /* we'll block if we have to log due to a denied capability use */ ++ if (mask & MAY_NOT_BLOCK) ++ return -ECHILD; ++#endif ++ + if (S_ISDIR(inode->i_mode)) { + /* DACs are overridable for directories */ +- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) +- return 0; + if (!(mask & MAY_WRITE)) +- if (capable_wrt_inode_uidgid(inode, ++ if (capable_wrt_inode_uidgid_nolog(inode, ++ CAP_DAC_OVERRIDE) || ++ capable_wrt_inode_uidgid(inode, + CAP_DAC_READ_SEARCH)) + return 0; ++ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) ++ return 0; + return -EACCES; + } + /* ++ * Searching includes executable on directories, else just read. ++ */ ++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; ++ if (mask == MAY_READ) ++ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) || ++ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) ++ return 0; ++ ++ /* + * Read/write DACs are always overridable. + * Executable DACs are overridable when there is + * at least one exec bit set. +@@ -349,14 +366,6 @@ int generic_permission(struct inode *inode, int mask) + if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) + return 0; + +- /* +- * Searching includes executable on directories, else just read. 
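The two generic_permission() hunks here reorder the directory DAC checks: a read-only lookup now tries capable_wrt_inode_uidgid_nolog(CAP_DAC_OVERRIDE) and CAP_DAC_READ_SEARCH before the plain CAP_DAC_OVERRIDE check, so that (going by the _nolog name) a logged capability use is only recorded when the logging check is the one that actually decides. A minimal sketch of the patched directory branch, with the capable_wrt_* helpers assumed as declared by the patch:

    static int patched_dir_permission(struct inode *inode, int mask)
    {
    	/* read/search on a directory: try the non-logging overrides first */
    	if (!(mask & MAY_WRITE))
    		if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
    		    capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
    			return 0;
    	/* anything involving write falls through to the logged check */
    	if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
    		return 0;
    	return -EACCES;
    }

The same idea is applied once more for the non-directory MAY_READ case, which the patch moves ahead of the generic read/write override.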
+- */ +- mask &= MAY_READ | MAY_WRITE | MAY_EXEC; +- if (mask == MAY_READ) +- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) +- return 0; +- + return -EACCES; + } + +@@ -822,7 +831,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p) + { + struct dentry *dentry = link->dentry; + int error; +- char *s; ++ const char *s; + + BUG_ON(nd->flags & LOOKUP_RCU); + +@@ -843,6 +852,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p) + if (error) + goto out_put_nd_path; + ++ if (gr_handle_follow_link(dentry->d_parent->d_inode, ++ dentry->d_inode, dentry, nd->path.mnt)) { ++ error = -EACCES; ++ goto out_put_nd_path; ++ } ++ + nd->last_type = LAST_BIND; + *p = dentry->d_inode->i_op->follow_link(dentry, nd); + error = PTR_ERR(*p); +@@ -1591,6 +1606,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) + if (res) + break; + res = walk_component(nd, path, LOOKUP_FOLLOW); ++ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) ++ res = -EACCES; + put_link(nd, &link, cookie); + } while (res > 0); + +@@ -1664,7 +1681,7 @@ EXPORT_SYMBOL(full_name_hash); + static inline unsigned long hash_name(const char *name, unsigned int *hashp) + { + unsigned long a, b, adata, bdata, mask, hash, len; +- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; ++ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + + hash = a = 0; + len = -sizeof(unsigned long); +@@ -1948,6 +1965,8 @@ static int path_lookupat(int dfd, const char *name, + if (err) + break; + err = lookup_last(nd, &path); ++ if (!err && gr_handle_symlink_owner(&link, nd->inode)) ++ err = -EACCES; + put_link(nd, &link, cookie); + } + } +@@ -1955,6 +1974,13 @@ static int path_lookupat(int dfd, const char *name, + if (!err) + err = complete_walk(nd); + ++ if (!err && !(nd->flags & LOOKUP_PARENT)) { ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { ++ path_put(&nd->path); ++ err = -ENOENT; ++ } ++ } ++ + if (!err && nd->flags & LOOKUP_DIRECTORY) { + if (!d_is_directory(nd->path.dentry)) { + path_put(&nd->path); +@@ -1982,8 +2008,15 @@ static int filename_lookup(int dfd, struct filename *name, + retval = path_lookupat(dfd, name->name, + flags | LOOKUP_REVAL, nd); + +- if (likely(!retval)) ++ if (likely(!retval)) { + audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT); ++ if (name->name[0] != '/' && nd->path.dentry && nd->inode) { ++ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) { ++ path_put(&nd->path); ++ return -ENOENT; ++ } ++ } ++ } + return retval; + } + +@@ -2558,6 +2591,13 @@ static int may_open(struct path *path, int acc_mode, int flag) + if (flag & O_NOATIME && !inode_owner_or_capable(inode)) + return -EPERM; + ++ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) ++ return -EPERM; ++ if (gr_handle_rawio(inode)) ++ return -EPERM; ++ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) ++ return -EACCES; ++ + return 0; + } + +@@ -2789,7 +2829,7 @@ looked_up: + * cleared otherwise prior to returning. 
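The do_last()/lookup_open() changes that follow all serve one scheme: path_openat() keeps the trailing symlink it is resolving in a struct path and passes it down as a new link argument (NULL when no symlink is involved) through do_last() into lookup_open(), so that gr_handle_symlink_owner() can veto the open wherever a target inode first becomes visible. The recurring guard, sketched here with the gr_* helper assumed as used by the patch:

    /* sketch: reject if a traversed symlink fails the ownership policy */
    static int link_owner_ok(struct path *link, struct inode *target)
    {
    	if (link && gr_handle_symlink_owner(link, target))
    		return -EACCES;
    	return 0;
    }

The patch repeats this test inline at the create path, the positive-dentry path, finish_lookup and finish_open rather than factoring it out, keeping each hunk local to its own error path.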
+ */ + static int lookup_open(struct nameidata *nd, struct path *path, +- struct file *file, ++ struct path *link, struct file *file, + const struct open_flags *op, + bool got_write, int *opened) + { +@@ -2824,6 +2864,17 @@ static int lookup_open(struct nameidata *nd, struct path *path, + /* Negative dentry, just create the file */ + if (!dentry->d_inode && (op->open_flag & O_CREAT)) { + umode_t mode = op->mode; ++ ++ if (link && gr_handle_symlink_owner(link, dir->d_inode)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ ++ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ + if (!IS_POSIXACL(dir->d_inode)) + mode &= ~current_umask(); + /* +@@ -2845,6 +2896,8 @@ static int lookup_open(struct nameidata *nd, struct path *path, + nd->flags & LOOKUP_EXCL); + if (error) + goto out_dput; ++ else ++ gr_handle_create(dentry, nd->path.mnt); + } + out_no_open: + path->dentry = dentry; +@@ -2859,7 +2912,7 @@ out_dput: + /* + * Handle the last step of open() + */ +-static int do_last(struct nameidata *nd, struct path *path, ++static int do_last(struct nameidata *nd, struct path *path, struct path *link, + struct file *file, const struct open_flags *op, + int *opened, struct filename *name) + { +@@ -2909,6 +2962,15 @@ static int do_last(struct nameidata *nd, struct path *path, + if (error) + return error; + ++ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) { ++ error = -ENOENT; ++ goto out; ++ } ++ if (link && gr_handle_symlink_owner(link, nd->inode)) { ++ error = -EACCES; ++ goto out; ++ } ++ + audit_inode(name, dir, LOOKUP_PARENT); + error = -EISDIR; + /* trailing slashes? */ +@@ -2928,7 +2990,7 @@ retry_lookup: + */ + } + mutex_lock(&dir->d_inode->i_mutex); +- error = lookup_open(nd, path, file, op, got_write, opened); ++ error = lookup_open(nd, path, link, file, op, got_write, opened); + mutex_unlock(&dir->d_inode->i_mutex); + + if (error <= 0) { +@@ -2952,11 +3014,28 @@ retry_lookup: + goto finish_open_created; + } + ++ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) { ++ error = -ENOENT; ++ goto exit_dput; ++ } ++ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) { ++ error = -EACCES; ++ goto exit_dput; ++ } ++ + /* + * create/update audit record if it already exists. + */ +- if (d_is_positive(path->dentry)) ++ if (d_is_positive(path->dentry)) { ++ /* only check if O_CREAT is specified, all other checks need to go ++ into may_open */ ++ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) { ++ error = -EACCES; ++ goto exit_dput; ++ } ++ + audit_inode(name, path->dentry, 0); ++ } + + /* + * If atomic_open() acquired write access it is dropped now due to +@@ -2997,6 +3076,11 @@ finish_lookup: + } + } + BUG_ON(inode != path->dentry->d_inode); ++ /* if we're resolving a symlink to another symlink */ ++ if (link && gr_handle_symlink_owner(link, inode)) { ++ error = -EACCES; ++ goto out; ++ } + return 1; + } + +@@ -3006,7 +3090,6 @@ finish_lookup: + save_parent.dentry = nd->path.dentry; + save_parent.mnt = mntget(path->mnt); + nd->path.dentry = path->dentry; +- + } + nd->inode = inode; + /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... 
*/ +@@ -3016,7 +3099,18 @@ finish_open: + path_put(&save_parent); + return error; + } ++ ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { ++ error = -ENOENT; ++ goto out; ++ } ++ if (link && gr_handle_symlink_owner(link, nd->inode)) { ++ error = -EACCES; ++ goto out; ++ } ++ + audit_inode(name, nd->path.dentry, 0); ++ + error = -EISDIR; + if ((open_flag & O_CREAT) && + (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry))) +@@ -3180,7 +3274,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, + if (unlikely(error)) + goto out; + +- error = do_last(nd, &path, file, op, &opened, pathname); ++ error = do_last(nd, &path, NULL, file, op, &opened, pathname); + while (unlikely(error > 0)) { /* trailing symlink */ + struct path link = path; + void *cookie; +@@ -3198,7 +3292,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, + error = follow_link(&link, nd, &cookie); + if (unlikely(error)) + break; +- error = do_last(nd, &path, file, op, &opened, pathname); ++ error = do_last(nd, &path, &link, file, op, &opened, pathname); + put_link(nd, &link, cookie); + } + out: +@@ -3298,9 +3392,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, + goto unlock; + + error = -EEXIST; +- if (d_is_positive(dentry)) ++ if (d_is_positive(dentry)) { ++ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) ++ error = -ENOENT; + goto fail; +- ++ } + /* + * Special case - lookup gave negative, but... we had foo/bar/ + * From the vfs_mknod() POV we just have a negative dentry - +@@ -3352,6 +3448,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, + } + EXPORT_SYMBOL(user_path_create); + ++static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags) ++{ ++ struct filename *tmp = getname(pathname); ++ struct dentry *res; ++ if (IS_ERR(tmp)) ++ return ERR_CAST(tmp); ++ res = kern_path_create(dfd, tmp->name, path, lookup_flags); ++ if (IS_ERR(res)) ++ putname(tmp); ++ else ++ *to = tmp; ++ return res; ++} ++ + int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) + { + int error = may_create(dir, dentry); +@@ -3414,6 +3524,17 @@ retry: + + if (!IS_POSIXACL(path.dentry->d_inode)) + mode &= ~current_umask(); ++ ++ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) { ++ error = -EACCES; ++ goto out; ++ } ++ + error = security_path_mknod(&path, dentry, mode, dev); + if (error) + goto out; +@@ -3430,6 +3551,8 @@ retry: + break; + } + out: ++ if (!error) ++ gr_handle_create(dentry, path.mnt); + done_path_create(&path, dentry); + if (retry_estale(error, lookup_flags)) { + lookup_flags |= LOOKUP_REVAL; +@@ -3482,9 +3605,16 @@ retry: + + if (!IS_POSIXACL(path.dentry->d_inode)) + mode &= ~current_umask(); ++ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) { ++ error = -EACCES; ++ goto out; ++ } + error = security_path_mkdir(&path, dentry, mode); + if (!error) + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); ++ if (!error) ++ gr_handle_create(dentry, path.mnt); ++out: + done_path_create(&path, dentry); + if (retry_estale(error, lookup_flags)) { + lookup_flags |= LOOKUP_REVAL; +@@ -3565,6 +3695,8 @@ static long do_rmdir(int dfd, const char __user *pathname) + struct filename *name; + struct dentry *dentry; + struct nameidata nd; ++ ino_t saved_ino = 0; ++ dev_t saved_dev = 0; + 
unsigned int lookup_flags = 0; + retry: + name = user_path_parent(dfd, pathname, &nd, lookup_flags); +@@ -3597,10 +3729,21 @@ retry: + error = -ENOENT; + goto exit3; + } ++ ++ saved_ino = dentry->d_inode->i_ino; ++ saved_dev = gr_get_dev_from_dentry(dentry); ++ ++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto exit3; ++ } ++ + error = security_path_rmdir(&nd.path, dentry); + if (error) + goto exit3; + error = vfs_rmdir(nd.path.dentry->d_inode, dentry); ++ if (!error && (saved_dev || saved_ino)) ++ gr_handle_delete(saved_ino, saved_dev); + exit3: + dput(dentry); + exit2: +@@ -3690,6 +3833,8 @@ static long do_unlinkat(int dfd, const char __user *pathname) + struct nameidata nd; + struct inode *inode = NULL; + struct inode *delegated_inode = NULL; ++ ino_t saved_ino = 0; ++ dev_t saved_dev = 0; + unsigned int lookup_flags = 0; + retry: + name = user_path_parent(dfd, pathname, &nd, lookup_flags); +@@ -3716,10 +3861,22 @@ retry_deleg: + if (d_is_negative(dentry)) + goto slashes; + ihold(inode); ++ ++ if (inode->i_nlink <= 1) { ++ saved_ino = inode->i_ino; ++ saved_dev = gr_get_dev_from_dentry(dentry); ++ } ++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto exit2; ++ } ++ + error = security_path_unlink(&nd.path, dentry); + if (error) + goto exit2; + error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode); ++ if (!error && (saved_ino || saved_dev)) ++ gr_handle_delete(saved_ino, saved_dev); + exit2: + dput(dentry); + } +@@ -3807,9 +3964,17 @@ retry: + if (IS_ERR(dentry)) + goto out_putname; + ++ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) { ++ error = -EACCES; ++ goto out; ++ } ++ + error = security_path_symlink(&path, dentry, from->name); + if (!error) + error = vfs_symlink(path.dentry->d_inode, dentry, from->name); ++ if (!error) ++ gr_handle_create(dentry, path.mnt); ++out: + done_path_create(&path, dentry); + if (retry_estale(error, lookup_flags)) { + lookup_flags |= LOOKUP_REVAL; +@@ -3912,6 +4077,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, + struct dentry *new_dentry; + struct path old_path, new_path; + struct inode *delegated_inode = NULL; ++ struct filename *to = NULL; + int how = 0; + int error; + +@@ -3935,7 +4101,7 @@ retry: + if (error) + return error; + +- new_dentry = user_path_create(newdfd, newname, &new_path, ++ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, + (how & LOOKUP_REVAL)); + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) +@@ -3947,11 +4113,28 @@ retry: + error = may_linkat(&old_path); + if (unlikely(error)) + goto out_dput; ++ ++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt, ++ old_path.dentry->d_inode, ++ old_path.dentry->d_inode->i_mode, to)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ ++ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt, ++ old_path.dentry, old_path.mnt, to)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ + error = security_path_link(old_path.dentry, &new_path, new_dentry); + if (error) + goto out_dput; + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); ++ if (!error) ++ gr_handle_create(new_dentry, new_path.mnt); + out_dput: ++ putname(to); + done_path_create(&new_path, new_dentry); + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); +@@ -4238,6 +4421,12 @@ retry_deleg: + if (new_dentry == trap) + goto exit5; + ++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt, ++ old_dentry, 
old_dir->d_inode, oldnd.path.mnt, ++ to); ++ if (error) ++ goto exit5; ++ + error = security_path_rename(&oldnd.path, old_dentry, + &newnd.path, new_dentry); + if (error) +@@ -4245,6 +4434,9 @@ retry_deleg: + error = vfs_rename(old_dir->d_inode, old_dentry, + new_dir->d_inode, new_dentry, + &delegated_inode); ++ if (!error) ++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry, ++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0); + exit5: + dput(new_dentry); + exit4: +@@ -4281,6 +4473,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna + + int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) + { ++ char tmpbuf[64]; ++ const char *newlink; + int len; + + len = PTR_ERR(link); +@@ -4290,7 +4484,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c + len = strlen(link); + if (len > (unsigned) buflen) + len = buflen; +- if (copy_to_user(buffer, link, len)) ++ ++ if (len < sizeof(tmpbuf)) { ++ memcpy(tmpbuf, link, len); ++ newlink = tmpbuf; ++ } else ++ newlink = link; ++ ++ if (copy_to_user(buffer, newlink, len)) + len = -EFAULT; + out: + return len; +diff --git a/fs/namespace.c b/fs/namespace.c +index 65233a5..82ac953 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1339,6 +1339,9 @@ static int do_umount(struct mount *mnt, int flags) + if (!(sb->s_flags & MS_RDONLY)) + retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); + up_write(&sb->s_umount); ++ ++ gr_log_remount(mnt->mnt_devname, retval); ++ + return retval; + } + +@@ -1361,6 +1364,9 @@ static int do_umount(struct mount *mnt, int flags) + } + unlock_mount_hash(); + namespace_unlock(); ++ ++ gr_log_unmount(mnt->mnt_devname, retval); ++ + return retval; + } + +@@ -1380,7 +1386,7 @@ static inline bool may_mount(void) + * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD + */ + +-SYSCALL_DEFINE2(umount, char __user *, name, int, flags) ++SYSCALL_DEFINE2(umount, const char __user *, name, int, flags) + { + struct path path; + struct mount *mnt; +@@ -1422,7 +1428,7 @@ out: + /* + * The 2.0 compatible umount. No flags. + */ +-SYSCALL_DEFINE1(oldumount, char __user *, name) ++SYSCALL_DEFINE1(oldumount, const char __user *, name) + { + return sys_umount(name, 0); + } +@@ -2431,6 +2437,16 @@ long do_mount(const char *dev_name, const char *dir_name, + MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | + MS_STRICTATIME); + ++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) { ++ retval = -EPERM; ++ goto dput_out; ++ } ++ ++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) { ++ retval = -EPERM; ++ goto dput_out; ++ } ++ + if (flags & MS_REMOUNT) + retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, + data_page); +@@ -2445,6 +2461,9 @@ long do_mount(const char *dev_name, const char *dir_name, + dev_name, data_page); + dput_out: + path_put(&path); ++ ++ gr_log_mount(dev_name, dir_name, retval); ++ + return retval; + } + +@@ -2462,7 +2481,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) + * number incrementing at 10Ghz will take 12,427 years to wrap which + * is effectively never, so we can ignore the possibility. 
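The mnt_ns_seq hunk below is the patch's standard opt-out, seen throughout this section: a counter whose 64-bit wrap is harmless (per the retained comment, it only has to stay unique) is switched to atomic64_unchecked_t so PaX's reference-counter overflow detection leaves it alone. Assuming the usual fallback in which the _unchecked types behave as plain atomics, the pattern reduces to something like this runnable userspace analogue:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for atomic64_unchecked_t: an atomic with no overflow trap */
    typedef _Atomic uint64_t atomic64_unchecked_t;

    static atomic64_unchecked_t mnt_ns_seq = 1;

    static uint64_t next_mnt_ns_seq(void)
    {
    	/* atomic64_inc_return_unchecked(): new value, wrap permitted */
    	return atomic_fetch_add(&mnt_ns_seq, 1) + 1;
    }

    int main(void)
    {
    	printf("%llu\n", (unsigned long long)next_mnt_ns_seq());	/* 2 */
    	return 0;
    }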
+ */ +-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); ++static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1); + + static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) + { +@@ -2477,7 +2496,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) + kfree(new_ns); + return ERR_PTR(ret); + } +- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq); ++ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq); + atomic_set(&new_ns->count, 1); + new_ns->root = NULL; + INIT_LIST_HEAD(&new_ns->list); +@@ -2487,7 +2506,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) + return new_ns; + } + +-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, ++__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, + struct user_namespace *user_ns, struct fs_struct *new_fs) + { + struct mnt_namespace *new_ns; +@@ -2608,8 +2627,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) + } + EXPORT_SYMBOL(mount_subtree); + +-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, +- char __user *, type, unsigned long, flags, void __user *, data) ++SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name, ++ const char __user *, type, unsigned long, flags, void __user *, data) + { + int ret; + char *kernel_type; +@@ -2722,6 +2741,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + if (error) + goto out2; + ++ if (gr_handle_chroot_pivot()) { ++ error = -EPERM; ++ goto out2; ++ } ++ + get_fs_root(current->fs, &root); + old_mp = lock_mount(&old); + error = PTR_ERR(old_mp); +@@ -2990,7 +3014,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) + !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) + return -EPERM; + +- if (fs->users != 1) ++ if (atomic_read(&fs->users) != 1) + return -EINVAL; + + get_mnt_ns(mnt_ns); +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c +index f4ccfe6..a5cf064 100644 +--- a/fs/nfs/callback_xdr.c ++++ b/fs/nfs/callback_xdr.c +@@ -51,7 +51,7 @@ struct callback_op { + callback_decode_arg_t decode_args; + callback_encode_res_t encode_res; + long res_maxsize; +-}; ++} __do_const; + + static struct callback_op callback_ops[]; + +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 15f9d98..082c625 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -1189,16 +1189,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt + return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); + } + +-static atomic_long_t nfs_attr_generation_counter; ++static atomic_long_unchecked_t nfs_attr_generation_counter; + + static unsigned long nfs_read_attr_generation_counter(void) + { +- return atomic_long_read(&nfs_attr_generation_counter); ++ return atomic_long_read_unchecked(&nfs_attr_generation_counter); + } + + unsigned long nfs_inc_attr_generation_counter(void) + { +- return atomic_long_inc_return(&nfs_attr_generation_counter); ++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter); + } + + void nfs_fattr_init(struct nfs_fattr *fattr) +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index f23a6ca..730ddcc 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -1169,7 +1169,7 @@ struct nfsd4_operation { + nfsd4op_rsize op_rsize_bop; + stateid_getter op_get_currentstateid; + stateid_setter op_set_currentstateid; +-}; ++} __do_const; + + static struct nfsd4_operation nfsd4_ops[]; + +diff --git a/fs/nfsd/nfs4xdr.c 
b/fs/nfsd/nfs4xdr.c +index 8657335..cd3e37f 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1542,7 +1542,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p) + + typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *); + +-static nfsd4_dec nfsd4_dec_ops[] = { ++static const nfsd4_dec nfsd4_dec_ops[] = { + [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access, + [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close, + [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit, +diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c +index f8f060f..d9a7258 100644 +--- a/fs/nfsd/nfscache.c ++++ b/fs/nfsd/nfscache.c +@@ -519,14 +519,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) + { + struct svc_cacherep *rp = rqstp->rq_cacherep; + struct kvec *resv = &rqstp->rq_res.head[0], *cachv; +- int len; ++ long len; + size_t bufsize = 0; + + if (!rp) + return; + +- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); +- len >>= 2; ++ if (statp) { ++ len = (char*)statp - (char*)resv->iov_base; ++ len = resv->iov_len - len; ++ len >>= 2; ++ } + + /* Don't cache excessive amounts of data and XDR failures */ + if (!statp || len > (256 >> 2)) { +@@ -537,7 +540,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) + switch (cachetype) { + case RC_REPLSTAT: + if (len != 1) +- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len); ++ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len); + rp->c_replstat = *statp; + break; + case RC_REPLBUFF: +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index eea5ad1..5a84ac7 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -843,7 +843,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, + } else { + oldfs = get_fs(); + set_fs(KERNEL_DS); +- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset); + set_fs(oldfs); + } + +@@ -934,7 +934,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, + + /* Write the data. 
*/ + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos); ++ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos); + set_fs(oldfs); + if (host_err < 0) + goto out_nfserr; +@@ -1479,7 +1479,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) + */ + + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp); ++ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp); + set_fs(oldfs); + + if (host_err < 0) +diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c +index 52ccd34..7a6b202 100644 +--- a/fs/nls/nls_base.c ++++ b/fs/nls/nls_base.c +@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s); + + int __register_nls(struct nls_table *nls, struct module *owner) + { +- struct nls_table ** tmp = &tables; ++ struct nls_table *tmp = tables; + + if (nls->next) + return -EBUSY; + +- nls->owner = owner; ++ pax_open_kernel(); ++ *(void **)&nls->owner = owner; ++ pax_close_kernel(); + spin_lock(&nls_lock); +- while (*tmp) { +- if (nls == *tmp) { ++ while (tmp) { ++ if (nls == tmp) { + spin_unlock(&nls_lock); + return -EBUSY; + } +- tmp = &(*tmp)->next; ++ tmp = tmp->next; + } +- nls->next = tables; ++ pax_open_kernel(); ++ *(struct nls_table **)&nls->next = tables; ++ pax_close_kernel(); + tables = nls; + spin_unlock(&nls_lock); + return 0; +@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls); + + int unregister_nls(struct nls_table * nls) + { +- struct nls_table ** tmp = &tables; ++ struct nls_table * const * tmp = &tables; + + spin_lock(&nls_lock); + while (*tmp) { + if (nls == *tmp) { +- *tmp = nls->next; ++ pax_open_kernel(); ++ *(struct nls_table **)tmp = nls->next; ++ pax_close_kernel(); + spin_unlock(&nls_lock); + return 0; + } +@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls) + return -EINVAL; + } + +-static struct nls_table *find_nls(char *charset) ++static struct nls_table *find_nls(const char *charset) + { + struct nls_table *nls; + spin_lock(&nls_lock); +@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset) + return nls; + } + +-struct nls_table *load_nls(char *charset) ++struct nls_table *load_nls(const char *charset) + { + return try_then_request_module(find_nls(charset), "nls_%s", charset); + } +diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c +index 162b3f1..6076a7c 100644 +--- a/fs/nls/nls_euc-jp.c ++++ b/fs/nls/nls_euc-jp.c +@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void) + p_nls = load_nls("cp932"); + + if (p_nls) { +- table.charset2upper = p_nls->charset2upper; +- table.charset2lower = p_nls->charset2lower; ++ pax_open_kernel(); ++ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper; ++ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower; ++ pax_close_kernel(); + return register_nls(&table); + } + +diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c +index a80a741..7b96e1b 100644 +--- a/fs/nls/nls_koi8-ru.c ++++ b/fs/nls/nls_koi8-ru.c +@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void) + p_nls = load_nls("koi8-u"); + + if (p_nls) { +- table.charset2upper = p_nls->charset2upper; +- table.charset2lower = p_nls->charset2lower; ++ pax_open_kernel(); ++ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper; ++ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower; ++ pax_close_kernel(); + return register_nls(&table); + } + +diff --git a/fs/notify/fanotify/fanotify_user.c 
b/fs/notify/fanotify/fanotify_user.c +index 287a22c..4e56e4e 100644 +--- a/fs/notify/fanotify/fanotify_user.c ++++ b/fs/notify/fanotify/fanotify_user.c +@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, + + fd = fanotify_event_metadata.fd; + ret = -EFAULT; +- if (copy_to_user(buf, &fanotify_event_metadata, +- fanotify_event_metadata.event_len)) ++ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata || ++ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len)) + goto out_close_fd; + + ret = prepare_for_access_response(group, event, fd); +@@ -742,6 +742,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) + oevent->path.mnt = NULL; + oevent->path.dentry = NULL; + ++ if (force_o_largefile()) ++ event_f_flags |= O_LARGEFILE; + group->fanotify_data.f_flags = event_f_flags; + #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + oevent->response = 0; +diff --git a/fs/notify/notification.c b/fs/notify/notification.c +index 1e58402..bb2d6f4 100644 +--- a/fs/notify/notification.c ++++ b/fs/notify/notification.c +@@ -48,7 +48,7 @@ + #include <linux/fsnotify_backend.h> + #include "fsnotify.h" + +-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); ++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0); + + /** + * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. +@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); + */ + u32 fsnotify_get_cookie(void) + { +- return atomic_inc_return(&fsnotify_sync_cookie); ++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie); + } + EXPORT_SYMBOL_GPL(fsnotify_get_cookie); + +diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c +index 9e38daf..5727cae 100644 +--- a/fs/ntfs/dir.c ++++ b/fs/ntfs/dir.c +@@ -1310,7 +1310,7 @@ find_next_index_buffer: + ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & + ~(s64)(ndir->itype.index.block_size - 1))); + /* Bounds checks. */ +- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { ++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { + ntfs_error(sb, "Out of bounds check failed. Corrupt directory " + "inode 0x%lx or driver bug.", vdir->i_ino); + goto err_out; +diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c +index db9bd8a..8338fb6 100644 +--- a/fs/ntfs/file.c ++++ b/fs/ntfs/file.c +@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages, + char *addr; + size_t total = 0; + unsigned len; +- int left; ++ unsigned left; + + do { + len = PAGE_CACHE_SIZE - ofs; +diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c +index 82650d5..db37dcf 100644 +--- a/fs/ntfs/super.c ++++ b/fs/ntfs/super.c +@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb, + if (!silent) + ntfs_error(sb, "Primary boot sector is invalid."); + } else if (!silent) +- ntfs_error(sb, read_err_str, "primary"); ++ ntfs_error(sb, read_err_str, "%s", "primary"); + if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) { + if (bh_primary) + brelse(bh_primary); +@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb, + goto hotfix_primary_boot_sector; + brelse(bh_backup); + } else if (!silent) +- ntfs_error(sb, read_err_str, "backup"); ++ ntfs_error(sb, read_err_str, "%s", "backup"); + /* Try to read NT3.51- backup boot sector. 
*/ + if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) { + if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*) +@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb, + "sector."); + brelse(bh_backup); + } else if (!silent) +- ntfs_error(sb, read_err_str, "backup"); ++ ntfs_error(sb, read_err_str, "%s", "backup"); + /* We failed. Cleanup and return. */ + if (bh_primary) + brelse(bh_primary); +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c +index 0440134..d52c93a 100644 +--- a/fs/ocfs2/localalloc.c ++++ b/fs/ocfs2/localalloc.c +@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, + goto bail; + } + +- atomic_inc(&osb->alloc_stats.moves); ++ atomic_inc_unchecked(&osb->alloc_stats.moves); + + bail: + if (handle) +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h +index 553f53c..aaf5133 100644 +--- a/fs/ocfs2/ocfs2.h ++++ b/fs/ocfs2/ocfs2.h +@@ -235,11 +235,11 @@ enum ocfs2_vol_state + + struct ocfs2_alloc_stats + { +- atomic_t moves; +- atomic_t local_data; +- atomic_t bitmap_data; +- atomic_t bg_allocs; +- atomic_t bg_extends; ++ atomic_unchecked_t moves; ++ atomic_unchecked_t local_data; ++ atomic_unchecked_t bitmap_data; ++ atomic_unchecked_t bg_allocs; ++ atomic_unchecked_t bg_extends; + }; + + enum ocfs2_local_alloc_state +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c +index 47ae266..6e8b793 100644 +--- a/fs/ocfs2/suballoc.c ++++ b/fs/ocfs2/suballoc.c +@@ -866,7 +866,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, + mlog_errno(status); + goto bail; + } +- atomic_inc(&osb->alloc_stats.bg_extends); ++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends); + + /* You should never ask for this much metadata */ + BUG_ON(bits_wanted > +@@ -1992,7 +1992,7 @@ int ocfs2_claim_metadata(handle_t *handle, + mlog_errno(status); + goto bail; + } +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + *suballoc_loc = res.sr_bg_blkno; + *suballoc_bit_start = res.sr_bit_offset; +@@ -2156,7 +2156,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle, + trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno, + res->sr_bits); + +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + BUG_ON(res->sr_bits != 1); + +@@ -2198,7 +2198,7 @@ int ocfs2_claim_new_inode(handle_t *handle, + mlog_errno(status); + goto bail; + } +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + BUG_ON(res.sr_bits != 1); + +@@ -2302,7 +2302,7 @@ int __ocfs2_claim_clusters(handle_t *handle, + cluster_start, + num_clusters); + if (!status) +- atomic_inc(&osb->alloc_stats.local_data); ++ atomic_inc_unchecked(&osb->alloc_stats.local_data); + } else { + if (min_clusters > (osb->bitmap_cpg - 1)) { + /* The only paths asking for contiguousness +@@ -2328,7 +2328,7 @@ int __ocfs2_claim_clusters(handle_t *handle, + ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, + res.sr_bg_blkno, + res.sr_bit_offset); +- atomic_inc(&osb->alloc_stats.bitmap_data); ++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data); + *num_clusters = res.sr_bits; + } + } +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c +index 49d84f8..4807e0b 100644 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c +@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char 
*buf, int len) + "%10s => GlobalAllocs: %d LocalAllocs: %d " + "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", + "Stats", +- atomic_read(&osb->alloc_stats.bitmap_data), +- atomic_read(&osb->alloc_stats.local_data), +- atomic_read(&osb->alloc_stats.bg_allocs), +- atomic_read(&osb->alloc_stats.moves), +- atomic_read(&osb->alloc_stats.bg_extends)); ++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data), ++ atomic_read_unchecked(&osb->alloc_stats.local_data), ++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs), ++ atomic_read_unchecked(&osb->alloc_stats.moves), ++ atomic_read_unchecked(&osb->alloc_stats.bg_extends)); + + out += snprintf(buf + out, len - out, + "%10s => State: %u Descriptor: %llu Size: %u bits " +@@ -2123,11 +2123,11 @@ static int ocfs2_initialize_super(struct super_block *sb, + spin_lock_init(&osb->osb_xattr_lock); + ocfs2_init_steal_slots(osb); + +- atomic_set(&osb->alloc_stats.moves, 0); +- atomic_set(&osb->alloc_stats.local_data, 0); +- atomic_set(&osb->alloc_stats.bitmap_data, 0); +- atomic_set(&osb->alloc_stats.bg_allocs, 0); +- atomic_set(&osb->alloc_stats.bg_extends, 0); ++ atomic_set_unchecked(&osb->alloc_stats.moves, 0); ++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0); + + /* Copy the blockcheck stats from the superblock probe */ + osb->osb_ecc_stats = *stats; +diff --git a/fs/open.c b/fs/open.c +index 2ed7325..4e77ac3 100644 +--- a/fs/open.c ++++ b/fs/open.c +@@ -32,6 +32,8 @@ + #include <linux/dnotify.h> + #include <linux/compat.h> + ++#define CREATE_TRACE_POINTS ++#include <trace/events/fs.h> + #include "internal.h" + + int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, +@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length) + error = locks_verify_truncate(inode, NULL, length); + if (!error) + error = security_path_truncate(path); ++ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt)) ++ error = -EACCES; + if (!error) + error = do_truncate(path->dentry, length, 0, NULL); + +@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) + error = locks_verify_truncate(inode, f.file, length); + if (!error) + error = security_path_truncate(&f.file->f_path); ++ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt)) ++ error = -EACCES; + if (!error) + error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file); + sb_end_write(inode->i_sb); +@@ -361,6 +367,9 @@ retry: + if (__mnt_is_readonly(path.mnt)) + res = -EROFS; + ++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode)) ++ res = -EACCES; ++ + out_path_release: + path_put(&path); + if (retry_estale(res, lookup_flags)) { +@@ -392,6 +401,8 @@ retry: + if (error) + goto dput_and_out; + ++ gr_log_chdir(path.dentry, path.mnt); ++ + set_fs_pwd(current->fs, &path); + + dput_and_out: +@@ -421,6 +432,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd) + goto out_putf; + + error = inode_permission(inode, MAY_EXEC | MAY_CHDIR); ++ ++ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt)) ++ error = -EPERM; ++ ++ if (!error) ++ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt); ++ + if (!error) + set_fs_pwd(current->fs, &f.file->f_path); + out_putf: +@@ -450,7 +468,13 @@ retry: + if (error) + goto dput_and_out; + ++ if (gr_handle_chroot_chroot(path.dentry, path.mnt)) ++ goto dput_and_out; ++ + 
set_fs_root(current->fs, &path); ++ ++ gr_handle_chroot_chdir(&path); ++ + error = 0; + dput_and_out: + path_put(&path); +@@ -474,6 +498,16 @@ static int chmod_common(struct path *path, umode_t mode) + return error; + retry_deleg: + mutex_lock(&inode->i_mutex); ++ ++ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ + error = security_path_chmod(path, mode); + if (error) + goto out_unlock; +@@ -539,6 +573,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group) + uid = make_kuid(current_user_ns(), user); + gid = make_kgid(current_user_ns(), group); + ++ if (!gr_acl_handle_chown(path->dentry, path->mnt)) ++ return -EACCES; ++ + newattrs.ia_valid = ATTR_CTIME; + if (user != (uid_t) -1) { + if (!uid_valid(uid)) +@@ -982,6 +1019,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) + } else { + fsnotify_open(f); + fd_install(fd, f); ++ trace_do_sys_open(tmp->name, flags, mode); + } + } + putname(tmp); +diff --git a/fs/pipe.c b/fs/pipe.c +index 78fd0d0..f71fc09 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE; + + static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass) + { +- if (pipe->files) ++ if (atomic_read(&pipe->files)) + mutex_lock_nested(&pipe->mutex, subclass); + } + +@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock); + + void pipe_unlock(struct pipe_inode_info *pipe) + { +- if (pipe->files) ++ if (atomic_read(&pipe->files)) + mutex_unlock(&pipe->mutex); + } + EXPORT_SYMBOL(pipe_unlock); +@@ -449,9 +449,9 @@ redo: + } + if (bufs) /* More to do? */ + continue; +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + /* syscall merging: Usually we must not sleep + * if O_NONBLOCK is set, or if we got some data. + * But if a writer sleeps in kernel space, then +@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov, + ret = 0; + __pipe_lock(pipe); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + goto out; +@@ -562,7 +562,7 @@ redo1: + for (;;) { + int bufs; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -653,9 +653,9 @@ redo2: + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + do_wakeup = 0; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + out: + __pipe_unlock(pipe); +@@ -710,7 +710,7 @@ pipe_poll(struct file *filp, poll_table *wait) + mask = 0; + if (filp->f_mode & FMODE_READ) { + mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0; +- if (!pipe->writers && filp->f_version != pipe->w_counter) ++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter) + mask |= POLLHUP; + } + +@@ -720,7 +720,7 @@ pipe_poll(struct file *filp, poll_table *wait) + * Most Unices do not set POLLERR for FIFOs but on Linux they + * behave exactly like pipes for poll(). 
+ */ +- if (!pipe->readers) ++ if (!atomic_read(&pipe->readers)) + mask |= POLLERR; + } + +@@ -732,7 +732,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe) + int kill = 0; + + spin_lock(&inode->i_lock); +- if (!--pipe->files) { ++ if (atomic_dec_and_test(&pipe->files)) { + inode->i_pipe = NULL; + kill = 1; + } +@@ -749,11 +749,11 @@ pipe_release(struct inode *inode, struct file *file) + + __pipe_lock(pipe); + if (file->f_mode & FMODE_READ) +- pipe->readers--; ++ atomic_dec(&pipe->readers); + if (file->f_mode & FMODE_WRITE) +- pipe->writers--; ++ atomic_dec(&pipe->writers); + +- if (pipe->readers || pipe->writers) { ++ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) { + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); +@@ -818,7 +818,7 @@ void free_pipe_info(struct pipe_inode_info *pipe) + kfree(pipe); + } + +-static struct vfsmount *pipe_mnt __read_mostly; ++struct vfsmount *pipe_mnt __read_mostly; + + /* + * pipefs_dname() is called from d_path(). +@@ -848,8 +848,9 @@ static struct inode * get_pipe_inode(void) + goto fail_iput; + + inode->i_pipe = pipe; +- pipe->files = 2; +- pipe->readers = pipe->writers = 1; ++ atomic_set(&pipe->files, 2); ++ atomic_set(&pipe->readers, 1); ++ atomic_set(&pipe->writers, 1); + inode->i_fop = &pipefifo_fops; + + /* +@@ -1028,17 +1029,17 @@ static int fifo_open(struct inode *inode, struct file *filp) + spin_lock(&inode->i_lock); + if (inode->i_pipe) { + pipe = inode->i_pipe; +- pipe->files++; ++ atomic_inc(&pipe->files); + spin_unlock(&inode->i_lock); + } else { + spin_unlock(&inode->i_lock); + pipe = alloc_pipe_info(); + if (!pipe) + return -ENOMEM; +- pipe->files = 1; ++ atomic_set(&pipe->files, 1); + spin_lock(&inode->i_lock); + if (unlikely(inode->i_pipe)) { +- inode->i_pipe->files++; ++ atomic_inc(&inode->i_pipe->files); + spin_unlock(&inode->i_lock); + free_pipe_info(pipe); + pipe = inode->i_pipe; +@@ -1063,10 +1064,10 @@ static int fifo_open(struct inode *inode, struct file *filp) + * opened, even when there is no process writing the FIFO. + */ + pipe->r_counter++; +- if (pipe->readers++ == 0) ++ if (atomic_inc_return(&pipe->readers) == 1) + wake_up_partner(pipe); + +- if (!is_pipe && !pipe->writers) { ++ if (!is_pipe && !atomic_read(&pipe->writers)) { + if ((filp->f_flags & O_NONBLOCK)) { + /* suppress POLLHUP until we have + * seen a writer */ +@@ -1085,14 +1086,14 @@ static int fifo_open(struct inode *inode, struct file *filp) + * errno=ENXIO when there is no process reading the FIFO. + */ + ret = -ENXIO; +- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) ++ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers)) + goto err; + + pipe->w_counter++; +- if (!pipe->writers++) ++ if (atomic_inc_return(&pipe->writers) == 1) + wake_up_partner(pipe); + +- if (!is_pipe && !pipe->readers) { ++ if (!is_pipe && !atomic_read(&pipe->readers)) { + if (wait_for_partner(pipe, &pipe->r_counter)) + goto err_wr; + } +@@ -1106,11 +1107,11 @@ static int fifo_open(struct inode *inode, struct file *filp) + * the process can at least talk to itself. 
+ */ + +- pipe->readers++; +- pipe->writers++; ++ atomic_inc(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe->r_counter++; + pipe->w_counter++; +- if (pipe->readers == 1 || pipe->writers == 1) ++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1) + wake_up_partner(pipe); + break; + +@@ -1124,13 +1125,13 @@ static int fifo_open(struct inode *inode, struct file *filp) + return 0; + + err_rd: +- if (!--pipe->readers) ++ if (atomic_dec_and_test(&pipe->readers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err_wr: +- if (!--pipe->writers) ++ if (atomic_dec_and_test(&pipe->writers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; +diff --git a/fs/posix_acl.c b/fs/posix_acl.c +index 0855f77..6787d50 100644 +--- a/fs/posix_acl.c ++++ b/fs/posix_acl.c +@@ -20,6 +20,7 @@ + #include <linux/xattr.h> + #include <linux/export.h> + #include <linux/user_namespace.h> ++#include <linux/grsecurity.h> + + struct posix_acl **acl_by_type(struct inode *inode, int type) + { +@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p) + } + } + if (mode_p) +- *mode_p = (*mode_p & ~S_IRWXUGO) | mode; ++ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask(); + return not_equiv; + } + EXPORT_SYMBOL(posix_acl_equiv_mode); +@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p) + mode &= (group_obj->e_perm << 3) | ~S_IRWXG; + } + +- *mode_p = (*mode_p & ~S_IRWXUGO) | mode; ++ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask(); + return not_equiv; + } + +@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p) + struct posix_acl *clone = posix_acl_clone(*acl, gfp); + int err = -ENOMEM; + if (clone) { ++ *mode_p &= ~gr_acl_umask(); ++ + err = posix_acl_create_masq(clone, mode_p); + if (err < 0) { + posix_acl_release(clone); +@@ -659,11 +662,12 @@ struct posix_acl * + posix_acl_from_xattr(struct user_namespace *user_ns, + const void *value, size_t size) + { +- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value; +- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end; ++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value; ++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end; + int count; + struct posix_acl *acl; + struct posix_acl_entry *acl_e; ++ umode_t umask = gr_acl_umask(); + + if (!value) + return NULL; +@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns, + + switch(acl_e->e_tag) { + case ACL_USER_OBJ: ++ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6); ++ break; + case ACL_GROUP_OBJ: + case ACL_MASK: ++ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3); ++ break; + case ACL_OTHER: ++ acl_e->e_perm &= ~(umask & S_IRWXO); + break; + + case ACL_USER: ++ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6); + acl_e->e_uid = + make_kuid(user_ns, + le32_to_cpu(entry->e_id)); +@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns, + goto fail; + break; + case ACL_GROUP: ++ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3); + acl_e->e_gid = + make_kgid(user_ns, + le32_to_cpu(entry->e_id)); +diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig +index 2183fcf..3c32a98 100644 +--- a/fs/proc/Kconfig ++++ b/fs/proc/Kconfig +@@ -30,7 +30,7 @@ config PROC_FS + + config PROC_KCORE + bool "/proc/kcore support" if !ARM +- depends on PROC_FS && MMU ++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD + help + Provides a virtual ELF core file 
of the live kernel. This can + be read with gdb and other ELF tools. No modifications can be +@@ -38,8 +38,8 @@ config PROC_KCORE + + config PROC_VMCORE + bool "/proc/vmcore support" +- depends on PROC_FS && CRASH_DUMP +- default y ++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC ++ default n + help + Exports the dump image of crashed kernel in ELF format. + +@@ -63,8 +63,8 @@ config PROC_SYSCTL + limited in memory. + + config PROC_PAGE_MONITOR +- default y +- depends on PROC_FS && MMU ++ default n ++ depends on PROC_FS && MMU && !GRKERNSEC + bool "Enable /proc page monitoring" if EXPERT + help + Various /proc files exist to monitor process memory utilization: +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 656e401..b5b86b9 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -60,6 +60,7 @@ + #include <linux/tty.h> + #include <linux/string.h> + #include <linux/mman.h> ++#include <linux/grsecurity.h> + #include <linux/proc_fs.h> + #include <linux/ioport.h> + #include <linux/uaccess.h> +@@ -356,6 +357,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) + seq_putc(m, '\n'); + } + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline void task_pax(struct seq_file *m, struct task_struct *p) ++{ ++ if (p->mm) ++ seq_printf(m, "PaX:\t%c%c%c%c%c\n", ++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p', ++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e', ++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm', ++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r', ++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's'); ++ else ++ seq_printf(m, "PaX:\t-----\n"); ++} ++#endif ++ + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { +@@ -374,9 +390,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + task_cpus_allowed(m, task); + cpuset_task_status_allowed(m, task); + task_context_switch_counts(m, task); ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ task_pax(m, task); ++#endif ++ ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++ task_grsec_rbac(m, task); ++#endif ++ + return 0; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task, int whole) + { +@@ -398,6 +429,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + char tcomm[sizeof(task->comm)]; + unsigned long flags; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("stat"); ++ return 0; ++ } ++#endif ++ + state = *get_task_state(task); + vsize = eip = esp = 0; + permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT); +@@ -468,6 +506,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + gtime = task_gtime(task); + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (PAX_RAND_FLAGS(mm)) { ++ eip = 0; ++ esp = 0; ++ wchan = 0; ++ } ++#endif ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ wchan = 0; ++ eip =0; ++ esp =0; ++#endif ++ + /* scale priority and nice values from timeslices to -20..20 */ + /* to make it look like a "normal" Unix priority/nice value */ + priority = task_prio(task); +@@ -504,9 +555,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + seq_put_decimal_ull(m, ' ', 
vsize); + seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0); + seq_put_decimal_ull(m, ' ', rsslim); ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0)); ++ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0)); ++ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0)); ++#else + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0); + seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0); ++#endif + seq_put_decimal_ull(m, ' ', esp); + seq_put_decimal_ull(m, ' ', eip); + /* The signal information here is obsolete. +@@ -528,7 +585,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime)); + +- if (mm && permitted) { ++ if (mm && permitted ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ && !PAX_RAND_FLAGS(mm) ++#endif ++ ) { + seq_put_decimal_ull(m, ' ', mm->start_data); + seq_put_decimal_ull(m, ' ', mm->end_data); + seq_put_decimal_ull(m, ' ', mm->start_brk); +@@ -566,8 +627,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { + unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0; +- struct mm_struct *mm = get_task_mm(task); ++ struct mm_struct *mm; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("statm"); ++ return 0; ++ } ++#endif ++ mm = get_task_mm(task); + if (mm) { + size = task_statm(mm, &shared, &text, &data, &resident); + mmput(mm); +@@ -590,6 +658,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + return 0; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++int proc_pid_ipaddr(struct task_struct *task, char *buffer) ++{ ++ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip); ++} ++#endif ++ + #ifdef CONFIG_CHECKPOINT_RESTORE + static struct pid * + get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos) +diff --git a/fs/proc/base.c b/fs/proc/base.c +index b976062..584d0bc 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -113,6 +113,14 @@ struct pid_entry { + union proc_op op; + }; + ++struct getdents_callback { ++ struct linux_dirent __user * current_dir; ++ struct linux_dirent __user * previous; ++ struct file * file; ++ int count; ++ int error; ++}; ++ + #define NOD(NAME, MODE, IOP, FOP, OP) { \ + .name = (NAME), \ + .len = sizeof(NAME) - 1, \ +@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer) + if (!mm->arg_end) + goto out_mm; /* Shh! 
No looking before we're done */ + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out_mm; ++ + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) +@@ -237,12 +248,28 @@ out: + return res; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int proc_pid_auxv(struct task_struct *task, char *buffer) + { + struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ); + int res = PTR_ERR(mm); + if (mm && !IS_ERR(mm)) { + unsigned int nwords = 0; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* allow if we're currently ptracing this task */ ++ if (PAX_RAND_FLAGS(mm) && ++ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) { ++ mmput(mm); ++ return 0; ++ } ++#endif ++ + do { + nwords += 2; + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ +@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer) + } + + +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* + * Provides a wchan file via kallsyms in a proper one-value-per-file format. + * Returns the resolved symbol. If that fails, simply return the address. +@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task) + mutex_unlock(&task->signal->cred_guard_mutex); + } + +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + + #define MAX_STACK_TRACE_DEPTH 64 + +@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer) + return count; + } + +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + static int proc_pid_syscall(struct task_struct *task, char *buffer) + { + long nr; +@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer) + /************************************************************************/ + + /* permission checks */ +-static int proc_fd_access_allowed(struct inode *inode) ++static int proc_fd_access_allowed(struct inode *inode, unsigned int log) + { + struct task_struct *task; + int allowed = 0; +@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode) + */ + task = get_proc_task(inode); + if (task) { +- allowed = ptrace_may_access(task, PTRACE_MODE_READ); ++ if (log) ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ); ++ else ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT); + put_task_struct(task); + } + return allowed; +@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid, + struct task_struct *task, + int hide_pid_min) + { ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ return false; ++ ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_lock(); ++ { ++ const struct cred *tmpcred = current_cred(); ++ const struct cred *cred = __task_cred(task); ++ ++ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ || in_group_p(grsec_proc_gid) ++#endif ++ ) { ++ rcu_read_unlock(); ++ return true; ++ } ++ } ++ rcu_read_unlock(); ++ ++ if (!pid->hide_pid) ++ return false; ++#endif ++ + if (pid->hide_pid < hide_pid_min) + return true; + if (in_group_p(pid->pid_gid)) + return true; ++ + return ptrace_may_access(task, PTRACE_MODE_READ); + } + +@@ -609,7 +664,11 @@ static int proc_pid_permission(struct 
inode *inode, int mask) + put_task_struct(task); + + if (!has_perms) { ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ { ++#else + if (pid->hide_pid == 2) { ++#endif + /* + * Let's make getdents(), stat(), and open() + * consistent with each other. If a process +@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) + if (!task) + return -ESRCH; + ++ if (gr_acl_handle_procpidmem(task)) { ++ put_task_struct(task); ++ return -EPERM; ++ } ++ + mm = mm_access(task, mode); + put_task_struct(task); + +@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) + + file->private_data = mm; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ file->f_version = current->exec_id; ++#endif ++ + return 0; + } + +@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf, + ssize_t copied; + char *page; + ++#ifdef CONFIG_GRKERNSEC ++ if (write) ++ return -EPERM; ++#endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (file->f_version != current->exec_id) { ++ gr_log_badprocpid("mem"); ++ return 0; ++ } ++#endif ++ + if (!mm) + return 0; + +@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, + goto free; + + while (count > 0) { +- int this_len = min_t(int, count, PAGE_SIZE); ++ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE); + + if (write && copy_from_user(page, buf, this_len)) { + copied = -EFAULT; +@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf, + if (!mm) + return 0; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (file->f_version != current->exec_id) { ++ gr_log_badprocpid("environ"); ++ return 0; ++ } ++#endif ++ + page = (char *)__get_free_page(GFP_TEMPORARY); + if (!page) + return -ENOMEM; +@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, + goto free; + while (count > 0) { + size_t this_len, max_len; +- int retval; ++ ssize_t retval; + + if (src >= (mm->env_end - mm->env_start)) + break; +@@ -1467,7 +1553,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) + int error = -EACCES; + + /* Are we allowed to snoop on the tasks file descriptors? */ +- if (!proc_fd_access_allowed(inode)) ++ if (!proc_fd_access_allowed(inode, 0)) + goto out; + + error = PROC_I(inode)->op.proc_get_link(dentry, &path); +@@ -1511,8 +1597,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b + struct path path; + + /* Are we allowed to snoop on the tasks file descriptors? 
*/ +- if (!proc_fd_access_allowed(inode)) +- goto out; ++ /* logging this is needed for learning on chromium to work properly, ++ but we don't want to flood the logs from 'ps' which does a readlink ++ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn ++ CAP_SYS_PTRACE as it's not necessary for its basic functionality ++ */ ++ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') { ++ if (!proc_fd_access_allowed(inode,0)) ++ goto out; ++ } else { ++ if (!proc_fd_access_allowed(inode,1)) ++ goto out; ++ } + + error = PROC_I(inode)->op.proc_get_link(dentry, &path); + if (error) +@@ -1562,7 +1658,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = grsec_proc_gid; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } + security_task_to_inode(task, inode); +@@ -1598,10 +1698,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) + return -ENOENT; + } + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { + cred = __task_cred(task); + stat->uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ stat->gid = grsec_proc_gid; ++#else + stat->gid = cred->egid; ++#endif + } + } + rcu_read_unlock(); +@@ -1639,11 +1748,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags) + + if (task) { + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = grsec_proc_gid; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } else { + inode->i_uid = GLOBAL_ROOT_UID; +@@ -2178,6 +2296,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir, + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + /* + * Yes, it does not scale. And it should not. Don't add + * new entries into /proc/<tgid>/ without very good reasons. 
+@@ -2208,6 +2329,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx, + if (!task) + return -ENOENT; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + if (!dir_emit_dots(file, ctx)) + goto out; + +@@ -2597,7 +2721,7 @@ static const struct pid_entry tgid_base_stuff[] = { + REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations), + #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUGO, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2622,10 +2746,10 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), + #endif +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUGO, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +@@ -2659,6 +2783,9 @@ static const struct pid_entry tgid_base_stuff[] = { + #ifdef CONFIG_HARDWALL + INF("hardwall", S_IRUGO, proc_pid_hardwall), + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr), ++#endif + #ifdef CONFIG_USER_NS + REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), + REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), +@@ -2789,7 +2916,14 @@ static int proc_pid_instantiate(struct inode *dir, + if (!inode) + goto out; + ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ inode->i_gid = grsec_proc_gid; ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP; ++#else + inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; ++#endif + inode->i_op = &proc_tgid_base_inode_operations; + inode->i_fop = &proc_tgid_base_operations; + inode->i_flags|=S_IMMUTABLE; +@@ -2827,7 +2961,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign + if (!task) + goto out; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out_put_task; ++ + result = proc_pid_instantiate(dir, dentry, task, NULL); ++out_put_task: + put_task_struct(task); + out: + return ERR_PTR(result); +@@ -2933,7 +3071,7 @@ static const struct pid_entry tid_base_stuff[] = { + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUGO, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2960,10 +3098,10 @@ static const struct pid_entry tid_base_stuff[] = { + #ifdef CONFIG_SECURITY + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), + #endif +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUGO, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c +index cbd82df..c0407d2 100644 +--- a/fs/proc/cmdline.c ++++ b/fs/proc/cmdline.c +@@ 
-23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = { + + static int __init proc_cmdline_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops); ++#else + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); ++#endif + return 0; + } + fs_initcall(proc_cmdline_init); +diff --git a/fs/proc/devices.c b/fs/proc/devices.c +index 50493ed..248166b 100644 +--- a/fs/proc/devices.c ++++ b/fs/proc/devices.c +@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = { + + static int __init proc_devices_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations); ++#else + proc_create("devices", 0, NULL, &proc_devinfo_operations); ++#endif + return 0; + } + fs_initcall(proc_devices_init); +diff --git a/fs/proc/fd.c b/fs/proc/fd.c +index 985ea88..d118a0a 100644 +--- a/fs/proc/fd.c ++++ b/fs/proc/fd.c +@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v) + if (!task) + return -ENOENT; + +- files = get_files_struct(task); ++ if (!gr_acl_handle_procpidmem(task)) ++ files = get_files_struct(task); + put_task_struct(task); + + if (files) { +@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry, + */ + int proc_fd_permission(struct inode *inode, int mask) + { ++ struct task_struct *task; + int rv = generic_permission(inode, mask); +- if (rv == 0) +- return 0; ++ + if (task_tgid(current) == proc_pid(inode)) + rv = 0; ++ ++ task = get_proc_task(inode); ++ if (task == NULL) ++ return rv; ++ ++ if (gr_acl_handle_procpidmem(task)) ++ rv = -EACCES; ++ ++ put_task_struct(task); ++ + return rv; + } + +diff --git a/fs/proc/generic.c b/fs/proc/generic.c +index b7f268e..3bea6b7 100644 +--- a/fs/proc/generic.c ++++ b/fs/proc/generic.c +@@ -23,6 +23,7 @@ + #include <linux/bitops.h> + #include <linux/spinlock.h> + #include <linux/completion.h> ++#include <linux/grsecurity.h> + #include <asm/uaccess.h> + + #include "internal.h" +@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, + return proc_lookup_de(PDE(dir), dir, dentry); + } + ++struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry, ++ unsigned int flags) ++{ ++ if (gr_proc_is_restricted()) ++ return ERR_PTR(-EACCES); ++ ++ return proc_lookup_de(PDE(dir), dir, dentry); ++} ++ + /* + * This returns non-zero if at EOF, so that the /proc + * root directory can use this and check if it should +@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx) + return proc_readdir_de(PDE(inode), file, ctx); + } + ++int proc_readdir_restrict(struct file *file, struct dir_context *ctx) ++{ ++ struct inode *inode = file_inode(file); ++ ++ if (gr_proc_is_restricted()) ++ return -EACCES; ++ ++ return proc_readdir_de(PDE(inode), file, ctx); ++} ++ + /* + * These are the generic /proc directory operations. They + * use the in-memory "struct proc_dir_entry" tree to parse +@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = { + .iterate = proc_readdir, + }; + ++static const struct file_operations proc_dir_restricted_operations = { ++ .llseek = generic_file_llseek, ++ .read = generic_read_dir, ++ .iterate = proc_readdir_restrict, ++}; ++ + /* + * proc directories can do almost nothing.. 
+ */ +@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = { + .setattr = proc_notify_change, + }; + ++static const struct inode_operations proc_dir_restricted_inode_operations = { ++ .lookup = proc_lookup_restrict, ++ .getattr = proc_getattr, ++ .setattr = proc_notify_change, ++}; ++ + static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp) + { + struct proc_dir_entry *tmp; +@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp + return ret; + + if (S_ISDIR(dp->mode)) { +- dp->proc_fops = &proc_dir_operations; +- dp->proc_iops = &proc_dir_inode_operations; ++ if (dp->restricted) { ++ dp->proc_fops = &proc_dir_restricted_operations; ++ dp->proc_iops = &proc_dir_restricted_inode_operations; ++ } else { ++ dp->proc_fops = &proc_dir_operations; ++ dp->proc_iops = &proc_dir_inode_operations; ++ } + dir->nlink++; + } else if (S_ISLNK(dp->mode)) { + dp->proc_iops = &proc_link_inode_operations; +@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, + } + EXPORT_SYMBOL_GPL(proc_mkdir_data); + ++struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode, ++ struct proc_dir_entry *parent, void *data) ++{ ++ struct proc_dir_entry *ent; ++ ++ if (mode == 0) ++ mode = S_IRUGO | S_IXUGO; ++ ++ ent = __proc_create(&parent, name, S_IFDIR | mode, 2); ++ if (ent) { ++ ent->data = data; ++ ent->restricted = 1; ++ if (proc_register(parent, ent) < 0) { ++ kfree(ent); ++ ent = NULL; ++ } ++ } ++ return ent; ++} ++EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict); ++ + struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, + struct proc_dir_entry *parent) + { +@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name, + } + EXPORT_SYMBOL(proc_mkdir); + ++struct proc_dir_entry *proc_mkdir_restrict(const char *name, ++ struct proc_dir_entry *parent) ++{ ++ return proc_mkdir_data_restrict(name, 0, parent, NULL); ++} ++EXPORT_SYMBOL(proc_mkdir_restrict); ++ + struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, + struct proc_dir_entry *parent, + const struct file_operations *proc_fops, +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 124fc43..8afbb02 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -23,11 +23,17 @@ + #include <linux/slab.h> + #include <linux/mount.h> + #include <linux/magic.h> ++#include <linux/grsecurity.h> + + #include <asm/uaccess.h> + + #include "internal.h" + ++#ifdef CONFIG_PROC_SYSCTL ++extern const struct inode_operations proc_sys_inode_operations; ++extern const struct inode_operations proc_sys_dir_operations; ++#endif ++ + static void proc_evict_inode(struct inode *inode) + { + struct proc_dir_entry *de; +@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode) + ns = PROC_I(inode)->ns.ns; + if (ns_ops && ns) + ns_ops->put(ns); ++ ++#ifdef CONFIG_PROC_SYSCTL ++ if (inode->i_op == &proc_sys_inode_operations || ++ inode->i_op == &proc_sys_dir_operations) ++ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev); ++#endif ++ + } + + static struct kmem_cache * proc_inode_cachep; +@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + if (de->mode) { + inode->i_mode = de->mode; + inode->i_uid = de->uid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = grsec_proc_gid; ++#else + inode->i_gid = de->gid; ++#endif + } + if (de->size) + inode->i_size = de->size; +diff --git a/fs/proc/internal.h b/fs/proc/internal.h 
+index 651d09a..6a4b495 100644 +--- a/fs/proc/internal.h ++++ b/fs/proc/internal.h +@@ -46,9 +46,10 @@ struct proc_dir_entry { + struct completion *pde_unload_completion; + struct list_head pde_openers; /* who did ->open, but not ->release */ + spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ ++ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */ + u8 namelen; + char name[]; +-}; ++} __randomize_layout; + + union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); +@@ -67,7 +68,7 @@ struct proc_inode { + struct ctl_table *sysctl_entry; + struct proc_ns ns; + struct inode vfs_inode; +-}; ++} __randomize_layout; + + /* + * General functions +@@ -155,6 +156,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *, + struct pid *, struct task_struct *); + extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, + struct pid *, struct task_struct *); ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer); ++#endif + + /* + * base.c +@@ -181,9 +185,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i + extern spinlock_t proc_subdir_lock; + + extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); ++extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int); + extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *, + struct dentry *); + extern int proc_readdir(struct file *, struct dir_context *); ++extern int proc_readdir_restrict(struct file *, struct dir_context *); + extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *); + + static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) +diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c +index a352d57..cb94a5c 100644 +--- a/fs/proc/interrupts.c ++++ b/fs/proc/interrupts.c +@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = { + + static int __init proc_interrupts_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations); ++#else + proc_create("interrupts", 0, NULL, &proc_interrupts_operations); ++#endif + return 0; + } + fs_initcall(proc_interrupts_init); +diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c +index 39e6ef3..2f9cb5e 100644 +--- a/fs/proc/kcore.c ++++ b/fs/proc/kcore.c +@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + * the addresses in the elf_phdr on our list. + */ + start = kc_offset_to_vaddr(*fpos - elf_buflen); +- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) ++ tsz = PAGE_SIZE - (start & ~PAGE_MASK); ++ if (tsz > buflen) + tsz = buflen; +- ++ + while (buflen) { + struct kcore_list *m; + +@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + kfree(elf_buf); + } else { + if (kern_addr_valid(start)) { +- unsigned long n; ++ char *elf_buf; ++ mm_segment_t oldfs; + +- n = copy_to_user(buffer, (char *)start, tsz); +- /* +- * We cannot distinguish between fault on source +- * and fault on destination. When this happens +- * we clear too and hope it will trigger the +- * EFAULT again. 
+- */ +- if (n) { +- if (clear_user(buffer + tsz - n, +- n)) ++ elf_buf = kmalloc(tsz, GFP_KERNEL); ++ if (!elf_buf) ++ return -ENOMEM; ++ oldfs = get_fs(); ++ set_fs(KERNEL_DS); ++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) { ++ set_fs(oldfs); ++ if (copy_to_user(buffer, elf_buf, tsz)) { ++ kfree(elf_buf); + return -EFAULT; ++ } + } ++ set_fs(oldfs); ++ kfree(elf_buf); + } else { + if (clear_user(buffer, tsz)) + return -EFAULT; +@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) + + static int open_kcore(struct inode *inode, struct file *filp) + { ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ return -EPERM; ++#endif + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (kcore_need_update) +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c +index 136e548..1f88922 100644 +--- a/fs/proc/meminfo.c ++++ b/fs/proc/meminfo.c +@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + vmi.used >> 10, + vmi.largest_chunk >> 10 + #ifdef CONFIG_MEMORY_FAILURE +- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10) ++ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10) + #endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * +diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c +index d4a3574..b421ce9 100644 +--- a/fs/proc/nommu.c ++++ b/fs/proc/nommu.c +@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) + + if (file) { + seq_pad(m, ' '); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\\"); + } + + seq_putc(m, '\n'); +diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c +index 4677bb7..dad3045 100644 +--- a/fs/proc/proc_net.c ++++ b/fs/proc/proc_net.c +@@ -23,9 +23,27 @@ + #include <linux/nsproxy.h> + #include <net/net_namespace.h> + #include <linux/seq_file.h> ++#include <linux/grsecurity.h> + + #include "internal.h" + ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++static struct seq_operations *ipv6_seq_ops_addr; ++ ++void register_ipv6_seq_ops_addr(struct seq_operations *addr) ++{ ++ ipv6_seq_ops_addr = addr; ++} ++ ++void unregister_ipv6_seq_ops_addr(void) ++{ ++ ipv6_seq_ops_addr = NULL; ++} ++ ++EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr); ++EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr); ++#endif ++ + static inline struct net *PDE_NET(struct proc_dir_entry *pde) + { + return pde->parent->data; +@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode) + return maybe_get_net(PDE_NET(PDE(inode))); + } + ++extern const struct seq_operations dev_seq_ops; ++ + int seq_open_net(struct inode *ino, struct file *f, + const struct seq_operations *ops, int size) + { +@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f, + + BUG_ON(size < sizeof(*p)); + ++ /* only permit access to /proc/net/dev */ ++ if ( ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ ops != ipv6_seq_ops_addr && ++#endif ++ ops != &dev_seq_ops && gr_proc_is_restricted()) ++ return -EACCES; ++ + net = get_proc_net(ino); + if (net == NULL) + return -ENXIO; +@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file, + int err; + struct net *net; + ++ if (gr_proc_is_restricted()) ++ return -EACCES; ++ + err = -ENXIO; + net = get_proc_net(inode); + if (net == NULL) +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index 7129046..6914844 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c 
+@@ -11,13 +11,21 @@ + #include <linux/namei.h> + #include <linux/mm.h> + #include <linux/module.h> ++#include <linux/nsproxy.h> ++#ifdef CONFIG_GRKERNSEC ++#include <net/net_namespace.h> ++#endif + #include "internal.h" + ++extern int gr_handle_chroot_sysctl(const int op); ++extern int gr_handle_sysctl_mod(const char *dirname, const char *name, ++ const int op); ++ + static const struct dentry_operations proc_sys_dentry_operations; + static const struct file_operations proc_sys_file_operations; +-static const struct inode_operations proc_sys_inode_operations; ++const struct inode_operations proc_sys_inode_operations; + static const struct file_operations proc_sys_dir_file_operations; +-static const struct inode_operations proc_sys_dir_operations; ++const struct inode_operations proc_sys_dir_operations; + + void proc_sys_poll_notify(struct ctl_table_poll *poll) + { +@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, + + err = NULL; + d_set_d_op(dentry, &proc_sys_dentry_operations); ++ ++ gr_handle_proc_create(dentry, inode); ++ + d_add(dentry, inode); + + out: +@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, + struct inode *inode = file_inode(filp); + struct ctl_table_header *head = grab_header(inode); + struct ctl_table *table = PROC_I(inode)->sysctl_entry; ++ int op = write ? MAY_WRITE : MAY_READ; + ssize_t error; + size_t res; + +@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, + * and won't be until we finish. + */ + error = -EPERM; +- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ)) ++ if (sysctl_perm(head, table, op)) + goto out; + + /* if that can happen at all, it should be -EINVAL, not -EISDIR */ +@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, + if (!table->proc_handler) + goto out; + ++#ifdef CONFIG_GRKERNSEC ++ error = -EPERM; ++ if (gr_handle_chroot_sysctl(op)) ++ goto out; ++ dget(filp->f_path.dentry); ++ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) { ++ dput(filp->f_path.dentry); ++ goto out; ++ } ++ dput(filp->f_path.dentry); ++ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op)) ++ goto out; ++ if (write) { ++ if (current->nsproxy->net_ns != table->extra2) { ++ if (!capable(CAP_SYS_ADMIN)) ++ goto out; ++ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN)) ++ goto out; ++ } ++#endif ++ + /* careful: calling conventions are nasty here */ + res = count; + error = table->proc_handler(table, write, buf, &res, ppos); +@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file, + return false; + } else { + d_set_d_op(child, &proc_sys_dentry_operations); ++ ++ gr_handle_proc_create(child, inode); ++ + d_add(child, inode); + } + } else { +@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table, + if ((*pos)++ < ctx->pos) + return true; + ++ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt)) ++ return 0; ++ + if (unlikely(S_ISLNK(table->mode))) + res = proc_sys_link_fill_cache(file, ctx, head, table); + else +@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct + if (IS_ERR(head)) + return PTR_ERR(head); + ++ if (table && !gr_acl_handle_hidden_file(dentry, mnt)) ++ return -ENOENT; ++ + generic_fillattr(inode, stat); + if (table) + stat->mode = (stat->mode & S_IFMT) | table->mode; +@@ -756,13 +798,13 
@@ static const struct file_operations proc_sys_dir_file_operations = { + .llseek = generic_file_llseek, + }; + +-static const struct inode_operations proc_sys_inode_operations = { ++const struct inode_operations proc_sys_inode_operations = { + .permission = proc_sys_permission, + .setattr = proc_sys_setattr, + .getattr = proc_sys_getattr, + }; + +-static const struct inode_operations proc_sys_dir_operations = { ++const struct inode_operations proc_sys_dir_operations = { + .lookup = proc_sys_lookup, + .permission = proc_sys_permission, + .setattr = proc_sys_setattr, +@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir, + static struct ctl_dir *new_dir(struct ctl_table_set *set, + const char *name, int namelen) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + struct ctl_dir *new; + struct ctl_node *node; + char *new_name; +@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set, + return NULL; + + node = (struct ctl_node *)(new + 1); +- table = (struct ctl_table *)(node + 1); ++ table = (ctl_table_no_const *)(node + 1); + new_name = (char *)(table + 2); + memcpy(new_name, name, namelen); + new_name[namelen] = '\0'; +@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) + static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table, + struct ctl_table_root *link_root) + { +- struct ctl_table *link_table, *entry, *link; ++ ctl_table_no_const *link_table, *link; ++ struct ctl_table *entry; + struct ctl_table_header *links; + struct ctl_node *node; + char *link_name; +@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table + return NULL; + + node = (struct ctl_node *)(links + 1); +- link_table = (struct ctl_table *)(node + nr_entries); ++ link_table = (ctl_table_no_const *)(node + nr_entries); + link_name = (char *)&link_table[nr_entries + 1]; + + for (link = link_table, entry = table; entry->procname; link++, entry++) { +@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos, + struct ctl_table_header ***subheader, struct ctl_table_set *set, + struct ctl_table *table) + { +- struct ctl_table *ctl_table_arg = NULL; +- struct ctl_table *entry, *files; ++ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL; ++ struct ctl_table *entry; + int nr_files = 0; + int nr_dirs = 0; + int err = -ENOMEM; +@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos, + nr_files++; + } + +- files = table; + /* If there are mixed files and directories we need a new table */ + if (nr_dirs && nr_files) { +- struct ctl_table *new; ++ ctl_table_no_const *new; + files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1), + GFP_KERNEL); + if (!files) +@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos, + /* Register everything except a directory full of subdirectories */ + if (nr_files || !nr_dirs) { + struct ctl_table_header *header; +- header = __register_sysctl_table(set, path, files); ++ header = __register_sysctl_table(set, path, files ? 
files : table); + if (!header) { + kfree(ctl_table_arg); + goto out; +diff --git a/fs/proc/root.c b/fs/proc/root.c +index 87dbcbe..55e1b4d 100644 +--- a/fs/proc/root.c ++++ b/fs/proc/root.c +@@ -186,7 +186,15 @@ void __init proc_root_init(void) + #ifdef CONFIG_PROC_DEVICETREE + proc_device_tree_init(); + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_mkdir("bus", NULL); ++#endif + proc_sys_init(); + } + +diff --git a/fs/proc/stat.c b/fs/proc/stat.c +index 6f599c6..bd00271 100644 +--- a/fs/proc/stat.c ++++ b/fs/proc/stat.c +@@ -11,6 +11,7 @@ + #include <linux/irqnr.h> + #include <asm/cputime.h> + #include <linux/tick.h> ++#include <linux/grsecurity.h> + + #ifndef arch_irq_stat_cpu + #define arch_irq_stat_cpu(cpu) 0 +@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v) + u64 sum_softirq = 0; + unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; + struct timespec boottime; ++ int unrestricted = 1; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ && !in_group_p(grsec_proc_gid) ++#endif ++ ) ++ unrestricted = 0; ++#endif ++#endif + + user = nice = system = idle = iowait = + irq = softirq = steal = 0; +@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v) + nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; + system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(i); +- iowait += get_iowait_time(i); +- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; +- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; +- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; +- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; +- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; +- sum += kstat_cpu_irqs_sum(i); +- sum += arch_irq_stat_cpu(i); ++ if (unrestricted) { ++ iowait += get_iowait_time(i); ++ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; ++ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; ++ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; ++ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; ++ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; ++ sum += kstat_cpu_irqs_sum(i); ++ sum += arch_irq_stat_cpu(i); ++ for (j = 0; j < NR_SOFTIRQS; j++) { ++ unsigned int softirq_stat = kstat_softirqs_cpu(j, i); + +- for (j = 0; j < NR_SOFTIRQS; j++) { +- unsigned int softirq_stat = kstat_softirqs_cpu(j, i); +- +- per_softirq_sums[j] += softirq_stat; +- sum_softirq += softirq_stat; ++ per_softirq_sums[j] += softirq_stat; ++ sum_softirq += softirq_stat; ++ } + } + } +- sum += arch_irq_stat(); ++ if (unrestricted) ++ sum += arch_irq_stat(); + + seq_puts(p, "cpu "); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); +@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v) + nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; + system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(i); +- iowait = get_iowait_time(i); +- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; +- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; +- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; +- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; +- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; ++ if (unrestricted) { ++ iowait = get_iowait_time(i); ++ irq 
= kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; ++ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; ++ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; ++ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; ++ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; ++ } + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); +@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v) + + /* sum again ? it could be updated? */ + for_each_irq_nr(j) +- seq_put_decimal_ull(p, ' ', kstat_irqs(j)); ++ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL); + + seq_printf(p, + "\nctxt %llu\n" +@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v) + "processes %lu\n" + "procs_running %lu\n" + "procs_blocked %lu\n", +- nr_context_switches(), ++ unrestricted ? nr_context_switches() : 0ULL, + (unsigned long)jif, +- total_forks, +- nr_running(), +- nr_iowait()); ++ unrestricted ? total_forks : 0UL, ++ unrestricted ? nr_running() : 0UL, ++ unrestricted ? nr_iowait() : 0UL); + + seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 8f78819..ba6c272 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -12,12 +12,19 @@ + #include <linux/swap.h> + #include <linux/swapops.h> + #include <linux/mmu_notifier.h> ++#include <linux/grsecurity.h> + + #include <asm/elf.h> + #include <asm/uaccess.h> + #include <asm/tlbflush.h> + #include "internal.h" + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + void task_mem(struct seq_file *m, struct mm_struct *mm) + { + unsigned long data, text, lib, swap; +@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + "VmExe:\t%8lu kB\n" + "VmLib:\t%8lu kB\n" + "VmPTE:\t%8lu kB\n" +- "VmSwap:\t%8lu kB\n", +- hiwater_vm << (PAGE_SHIFT-10), ++ "VmSwap:\t%8lu kB\n" ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ "CsBase:\t%8lx\nCsLim:\t%8lx\n" ++#endif ++ ++ ,hiwater_vm << (PAGE_SHIFT-10), + total_vm << (PAGE_SHIFT-10), + mm->locked_vm << (PAGE_SHIFT-10), + mm->pinned_vm << (PAGE_SHIFT-10), +@@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + mm->stack_vm << (PAGE_SHIFT-10), text, lib, + (PTRS_PER_PTE * sizeof(pte_t) * + atomic_long_read(&mm->nr_ptes)) >> 10, +- swap << (PAGE_SHIFT-10)); ++ swap << (PAGE_SHIFT-10) ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base ++ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit ++#else ++ , mm->context.user_cs_base ++ , mm->context.user_cs_limit ++#endif ++#endif ++ ++ ); + } + + unsigned long task_vsize(struct mm_struct *mm) +@@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) + pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; + } + +- /* We don't show the stack guard page in /proc/maps */ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start; ++ end = PAX_RAND_FLAGS(mm) ? 
0UL : vma->vm_end; ++#else + start = vma->vm_start; +- if (stack_guard_page_start(vma, start)) +- start += PAGE_SIZE; + end = vma->vm_end; +- if (stack_guard_page_end(vma, end)) +- end -= PAGE_SIZE; ++#endif + + seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", +@@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) + flags & VM_WRITE ? 'w' : '-', + flags & VM_EXEC ? 'x' : '-', + flags & VM_MAYSHARE ? 's' : 'p', ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff, ++#else + pgoff, ++#endif + MAJOR(dev), MINOR(dev), ino); + + /* +@@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) + */ + if (file) { + seq_pad(m, ' '); +- seq_path(m, &file->f_path, "\n"); ++ seq_path(m, &file->f_path, "\n\\"); + goto done; + } + +@@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) + * Thread stack in /proc/PID/task/TID/maps or + * the main process stack. + */ +- if (!is_pid || (vma->vm_start <= mm->start_stack && +- vma->vm_end >= mm->start_stack)) { ++ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) || ++ (vma->vm_start <= mm->start_stack && ++ vma->vm_end >= mm->start_stack)) { + name = "[stack]"; + } else { + /* Thread stack in /proc/PID/maps */ +@@ -346,6 +375,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid) + struct proc_maps_private *priv = m->private; + struct task_struct *task = priv->task; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("maps"); ++ return 0; ++ } ++#endif ++ + show_map_vma(m, vma, is_pid); + + if (m->count < m->size) /* vma is copied successfully */ +@@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) + .private = &mss, + }; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("smaps"); ++ return 0; ++ } ++#endif + memset(&mss, 0, sizeof mss); +- mss.vma = vma; +- /* mmap_sem is held in m_start */ +- if (vma->vm_mm && !is_vm_hugetlb_page(vma)) +- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); +- ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!PAX_RAND_FLAGS(vma->vm_mm)) { ++#endif ++ mss.vma = vma; ++ /* mmap_sem is held in m_start */ ++ if (vma->vm_mm && !is_vm_hugetlb_page(vma)) ++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ } ++#endif + show_map_vma(m, vma, is_pid); + + seq_printf(m, +@@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) + "KernelPageSize: %8lu kB\n" + "MMUPageSize: %8lu kB\n" + "Locked: %8lu kB\n", ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10, ++#else + (vma->vm_end - vma->vm_start) >> 10, ++#endif + mss.resident >> 10, + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), + mss.shared_clean >> 10, +@@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) + char buffer[64]; + int nid; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (current->exec_id != m->exec_id) { ++ gr_log_badprocpid("numa_maps"); ++ return 0; ++ } ++#endif ++ + if (!mm) + return 0; + +@@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) + mpol_to_str(buffer, sizeof(buffer), pol); + mpol_cond_put(pol); + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 
0UL : vma->vm_start, buffer); ++#else + seq_printf(m, "%08lx %s", vma->vm_start, buffer); ++#endif + + if (file) { + seq_printf(m, " file="); +- seq_path(m, &file->f_path, "\n\t= "); ++ seq_path(m, &file->f_path, "\n\t\\= "); + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { + seq_printf(m, " heap"); + } else { +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c +index 678455d..ebd3245 100644 +--- a/fs/proc/task_nommu.c ++++ b/fs/proc/task_nommu.c +@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) + else + bytes += kobjsize(mm); + +- if (current->fs && current->fs->users > 1) ++ if (current->fs && atomic_read(¤t->fs->users) > 1) + sbytes += kobjsize(current->fs); + else + bytes += kobjsize(current->fs); +@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, + + if (file) { + seq_pad(m, ' '); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\\"); + } else if (mm) { + pid_t tid = vm_is_stack(priv->task, vma, is_pid); + +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c +index 88d4585..c10bfeb 100644 +--- a/fs/proc/vmcore.c ++++ b/fs/proc/vmcore.c +@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count, + nr_bytes = count; + + /* If pfn is not ram, return zeros for sparse dump files */ +- if (pfn_is_ram(pfn) == 0) +- memset(buf, 0, nr_bytes); +- else { ++ if (pfn_is_ram(pfn) == 0) { ++ if (userbuf) { ++ if (clear_user((char __force_user *)buf, nr_bytes)) ++ return -EFAULT; ++ } else ++ memset(buf, 0, nr_bytes); ++ } else { + tmp = copy_oldmem_page(pfn, buf, nr_bytes, + offset, userbuf); + if (tmp < 0) +@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, + static int copy_to(void *target, void *src, size_t size, int userbuf) + { + if (userbuf) { +- if (copy_to_user((char __user *) target, src, size)) ++ if (copy_to_user((char __force_user *) target, src, size)) + return -EFAULT; + } else { + memcpy(target, src, size); +@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos, + if (*fpos < m->offset + m->size) { + tsz = min_t(size_t, m->offset + m->size - *fpos, buflen); + start = m->paddr + *fpos - m->offset; +- tmp = read_from_oldmem(buffer, tsz, &start, userbuf); ++ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf); + if (tmp < 0) + return tmp; + buflen -= tsz; +@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos, + static ssize_t read_vmcore(struct file *file, char __user *buffer, + size_t buflen, loff_t *fpos) + { +- return __read_vmcore((__force char *) buffer, buflen, fpos, 1); ++ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1); + } + + /* +diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h +index b00fcc9..e0c6381 100644 +--- a/fs/qnx6/qnx6.h ++++ b/fs/qnx6/qnx6.h +@@ -74,7 +74,7 @@ enum { + BYTESEX_BE, + }; + +-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n) ++static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n) + { + if (sbi->s_bytesex == BYTESEX_LE) + return le64_to_cpu((__force __le64)n); +@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n) + return (__force __fs64)cpu_to_be64(n); + } + +-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n) ++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n) + { + if (sbi->s_bytesex == BYTESEX_LE) + return 
le32_to_cpu((__force __le32)n); +diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c +index 72d2917..c917c12 100644 +--- a/fs/quota/netlink.c ++++ b/fs/quota/netlink.c +@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = { + void quota_send_warning(struct kqid qid, dev_t dev, + const char warntype) + { +- static atomic_t seq; ++ static atomic_unchecked_t seq; + struct sk_buff *skb; + void *msg_head; + int ret; +@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev, + "VFS: Not enough memory to send quota warning.\n"); + return; + } +- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), ++ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq), + "a_genl_family, 0, QUOTA_NL_C_WARNING); + if (!msg_head) { + printk(KERN_ERR +diff --git a/fs/read_write.c b/fs/read_write.c +index 28cc9c8..208e4fb 100644 +--- a/fs/read_write.c ++++ b/fs/read_write.c +@@ -450,7 +450,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t + + old_fs = get_fs(); + set_fs(get_ds()); +- p = (__force const char __user *)buf; ++ p = (const char __force_user *)buf; + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; + if (file->f_op->write) +diff --git a/fs/readdir.c b/fs/readdir.c +index 5b53d99..a6c3049 100644 +--- a/fs/readdir.c ++++ b/fs/readdir.c +@@ -17,6 +17,7 @@ + #include <linux/security.h> + #include <linux/syscalls.h> + #include <linux/unistd.h> ++#include <linux/namei.h> + + #include <asm/uaccess.h> + +@@ -69,6 +70,7 @@ struct old_linux_dirent { + struct readdir_callback { + struct dir_context ctx; + struct old_linux_dirent __user * dirent; ++ struct file * file; + int result; + }; + +@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, + if (!f.file) + return -EBADF; + ++ buf.file = f.file; + error = iterate_dir(f.file, &buf.ctx); + if (buf.result) + error = buf.result; +@@ -142,6 +149,7 @@ struct getdents_callback { + struct dir_context ctx; + struct linux_dirent __user * current_dir; + struct linux_dirent __user * previous; ++ struct file * file; + int count; + int error; + }; +@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset, + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, + if (!f.file) + return -EBADF; + ++ buf.file = f.file; + error = iterate_dir(f.file, &buf.ctx); + if (error >= 0) + error = buf.error; +@@ -226,6 +239,7 @@ struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 __user * current_dir; + struct linux_dirent64 __user * previous; ++ struct file *file; + int count; + int error; + }; +@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset, + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, + if (!f.file) + return -EBADF; + ++ buf.file = f.file; + error = iterate_dir(f.file, &buf.ctx); + if (error >= 0) + error = buf.error; +diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c +index 9a3c68c..767933e 100644 +--- a/fs/reiserfs/do_balan.c ++++ b/fs/reiserfs/do_balan.c +@@ -1546,7 +1546,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */ + return; + } + +- atomic_inc(&(fs_generation(tb->tb_sb))); ++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb))); + do_balance_starts(tb); + + /* balance leaf returns 0 except if combining L R and S into +diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c +index ee382ef..f4eb6eb5 100644 +--- a/fs/reiserfs/item_ops.c ++++ b/fs/reiserfs/item_ops.c +@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi) + } + + static struct item_operations errcatch_ops = { +- errcatch_bytes_number, +- errcatch_decrement_key, +- errcatch_is_left_mergeable, +- errcatch_print_item, +- errcatch_check_item, ++ .bytes_number = errcatch_bytes_number, ++ .decrement_key = errcatch_decrement_key, ++ .is_left_mergeable = errcatch_is_left_mergeable, ++ .print_item = errcatch_print_item, ++ .check_item = errcatch_check_item, + +- errcatch_create_vi, +- errcatch_check_left, +- errcatch_check_right, +- errcatch_part_size, +- errcatch_unit_num, +- errcatch_print_vi ++ .create_vi = errcatch_create_vi, ++ .check_left = errcatch_check_left, ++ .check_right = errcatch_check_right, ++ .part_size = errcatch_part_size, ++ .unit_num = errcatch_unit_num, ++ .print_vi = errcatch_print_vi + }; + + ////////////////////////////////////////////////////////////////////////////// +diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c +index 02b0b7d..c85018b 100644 +--- a/fs/reiserfs/procfs.c ++++ b/fs/reiserfs/procfs.c +@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused) + "SMALL_TAILS " : "NO_TAILS ", + replay_only(sb) ? "REPLAY_ONLY " : "", + convert_reiserfs(sb) ? "CONV " : "", +- atomic_read(&r->s_generation_counter), ++ atomic_read_unchecked(&r->s_generation_counter), + SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), + SF(s_do_balance), SF(s_unneeded_left_neighbor), + SF(s_good_search_by_key_reada), SF(s_bmaps), +diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h +index 8d06adf..7e1c9f8 100644 +--- a/fs/reiserfs/reiserfs.h ++++ b/fs/reiserfs/reiserfs.h +@@ -453,7 +453,7 @@ struct reiserfs_sb_info { + /* Comment? -Hans */ + wait_queue_head_t s_wait; + /* To be obsoleted soon by per buffer seals.. -Hans */ +- atomic_t s_generation_counter; // increased by one every time the ++ atomic_unchecked_t s_generation_counter; // increased by one every time the + // tree gets re-balanced + unsigned long s_properties; /* File system properties. 
Currently holds + on-disk FS format */ +@@ -1972,7 +1972,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode) + #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ + + #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) +-#define get_generation(s) atomic_read (&fs_generation(s)) ++#define get_generation(s) atomic_read_unchecked (&fs_generation(s)) + #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) + #define __fs_changed(gen,s) (gen != get_generation (s)) + #define fs_changed(gen,s) \ +diff --git a/fs/select.c b/fs/select.c +index 467bb1c..cf9d65a 100644 +--- a/fs/select.c ++++ b/fs/select.c +@@ -20,6 +20,7 @@ + #include <linux/export.h> + #include <linux/slab.h> + #include <linux/poll.h> ++#include <linux/security.h> + #include <linux/personality.h> /* for STICKY_TIMEOUTS */ + #include <linux/file.h> + #include <linux/fdtable.h> +@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, + struct poll_list *walk = head; + unsigned long todo = nfds; + ++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1); + if (nfds > rlimit(RLIMIT_NOFILE)) + return -EINVAL; + +diff --git a/fs/seq_file.c b/fs/seq_file.c +index 1d641bb..9ca7f61 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -10,6 +10,8 @@ + #include <linux/seq_file.h> + #include <linux/slab.h> + #include <linux/cred.h> ++#include <linux/sched.h> ++#include <linux/grsecurity.h> + + #include <asm/uaccess.h> + #include <asm/page.h> +@@ -60,6 +62,9 @@ int seq_open(struct file *file, const struct seq_operations *op) + #ifdef CONFIG_USER_NS + p->user_ns = file->f_cred->user_ns; + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ p->exec_id = current->exec_id; ++#endif + + /* + * Wrappers around seq_open(e.g. swaps_open) need to be +@@ -82,6 +87,16 @@ int seq_open(struct file *file, const struct seq_operations *op) + } + EXPORT_SYMBOL(seq_open); + ++ ++int seq_open_restrict(struct file *file, const struct seq_operations *op) ++{ ++ if (gr_proc_is_restricted()) ++ return -EACCES; ++ ++ return seq_open(file, op); ++} ++EXPORT_SYMBOL(seq_open_restrict); ++ + static int traverse(struct seq_file *m, loff_t offset) + { + loff_t pos = 0, index; +@@ -96,7 +111,7 @@ static int traverse(struct seq_file *m, loff_t offset) + return 0; + } + if (!m->buf) { +- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); ++ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY); + if (!m->buf) + return -ENOMEM; + } +@@ -137,7 +152,7 @@ Eoverflow: + m->op->stop(m, p); + kfree(m->buf); + m->count = 0; +- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); ++ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY); + return !m->buf ? 
-ENOMEM : -EAGAIN; + } + +@@ -153,7 +168,7 @@ Eoverflow: + ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) + { + struct seq_file *m = file->private_data; +- size_t copied = 0; ++ ssize_t copied = 0; + loff_t pos; + size_t n; + void *p; +@@ -192,7 +207,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) + + /* grab buffer if we didn't have one */ + if (!m->buf) { +- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); ++ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY); + if (!m->buf) + goto Enomem; + } +@@ -234,7 +249,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) + m->op->stop(m, p); + kfree(m->buf); + m->count = 0; +- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); ++ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY); + if (!m->buf) + goto Enomem; + m->version = 0; +@@ -584,7 +599,7 @@ static void single_stop(struct seq_file *p, void *v) + int single_open(struct file *file, int (*show)(struct seq_file *, void *), + void *data) + { +- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL); ++ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL); + int res = -ENOMEM; + + if (op) { +@@ -605,7 +620,7 @@ EXPORT_SYMBOL(single_open); + int single_open_size(struct file *file, int (*show)(struct seq_file *, void *), + void *data, size_t size) + { +- char *buf = kmalloc(size, GFP_KERNEL); ++ char *buf = kmalloc(size, GFP_KERNEL | GFP_USERCOPY); + int ret; + if (!buf) + return -ENOMEM; +@@ -620,6 +635,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *), + } + EXPORT_SYMBOL(single_open_size); + ++int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *), ++ void *data) ++{ ++ if (gr_proc_is_restricted()) ++ return -EACCES; ++ ++ return single_open(file, show, data); ++} ++EXPORT_SYMBOL(single_open_restrict); ++ ++ + int single_release(struct inode *inode, struct file *file) + { + const struct seq_operations *op = ((struct seq_file *)file->private_data)->op; +diff --git a/fs/splice.c b/fs/splice.c +index 12028fa..a6f2619 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + pipe_lock(pipe); + + for (;;) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + page_nr++; + ret += buf->len; + +- if (pipe->files) ++ if (atomic_read(&pipe->files)) + do_wakeup = 1; + + if (!--spd->nr_pages) +@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, + do_wakeup = 0; + } + +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -583,7 +583,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); ++ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos); + set_fs(old_fs); + + return res; +@@ -598,7 +598,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count, + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_write(file, (__force const char __user *)buf, count, 
&pos); ++ res = vfs_write(file, (const char __force_user *)buf, count, &pos); + set_fs(old_fs); + + return res; +@@ -651,7 +651,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, + goto err; + + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); +- vec[i].iov_base = (void __user *) page_address(page); ++ vec[i].iov_base = (void __force_user *) page_address(page); + vec[i].iov_len = this_len; + spd.pages[i] = page; + spd.nr_pages++; +@@ -847,7 +847,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, + ops->release(pipe, buf); + pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); + pipe->nrbufs--; +- if (pipe->files) ++ if (atomic_read(&pipe->files)) + sd->need_wakeup = true; + } + +@@ -872,10 +872,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed); + int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) + { + while (!pipe->nrbufs) { +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + return 0; + +- if (!pipe->waiting_writers && sd->num_spliced) ++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced) + return 0; + + if (sd->flags & SPLICE_F_NONBLOCK) +@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, + * out of the pipe right after the splice_to_pipe(). So set + * PIPE_READERS appropriately. + */ +- pipe->readers = 1; ++ atomic_set(&pipe->readers, 1); + + current->splice_pipe = pipe; + } +@@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const struct iovec __user *iov, + + partial[buffers].offset = off; + partial[buffers].len = plen; ++ partial[buffers].private = 0; + + off = 0; + len -= plen; +@@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + ret = -ERESTARTSYS; + break; + } +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + if (flags & SPLICE_F_NONBLOCK) { + ret = -EAGAIN; + break; +@@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + pipe_lock(pipe); + + while (pipe->nrbufs >= pipe->buffers) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + break; +@@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) + ret = -ERESTARTSYS; + break; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -1880,14 +1881,14 @@ retry: + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; + break; + } + +- if (!ipipe->nrbufs && !ipipe->writers) ++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers)) + break; + + /* +@@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_info *ipipe, + * return EAGAIN if we have the potential of some data in the + * future, otherwise just return 0 + */ +- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK)) + ret = -EAGAIN; + + pipe_unlock(ipipe); +diff --git 
a/fs/stat.c b/fs/stat.c +index ae0c3ce..9ee641c 100644 +--- a/fs/stat.c ++++ b/fs/stat.c +@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat) + stat->gid = inode->i_gid; + stat->rdev = inode->i_rdev; + stat->size = i_size_read(inode); +- stat->atime = inode->i_atime; +- stat->mtime = inode->i_mtime; ++ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) { ++ stat->atime = inode->i_ctime; ++ stat->mtime = inode->i_ctime; ++ } else { ++ stat->atime = inode->i_atime; ++ stat->mtime = inode->i_mtime; ++ } + stat->ctime = inode->i_ctime; + stat->blksize = (1 << inode->i_blkbits); + stat->blocks = inode->i_blocks; +@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr); + int vfs_getattr_nosec(struct path *path, struct kstat *stat) + { + struct inode *inode = path->dentry->d_inode; ++ int retval; + +- if (inode->i_op->getattr) +- return inode->i_op->getattr(path->mnt, path->dentry, stat); ++ if (inode->i_op->getattr) { ++ retval = inode->i_op->getattr(path->mnt, path->dentry, stat); ++ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) { ++ stat->atime = stat->ctime; ++ stat->mtime = stat->ctime; ++ } ++ return retval; ++ } + + generic_fillattr(inode, stat); + return 0; +diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c +index ee0d761..b346c58 100644 +--- a/fs/sysfs/dir.c ++++ b/fs/sysfs/dir.c +@@ -62,9 +62,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name) + int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) + { + struct kernfs_node *parent, *kn; ++ const char *name; ++ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO; ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ const char *parent_name; ++#endif + + BUG_ON(!kobj); + ++ name = kobject_name(kobj); ++ + if (kobj->parent) + parent = kobj->parent->sd; + else +@@ -73,11 +80,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) + if (!parent) + return -ENOENT; + +- kn = kernfs_create_dir_ns(parent, kobject_name(kobj), +- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns); ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT ++ parent_name = parent->name; ++ mode = S_IRWXU; ++ ++ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) || ++ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) || ++ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) || ++ (!strcmp(parent_name, "system") && !strcmp(name, "cpu"))) ++ mode = S_IRWXU | S_IRUGO | S_IXUGO; ++#endif ++ ++ kn = kernfs_create_dir_ns(parent, name, ++ mode, kobj, ns); + if (IS_ERR(kn)) { + if (PTR_ERR(kn) == -EEXIST) +- sysfs_warn_dup(parent, kobject_name(kobj)); ++ sysfs_warn_dup(parent, name); + return PTR_ERR(kn); + } + +diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h +index 69d4889..a810bd4 100644 +--- a/fs/sysv/sysv.h ++++ b/fs/sysv/sysv.h +@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x) + #endif + } + +-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n) ++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n) + { + if (sbi->s_bytesex == BYTESEX_PDP) + return PDP_swab((__force __u32)n); +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c +index e18b988..f1d4ad0f 100644 +--- a/fs/ubifs/io.c ++++ b/fs/ubifs/io.c +@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) + return err; + } + +-int ubifs_leb_unmap(struct ubifs_info *c, int lnum) ++int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum) + { + int err; 
+ +diff --git a/fs/udf/misc.c b/fs/udf/misc.c +index c175b4d..8f36a16 100644 +--- a/fs/udf/misc.c ++++ b/fs/udf/misc.c +@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, + + u8 udf_tag_checksum(const struct tag *t) + { +- u8 *data = (u8 *)t; ++ const u8 *data = (const u8 *)t; + u8 checksum = 0; + int i; + for (i = 0; i < sizeof(struct tag); ++i) +diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h +index 8d974c4..b82f6ec 100644 +--- a/fs/ufs/swab.h ++++ b/fs/ufs/swab.h +@@ -22,7 +22,7 @@ enum { + BYTESEX_BE + }; + +-static inline u64 ++static inline u64 __intentional_overflow(-1) + fs64_to_cpu(struct super_block *sbp, __fs64 n) + { + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) +@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n) + return (__force __fs64)cpu_to_be64(n); + } + +-static inline u32 ++static inline u32 __intentional_overflow(-1) + fs32_to_cpu(struct super_block *sbp, __fs32 n) + { + if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE) +diff --git a/fs/utimes.c b/fs/utimes.c +index aa138d6..5f3a811 100644 +--- a/fs/utimes.c ++++ b/fs/utimes.c +@@ -1,6 +1,7 @@ + #include <linux/compiler.h> + #include <linux/file.h> + #include <linux/fs.h> ++#include <linux/security.h> + #include <linux/linkage.h> + #include <linux/mount.h> + #include <linux/namei.h> +@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times) + } + } + retry_deleg: ++ ++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto mnt_drop_write_and_out; ++ } ++ + mutex_lock(&inode->i_mutex); + error = notify_change(path->dentry, &newattrs, &delegated_inode); + mutex_unlock(&inode->i_mutex); +diff --git a/fs/xattr.c b/fs/xattr.c +index 3377dff..f394815 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, + return rc; + } + ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++ssize_t ++pax_getxattr(struct dentry *dentry, void *value, size_t size) ++{ ++ struct inode *inode = dentry->d_inode; ++ ssize_t error; ++ ++ error = inode_permission(inode, MAY_EXEC); ++ if (error) ++ return error; ++ ++ if (inode->i_op->getxattr) ++ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size); ++ else ++ error = -EOPNOTSUPP; ++ ++ return error; ++} ++EXPORT_SYMBOL(pax_getxattr); ++#endif ++ + ssize_t + vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) + { +@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr); + * Extended attribute SET operations + */ + static long +-setxattr(struct dentry *d, const char __user *name, const void __user *value, ++setxattr(struct path *path, const char __user *name, const void __user *value, + size_t size, int flags) + { + int error; +@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value, + posix_acl_fix_xattr_from_user(kvalue, size); + } + +- error = vfs_setxattr(d, kname, kvalue, size, flags); ++ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto out; ++ } ++ ++ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags); + out: + if (vvalue) + vfree(vvalue); +@@ -377,7 +403,7 @@ retry: + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = setxattr(path.dentry, name, value, size, flags); ++ error = setxattr(&path, name, value, size, flags); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -401,7 +427,7 @@ retry: + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = 
setxattr(path.dentry, name, value, size, flags); ++ error = setxattr(&path, name, value, size, flags); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, + const void __user *,value, size_t, size, int, flags) + { + struct fd f = fdget(fd); +- struct dentry *dentry; + int error = -EBADF; + + if (!f.file) + return error; +- dentry = f.file->f_path.dentry; +- audit_inode(NULL, dentry, 0); ++ audit_inode(NULL, f.file->f_path.dentry, 0); + error = mnt_want_write_file(f.file); + if (!error) { +- error = setxattr(dentry, name, value, size, flags); ++ error = setxattr(&f.file->f_path, name, value, size, flags); + mnt_drop_write_file(f.file); + } + fdput(f); +@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size) + * Extended attribute REMOVE operations + */ + static long +-removexattr(struct dentry *d, const char __user *name) ++removexattr(struct path *path, const char __user *name) + { + int error; + char kname[XATTR_NAME_MAX + 1]; +@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name) + if (error < 0) + return error; + +- return vfs_removexattr(d, kname); ++ if (!gr_acl_handle_removexattr(path->dentry, path->mnt)) ++ return -EACCES; ++ ++ return vfs_removexattr(path->dentry, kname); + } + + SYSCALL_DEFINE2(removexattr, const char __user *, pathname, +@@ -652,7 +679,7 @@ retry: + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = removexattr(path.dentry, name); ++ error = removexattr(&path, name); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -675,7 +702,7 @@ retry: + return error; + error = mnt_want_write(path.mnt); + if (!error) { +- error = removexattr(path.dentry, name); ++ error = removexattr(&path, name); + mnt_drop_write(path.mnt); + } + path_put(&path); +@@ -689,16 +716,16 @@ retry: + SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) + { + struct fd f = fdget(fd); +- struct dentry *dentry; ++ struct path *path; + int error = -EBADF; + + if (!f.file) + return error; +- dentry = f.file->f_path.dentry; +- audit_inode(NULL, dentry, 0); ++ path = &f.file->f_path; ++ audit_inode(NULL, path->dentry, 0); + error = mnt_want_write_file(f.file); + if (!error) { +- error = removexattr(dentry, name); ++ error = removexattr(path, name); + mnt_drop_write_file(f.file); + } + fdput(f); +diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c +index 152543c..d80c361 100644 +--- a/fs/xfs/xfs_bmap.c ++++ b/fs/xfs/xfs_bmap.c +@@ -584,7 +584,7 @@ xfs_bmap_validate_ret( + + #else + #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) +-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) ++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0) + #endif /* DEBUG */ + + /* +diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c +index aead369..0dfecfd 100644 +--- a/fs/xfs/xfs_dir2_readdir.c ++++ b/fs/xfs/xfs_dir2_readdir.c +@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents( + ino = dp->d_ops->sf_get_ino(sfp, sfep); + filetype = dp->d_ops->sf_get_ftype(sfep); + ctx->pos = off & 0x7fffffff; +- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino, ++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) { ++ char name[sfep->namelen]; ++ memcpy(name, sfep->name, sfep->namelen); ++ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype))) ++ return 0; ++ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino, + 
xfs_dir3_get_dtype(mp, filetype))) + return 0; + sfep = dp->d_ops->sf_nextentry(sfp, sfep); +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index 78e62cc..eec3706 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -122,7 +122,7 @@ xfs_find_handle( + } + + error = -EFAULT; +- if (copy_to_user(hreq->ohandle, &handle, hsize) || ++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) || + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) + goto out_put; + +diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig +new file mode 100644 +index 0000000..27cec32 +--- /dev/null ++++ b/grsecurity/Kconfig +@@ -0,0 +1,1166 @@ ++# ++# grsecurity configuration ++# ++menu "Memory Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_KMEM ++ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port" ++ default y if GRKERNSEC_CONFIG_AUTO ++ select STRICT_DEVMEM if (X86 || ARM || TILE || S390) ++ help ++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to ++ be written to or read from to modify or leak the contents of the running ++ kernel. /dev/port will also not be allowed to be opened, writing to ++ /dev/cpu/*/msr will be prevented, and support for kexec will be removed. ++ If you have module support disabled, enabling this will close up several ++ ways that are currently used to insert malicious code into the running ++ kernel. ++ ++ Even with this feature enabled, we still highly recommend that ++ you use the RBAC system, as it is still possible for an attacker to ++ modify the running kernel through other more obscure methods. ++ ++ It is highly recommended that you say Y here if you meet all the ++ conditions above. ++ ++config GRKERNSEC_VM86 ++ bool "Restrict VM86 mode" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER) ++ depends on X86_32 ++ ++ help ++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to ++ make use of a special execution mode on 32bit x86 processors called ++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain ++ video cards and will still work with this option enabled. The purpose ++ of the option is to prevent exploitation of emulation errors in ++ virtualization of vm86 mode like the one discovered in VMWare in 2009. ++ Nearly all users should be able to enable this option. ++ ++config GRKERNSEC_IO ++ bool "Disable privileged I/O" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER) ++ depends on X86 ++ select RTC_CLASS ++ select RTC_INTF_DEV ++ select RTC_DRV_CMOS ++ ++ help ++ If you say Y here, all ioperm and iopl calls will return an error. ++ Ioperm and iopl can be used to modify the running kernel. ++ Unfortunately, some programs need this access to operate properly, ++ the most notable of which are XFree86 and hwclock. hwclock can be ++ remedied by having RTC support in the kernel, so real-time ++ clock support is enabled if this option is enabled, to ensure ++ that hwclock operates correctly. If hwclock still does not work, ++ either update udev or symlink /dev/rtc to /dev/rtc0. ++ ++ If you're using XFree86 or a version of Xorg from 2012 or earlier, ++ you may not be able to boot into a graphical environment with this ++ option enabled. In this case, you should use the RBAC system instead.
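To make the GRKERNSEC_IO behavior described above concrete, here is a minimal userspace probe. It is illustrative only, written for this annotation rather than taken from the patch, and assumes an x86 glibc target where <sys/io.h> provides iopl() and ioperm(). With the option enabled, both calls are expected to fail with EPERM even when run as root:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/io.h>	/* x86 glibc header for iopl()/ioperm() */

int main(void)
{
	/* Request the highest I/O privilege level for this process. */
	if (iopl(3) == -1)
		printf("iopl(3): %s\n", strerror(errno));
	/* Request direct access to the CMOS/RTC index and data ports. */
	if (ioperm(0x70, 2, 1) == -1)
		printf("ioperm(0x70, 2, 1): %s\n", strerror(errno));
	return 0;
}

Running this as root on kernels built with and without the option is a quick way to confirm that hwclock-style port access is actually being refused.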
++ ++config GRKERNSEC_JIT_HARDEN ++ bool "Harden BPF JIT against spray attacks" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on BPF_JIT && X86 ++ help ++ If you say Y here, the native code generated by the kernel's Berkeley ++ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying ++ attacks that attempt to fit attacker-beneficial instructions in ++ 32bit immediate fields of JIT-generated native instructions. The ++ attacker will generally aim to cause an unintended instruction sequence ++ of JIT-generated native code to execute by jumping into the middle of ++ a generated instruction. This feature effectively randomizes the 32bit ++ immediate constants present in the generated code to thwart such attacks. ++ ++ If you're using KERNEXEC, it's recommended that you enable this option ++ to supplement the hardening of the kernel. ++ ++config GRKERNSEC_PERF_HARDEN ++ bool "Disable unprivileged PERF_EVENTS usage by default" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on PERF_EVENTS ++ help ++ If you say Y here, the range of acceptable values for the ++ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and ++ default to a new value: 3. When the sysctl is set to this value, no ++ unprivileged use of the PERF_EVENTS syscall interface will be permitted. ++ ++ Though PERF_EVENTS can be used legitimately for performance monitoring ++ and low-level application profiling, it is forced on regardless of ++ configuration, has been at fault for several vulnerabilities, and ++ creates new opportunities for side channels and other information leaks. ++ ++ This feature puts PERF_EVENTS into a secure default state and permits ++ the administrator to change out of it temporarily if unprivileged ++ application profiling is needed. ++ ++config GRKERNSEC_RAND_THREADSTACK ++ bool "Insert random gaps between thread stacks" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on PAX_RANDMMAP && !PPC ++ help ++ If you say Y here, a random-sized gap will be enforced between allocated ++ thread stacks. Glibc's NPTL and other threading libraries that ++ pass MAP_STACK to the kernel for thread stack allocation are supported. ++ The implementation currently provides 8 bits of entropy for the gap. ++ ++ Many distributions do not compile threaded remote services with the ++ -fstack-check argument to GCC, causing the variable-sized stack-based ++ allocator, alloca(), to not probe the stack on allocation. This ++ permits an unbounded alloca() to skip over any guard page and potentially ++ modify another thread's stack reliably. An enforced random gap ++ reduces the reliability of such an attack and increases the chance ++ that such a read/write to another thread's stack instead lands in ++ an unmapped area, causing a crash and triggering grsecurity's ++ anti-bruteforcing logic. ++ ++config GRKERNSEC_PROC_MEMMAP ++ bool "Harden ASLR against information leaks and entropy reduction" ++ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR) ++ depends on PAX_NOEXEC || PAX_ASLR ++ help ++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will ++ give no information about the addresses of the task's mappings if ++ PaX features that rely on random addresses are enabled on the task. ++ In addition to sanitizing this information and disabling other ++ dangerous sources of information, this option causes reads of sensitive ++ /proc/<pid> entries to return no data when the file descriptor was opened ++ in a different task than the one performing the read. Such attempts are logged.
++ This option also limits argv/env strings for suid/sgid binaries ++ to 512KB to prevent a complete exhaustion of the stack entropy provided ++ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid ++ binaries to prevent alternative mmap layouts from being abused. ++ ++ If you use PaX it is essential that you say Y here as it closes up ++ several holes that make full ASLR useless locally. ++ ++ ++config GRKERNSEC_KSTACKOVERFLOW ++ bool "Prevent kernel stack overflows" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on !IA64 && 64BIT ++ help ++ If you say Y here, the kernel's process stacks will be allocated ++ with vmalloc instead of the kernel's default allocator. This ++ introduces guard pages that in combination with the alloca checking ++ of the STACKLEAK feature prevents all forms of kernel process stack ++ overflow abuse. Note that this is different from kernel stack ++ buffer overflows. ++ ++config GRKERNSEC_BRUTE ++ bool "Deter exploit bruteforcing" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ If you say Y here, attempts to bruteforce exploits against forking ++ daemons such as apache or sshd, as well as against suid/sgid binaries ++ will be deterred. When a child of a forking daemon is killed by PaX ++ or crashes due to an illegal instruction or other suspicious signal, ++ the parent process will be delayed 30 seconds upon every subsequent ++ fork until the administrator is able to assess the situation and ++ restart the daemon. ++ In the suid/sgid case, the attempt is logged, the user has all their ++ existing instances of the suid/sgid binary terminated and will ++ be unable to execute any suid/sgid binaries for 15 minutes. ++ ++ It is recommended that you also enable signal logging in the auditing ++ section so that logs are generated when a process triggers a suspicious ++ signal. ++ If the sysctl option is enabled, a sysctl option with name ++ "deter_bruteforce" is created. ++ ++config GRKERNSEC_MODHARDEN ++ bool "Harden module auto-loading" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on MODULES ++ help ++ If you say Y here, module auto-loading in response to use of some ++ feature implemented by an unloaded module will be restricted to ++ root users. Enabling this option helps defend against attacks ++ by unprivileged users who abuse the auto-loading behavior to ++ cause a vulnerable module to load that is then exploited. ++ ++ If this option prevents a legitimate use of auto-loading for a ++ non-root user, the administrator can execute modprobe manually ++ with the exact name of the module mentioned in the alert log. ++ Alternatively, the administrator can add the module to the list ++ of modules loaded at boot by modifying init scripts. ++ ++ Modification of init scripts will most likely be needed on ++ Ubuntu servers with encrypted home directory support enabled, ++ as the first non-root user logging in will cause the ecb(aes), ++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded. ++ ++config GRKERNSEC_HIDESYM ++ bool "Hide kernel symbols" ++ default y if GRKERNSEC_CONFIG_AUTO ++ select PAX_USERCOPY_SLABS ++ help ++ If you say Y here, getting information on loaded modules, and ++ displaying all kernel symbols through a syscall will be restricted ++ to users with CAP_SYS_MODULE. For software compatibility reasons, ++ /proc/kallsyms will be restricted to the root user. The RBAC ++ system can hide that entry even from root. ++ ++ This option also prevents leaking of kernel addresses through ++ several /proc entries. 
++ ++ Note that this option is only effective provided the following ++ conditions are met: ++ 1) The kernel using grsecurity is not precompiled by some distribution ++ 2) You have also enabled GRKERNSEC_DMESG ++ 3) You are using the RBAC system and hiding other files such as your ++ kernel image and System.map. Alternatively, enabling this option ++ causes the permissions on /boot, /lib/modules, and the kernel ++ source directory to change at compile time to prevent ++ reading by non-root users. ++ If the above conditions are met, this option will aid in providing a ++ useful protection against local kernel exploitation of overflows ++ and arbitrary read/write vulnerabilities. ++ ++ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN ++ in addition to this feature. ++ ++config GRKERNSEC_RANDSTRUCT ++ bool "Randomize layout of sensitive kernel structures" ++ default y if GRKERNSEC_CONFIG_AUTO ++ select GRKERNSEC_HIDESYM ++ select MODVERSIONS if MODULES ++ help ++ If you say Y here, the layouts of a number of sensitive kernel ++ structures (task, fs, cred, etc) and all structures composed entirely ++ of function pointers (aka "ops" structs) will be randomized at compile-time. ++ This can introduce the requirement of an additional infoleak ++ vulnerability for exploits targeting these structure types. ++ ++ Enabling this feature will introduce some performance impact, slightly ++ increase memory usage, and prevent the use of forensic tools like ++ Volatility against the system (unless the kernel source tree isn't ++ cleaned after kernel installation). ++ ++ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h. ++ It remains after a make clean to allow for external modules to be compiled ++ with the existing seed and will be removed by a make mrproper or ++ make distclean. ++ ++ Note that the implementation requires gcc 4.6.4 or newer. You may need ++ to install the supporting headers explicitly in addition to the normal ++ gcc package. ++ ++config GRKERNSEC_RANDSTRUCT_PERFORMANCE ++ bool "Use cacheline-aware structure randomization" ++ depends on GRKERNSEC_RANDSTRUCT ++ default y if GRKERNSEC_CONFIG_PRIORITY_PERF ++ help ++ If you say Y here, the RANDSTRUCT randomization will make a best effort ++ at restricting randomization to cacheline-sized groups of elements. It ++ will further not randomize bitfields in structures. This reduces the ++ performance hit of RANDSTRUCT at the cost of weakened randomization. ++ ++config GRKERNSEC_KERN_LOCKOUT ++ bool "Active kernel exploit response" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on X86 || ARM || PPC || SPARC ++ help ++ If you say Y here, when a PaX alert is triggered due to suspicious ++ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY) ++ or an OOPS occurs due to bad memory accesses, instead of just ++ terminating the offending process (and potentially allowing ++ a subsequent exploit from the same user), we will take one of two ++ actions: ++ If the user was root, we will panic the system. ++ If the user was non-root, we will log the attempt, terminate ++ all processes owned by the user, then prevent them from creating ++ any new processes until the system is restarted. ++ This deters repeated kernel exploitation/bruteforcing attempts ++ and is useful for later forensics.
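The RANDSTRUCT description above also explains a hunk that appears earlier in this patch: fs/reiserfs/item_ops.c converts errcatch_ops from positional to designated initializers. Once the compiler plugin may reorder the members of an all-function-pointer struct at compile time, positional initializers would bind handlers to the wrong slots. A standalone sketch of the failure mode, using a hypothetical struct and handlers rather than kernel code:

#include <stdio.h>

static int handler_a(void) { puts("handler_a"); return 0; }
static int handler_b(void) { puts("handler_b"); return 0; }

/* Hypothetical ops struct; RANDSTRUCT may reorder these members. */
struct example_ops {
	int (*first)(void);
	int (*second)(void);
};

/* Fragile: binds by declaration order, so a randomized layout would
 * silently swap the two handlers. */
static struct example_ops positional_ops = { handler_a, handler_b };

/* Robust: binds by member name regardless of the final layout; this is
 * the form the errcatch_ops hunk switches to. */
static struct example_ops designated_ops = {
	.first  = handler_a,
	.second = handler_b,
};

int main(void)
{
	positional_ops.first();	/* correct only without randomization */
	designated_ops.first();	/* correct under any member order */
	return 0;
}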
++ ++config GRKERNSEC_OLD_ARM_USERLAND ++ bool "Old ARM userland compatibility" ++ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7) ++ help ++ If you say Y here, stubs of executable code to perform such operations ++ as "compare-exchange" will be placed at fixed locations in the ARM vector ++ table. This is unfortunately needed for old ARM userland meant to run ++ across a wide range of processors. Without this option enabled, ++ the get_tls and data memory barrier stubs will be emulated by the kernel, ++ which is enough for Linaro userlands or other userlands designed for v6 ++ and newer ARM CPUs. It's recommended that you try without this option enabled ++ first, and only enable it if your userland does not boot (it will likely fail ++ at init time). ++ ++endmenu ++menu "Role Based Access Control Options" ++depends on GRKERNSEC ++ ++config GRKERNSEC_RBAC_DEBUG ++ bool ++ ++config GRKERNSEC_NO_RBAC ++ bool "Disable RBAC system" ++ help ++ If you say Y here, the /dev/grsec device will be removed from the kernel, ++ preventing the RBAC system from being enabled. You should only say Y ++ here if you have no intention of using the RBAC system, so as to prevent ++ an attacker with root access from misusing the RBAC system to hide files ++ and processes when loadable module support and /dev/[k]mem have been ++ locked down. ++ ++config GRKERNSEC_ACL_HIDEKERN ++ bool "Hide kernel processes" ++ help ++ If you say Y here, all kernel threads will be hidden from all ++ processes but those whose subject has the "view hidden processes" ++ flag. ++ ++config GRKERNSEC_ACL_MAXTRIES ++ int "Maximum tries before password lockout" ++ default 3 ++ help ++ This option enforces the maximum number of times a user can attempt ++ to authorize themselves with the grsecurity RBAC system before being ++ denied the ability to attempt authorization again for a specified time. ++ The lower the number, the harder it will be to brute-force a password. ++ ++config GRKERNSEC_ACL_TIMEOUT ++ int "Time to wait after max password tries, in seconds" ++ default 30 ++ help ++ This option specifies the time the user must wait after attempting to ++ authorize to the RBAC system with the maximum number of invalid ++ passwords. The higher the number, the harder it will be to brute-force ++ a password. ++ ++endmenu ++menu "Filesystem Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_PROC ++ bool "Proc restrictions" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ If you say Y here, the permissions of the /proc filesystem ++ will be altered to enhance system security and privacy. You MUST ++ choose either a user only restriction or a user and group restriction. ++ Depending upon the option you choose, you can either restrict users to ++ see only the processes they themselves run (the "restrict to user only" ++ option), or choose a group that can view all processes and files ++ normally restricted to root. NOTE: If you're running identd or ++ ntpd as a non-root user, you will have to run it as the group you ++ specify here. ++ ++config GRKERNSEC_PROC_USER ++ bool "Restrict /proc to user only" ++ depends on GRKERNSEC_PROC ++ help ++ If you say Y here, non-root users will only be able to view their own ++ processes, and will be restricted from viewing network-related information ++ and kernel symbol and module information.
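As a quick sanity check of the /proc restrictions described above, a probe along the following lines (again illustrative, not patch code) can be run as an unprivileged user. With GRKERNSEC_PROC_USER enabled, opening a /proc entry of a process the caller does not own is expected to fail:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* PID 1 is owned by root, so an unprivileged caller does not
	 * own it and should be denied under GRKERNSEC_PROC_USER. */
	FILE *f = fopen("/proc/1/status", "r");

	if (!f) {
		printf("/proc/1/status: %s\n", strerror(errno));
		return 1;
	}
	puts("/proc/1/status readable (restriction off, or probe ran as root)");
	fclose(f);
	return 0;
}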
++ ++config GRKERNSEC_PROC_USERGROUP ++ bool "Allow special group" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER ++ help ++ If you say Y here, you will be able to select a group that will be ++ able to view all processes and network-related information. If you've ++ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still ++ remain hidden. This option is useful if you want to run identd as ++ a non-root user. The group you select may also be chosen at boot time ++ via "grsec_proc_gid=" on the kernel commandline. ++ ++config GRKERNSEC_PROC_GID ++ int "GID for special group" ++ depends on GRKERNSEC_PROC_USERGROUP ++ default 1001 ++ ++config GRKERNSEC_PROC_ADD ++ bool "Additional restrictions" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP ++ help ++ If you say Y here, additional restrictions will be placed on ++ /proc that keep normal users from viewing device information and ++ slabinfo information that could be useful for exploits. ++ ++config GRKERNSEC_LINK ++ bool "Linking restrictions" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ If you say Y here, /tmp race exploits will be prevented, since users ++ will no longer be able to follow symlinks owned by other users in ++ world-writable +t directories (e.g. /tmp), unless the owner of the ++ symlink is the owner of the directory. Users will also not be ++ able to hardlink to files they do not own. If the sysctl option is ++ enabled, a sysctl option with name "linking_restrictions" is created. ++ ++config GRKERNSEC_SYMLINKOWN ++ bool "Kernel-enforced SymlinksIfOwnerMatch" ++ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER ++ help ++ Apache's SymlinksIfOwnerMatch option has an inherent race condition ++ that prevents it from being used as a security feature. As Apache ++ verifies the symlink by performing a stat() against the target of ++ the symlink before it is followed, an attacker can set up a symlink ++ to point to a same-owned file, then replace the symlink with one ++ that targets another user's file just after Apache "validates" the ++ symlink -- a classic TOCTOU race. If you say Y here, a complete, ++ race-free replacement for Apache's "SymlinksIfOwnerMatch" option ++ will be in place for the group you specify. If the sysctl option ++ is enabled, a sysctl option with name "enforce_symlinksifowner" is ++ created. ++ ++config GRKERNSEC_SYMLINKOWN_GID ++ int "GID for users with kernel-enforced SymlinksIfOwnerMatch" ++ depends on GRKERNSEC_SYMLINKOWN ++ default 1006 ++ help ++ Setting this GID determines what group kernel-enforced ++ SymlinksIfOwnerMatch will be enabled for. If the sysctl option ++ is enabled, a sysctl option with name "symlinkown_gid" is created. ++ ++config GRKERNSEC_FIFO ++ bool "FIFO restrictions" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ If you say Y here, users will not be able to write to FIFOs they don't ++ own in world-writable +t directories (e.g. /tmp), unless the owner of ++ the FIFO is the same as the owner of the directory it's held in. If the sysctl ++ option is enabled, a sysctl option with name "fifo_restrictions" is ++ created. ++ ++config GRKERNSEC_SYSFS_RESTRICT ++ bool "Sysfs/debugfs restriction" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER) ++ depends on SYSFS ++ help ++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and ++ any filesystem normally mounted under it (e.g. debugfs) will be ++ mostly accessible only by root.
These filesystems generally provide access ++ to hardware and debug information that isn't appropriate for unprivileged ++ users of the system. Sysfs and debugfs have also become a large source ++ of new vulnerabilities, ranging from infoleaks to local compromise. ++ There has been very little oversight with an eye toward security involved ++ in adding new exporters of information to these filesystems, so their ++ use is discouraged. ++ For reasons of compatibility, a few directories have been whitelisted ++ for access by non-root users: ++ /sys/fs/selinux ++ /sys/fs/fuse ++ /sys/devices/system/cpu ++ ++config GRKERNSEC_ROFS ++ bool "Runtime read-only mount protection" ++ depends on SYSCTL ++ help ++ If you say Y here, a sysctl option with name "romount_protect" will ++ be created. By setting this option to 1 at runtime, filesystems ++ will be protected in the following ways: ++ * No new writable mounts will be allowed ++ * Existing read-only mounts won't be able to be remounted read/write ++ * Write operations will be denied on all block devices ++ This option acts independently of grsec_lock: once it is set to 1, ++ it cannot be turned off. Therefore, please be mindful of the resulting ++ behavior if this option is enabled in an init script on a read-only ++ filesystem. ++ Also be aware that as with other root-focused features, GRKERNSEC_KMEM ++ and GRKERNSEC_IO should be enabled and module loading disabled via ++ config or at runtime. ++ This feature is mainly intended for secure embedded systems. ++ ++ ++config GRKERNSEC_DEVICE_SIDECHANNEL ++ bool "Eliminate stat/notify-based device sidechannels" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ If you say Y here, timing analyses on block or character ++ devices like /dev/ptmx using stat or inotify/dnotify/fanotify ++ will be thwarted for unprivileged users. If a process without ++ CAP_MKNOD stats such a device, the last access and last modify times ++ will match the device's create time. No access or modify events ++ will be triggered through inotify/dnotify/fanotify for such devices. ++ This feature will prevent attacks that may at a minimum ++ allow an attacker to determine the administrator's password length. ++ ++config GRKERNSEC_CHROOT ++ bool "Chroot jail restrictions" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ If you say Y here, you will be able to choose several options that will ++ make breaking out of a chrooted jail much more difficult. If you ++ encounter no software incompatibilities with the following options, it ++ is recommended that you enable each one. ++ ++ Note that the chroot restrictions are not intended to apply to "chroots" ++ to directories that are simple bind mounts of the global root filesystem. ++ For several other reasons, a user shouldn't expect any significant ++ security by performing such a chroot. ++ ++config GRKERNSEC_CHROOT_MOUNT ++ bool "Deny mounts" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ mount or remount filesystems. If the sysctl option is enabled, a ++ sysctl option with name "chroot_deny_mount" is created. ++ ++config GRKERNSEC_CHROOT_DOUBLE ++ bool "Deny double-chroots" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chroot ++ again outside the chroot. This is a widely used method of breaking ++ out of a chroot jail and should not be allowed. 
If the sysctl ++ option is enabled, a sysctl option with name ++ "chroot_deny_chroot" is created. ++ ++config GRKERNSEC_CHROOT_PIVOT ++ bool "Deny pivot_root in chroot" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to use ++ a function called pivot_root() that was introduced in Linux 2.3.41. It ++ works similarly to chroot in that it changes the root filesystem. This ++ function could be misused in a chrooted process to attempt to break out ++ of the chroot, and therefore should not be allowed. If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_pivot" is ++ created. ++ ++config GRKERNSEC_CHROOT_CHDIR ++ bool "Enforce chdir(\"/\") on all chroots" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the current working directory of all newly-chrooted ++ applications will be set to the root directory of the chroot. ++ The man page on chroot(2) states: ++ Note that this call does not change the current working ++ directory, so that `.' can be outside the tree rooted at ++ `/'. In particular, the super-user can escape from a ++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'. ++ ++ It is recommended that you say Y here, since it's not known to break ++ any software. If the sysctl option is enabled, a sysctl option with ++ name "chroot_enforce_chdir" is created. ++ ++config GRKERNSEC_CHROOT_CHMOD ++ bool "Deny (f)chmod +s" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chmod ++ or fchmod files to make them have suid or sgid bits. This protects ++ against another published method of breaking a chroot. If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_chmod" is ++ created. ++ ++config GRKERNSEC_CHROOT_FCHDIR ++ bool "Deny fchdir and fhandle out of chroot" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, a well-known method of breaking chroots by fchdir'ing ++ to a file descriptor of the chrooting process that points to a directory ++ outside the filesystem will be stopped. Additionally, this option prevents ++ use of the recently-created syscall for opening files by a guessable "file ++ handle" inside a chroot. If the sysctl option is enabled, a sysctl option ++ with name "chroot_deny_fchdir" is created. ++ ++config GRKERNSEC_CHROOT_MKNOD ++ bool "Deny mknod" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be allowed to ++ mknod. The problem with using mknod inside a chroot is that it ++ would allow an attacker to create a device entry that is the same ++ as one on the physical root of your system, which could be anything ++ from the console device to a device for your hard drive (which ++ they could then use to wipe the drive or steal data). It is recommended ++ that you say Y here, unless you run into software incompatibilities. ++ If the sysctl option is enabled, a sysctl option with name ++ "chroot_deny_mknod" is created. ++ ++config GRKERNSEC_CHROOT_SHMAT ++ bool "Deny shmat() out of chroot" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to attach ++ to shared memory segments that were created outside of the chroot jail. ++ It is recommended that you say Y here.
++ a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ connect to abstract (meaning not belonging to a filesystem) Unix
++ domain sockets that were bound outside of a chroot. It is recommended
++ that you say Y here. If the sysctl option is enabled, a sysctl option
++ with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++ bool "Protect outside processes"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
++ getsid, or view any process outside of the chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_findtask" is
++ created.
++
++config GRKERNSEC_CHROOT_NICE
++ bool "Restrict priority changes"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to raise
++ the priority of processes in the chroot, or alter the priority of
++ processes outside the chroot. This provides more security than simply
++ removing CAP_SYS_NICE from the process' capability set. If the
++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++ is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++ bool "Deny sysctl writes"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, an attacker in a chroot will not be able to
++ write to sysctl entries, either by sysctl(2) or through a /proc
++ interface. It is strongly recommended that you say Y here. If the
++ sysctl option is enabled, a sysctl option with name
++ "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++ bool "Capability restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the capabilities on all processes within a
++ chroot jail will be lowered to stop module insertion, raw i/o,
++ system and net admin tasks, rebooting the system, modifying immutable
++ files, modifying IPC owned by another, and changing the system time.
++ This is left as an option because it can break some apps. Disable this
++ if your chrooted apps are having problems performing those kinds of
++ tasks. If the sysctl option is enabled, a sysctl option with
++ name "chroot_caps" is created.
++
++config GRKERNSEC_CHROOT_INITRD
++ bool "Exempt initrd tasks from restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
++ help
++ If you say Y here, tasks started prior to init will be exempted from
++ grsecurity's chroot restrictions. This option is mainly meant to
++ resolve Plymouth performing privileged operations unnecessarily
++ in a chroot.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++ bool "Single group for auditing"
++ help
++ If you say Y here, the exec and chdir logging features will only operate
++ on a group you specify. This option is recommended if you only want to
++ watch certain users instead of having a large number of logs from the
++ entire system. If the sysctl option is enabled, a sysctl option with
++ name "audit_group" is created.
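++
++ As a usage sketch (this assumes GRKERNSEC_SYSCTL is also enabled and
++ grsec_lock has not yet been set to a non-zero value), auditing can be
++ restricted to the configured group at runtime with:
++ # echo 1 > /proc/sys/kernel/grsecurity/audit_group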
++
++config GRKERNSEC_AUDIT_GID
++ int "GID for auditing"
++ depends on GRKERNSEC_AUDIT_GROUP
++ default 1007
++
++config GRKERNSEC_EXECLOG
++ bool "Exec logging"
++ help
++ If you say Y here, all execve() calls will be logged (since the
++ other exec*() calls are frontends to execve(), all execution
++ will be logged). Useful for shell servers that like to keep track
++ of their users. If the sysctl option is enabled, a sysctl option with
++ name "exec_logging" is created.
++ WARNING: This option when enabled will produce a LOT of logs, especially
++ on an active system.
++
++config GRKERNSEC_RESLOG
++ bool "Resource logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, all attempts to overstep resource limits will
++ be logged with the resource name, the requested size, and the current
++ limit. It is highly recommended that you say Y here. If the sysctl
++ option is enabled, a sysctl option with name "resource_logging" is
++ created. If the RBAC system is enabled, the sysctl value is ignored.
++
++config GRKERNSEC_CHROOT_EXECLOG
++ bool "Log execs within chroot"
++ help
++ If you say Y here, all executions inside a chroot jail will be logged
++ to syslog. This can cause a large number of logs if certain
++ applications (e.g. djb's daemontools) are installed on the system, and
++ is therefore left as an option. If the sysctl option is enabled, a
++ sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_PTRACE
++ bool "Ptrace logging"
++ help
++ If you say Y here, all attempts to attach to a process via ptrace
++ will be logged. If the sysctl option is enabled, a sysctl option
++ with name "audit_ptrace" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++ bool "Chdir logging"
++ help
++ If you say Y here, all chdir() calls will be logged. If the sysctl
++ option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++ bool "(Un)Mount logging"
++ help
++ If you say Y here, all mounts and unmounts will be logged. If the
++ sysctl option is enabled, a sysctl option with name "audit_mount" is
++ created.
++
++config GRKERNSEC_SIGNAL
++ bool "Signal logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, certain important signals will be logged, such as
++ SIGSEGV, which will, as a result, inform you when an error in a program
++ has occurred, which in some cases could mean a possible exploit attempt.
++ If the sysctl option is enabled, a sysctl option with name
++ "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++ bool "Fork failure logging"
++ help
++ If you say Y here, all failed fork() attempts will be logged.
++ This could suggest a fork bomb, or someone attempting to overstep
++ their process limit. If the sysctl option is enabled, a sysctl option
++ with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++ bool "Time change logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, any changes of the system clock will be logged.
++ If the sysctl option is enabled, a sysctl option with name
++ "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++ bool "/proc/<pid>/ipaddr support"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, a new entry will be added to each /proc/<pid>
++ directory that contains the IP address of the person using the task.
++ The IP is carried across local TCP and AF_UNIX stream sockets.
++ This information can be useful for IDS/IPSes to perform remote response
++ to a local attack.
The entry is readable by only the owner of the
++ process (and root, if root has CAP_DAC_OVERRIDE, which can be removed via
++ the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_RWXMAP_LOG
++ bool "Denied RWX mmap/mprotect logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
++ help
++ If you say Y here, calls to mmap() and mprotect() with explicit
++ usage of PROT_WRITE and PROT_EXEC together will be logged when
++ denied by the PAX_MPROTECT feature. This feature will also
++ log other problematic scenarios that can occur when PAX_MPROTECT
++ is enabled on a binary, like textrels and PT_GNU_STACK. If the
++ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
++ is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DMESG
++ bool "Dmesg(8) restriction"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, non-root users will not be able to use dmesg(8)
++ to view the contents of the kernel's circular log buffer.
++ The kernel's log buffer often contains kernel addresses and other
++ identifying information useful to an attacker in fingerprinting a
++ system for a targeted exploit.
++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
++ created.
++
++config GRKERNSEC_HARDEN_PTRACE
++ bool "Deter ptrace-based process snooping"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, TTY sniffers and other malicious monitoring
++ programs implemented through ptrace will be defeated. If you
++ have been using the RBAC system, this option has already been
++ enabled for several years for all users, with the ability to make
++ fine-grained exceptions.
++
++ This option only affects the ability of non-root users to ptrace
++ processes that are not a descendant of the ptracing process.
++ This means that strace ./binary and gdb ./binary will still work,
++ but attaching to arbitrary processes will not. If the sysctl
++ option is enabled, a sysctl option with name "harden_ptrace" is
++ created.
++
++config GRKERNSEC_PTRACE_READEXEC
++ bool "Require read access to ptrace sensitive binaries"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, unprivileged users will not be able to ptrace
++ unreadable binaries. This option is useful in environments that
++ remove the read bits (e.g. file mode 4711) from suid binaries to
++ prevent infoleaking of their contents. This option adds
++ consistency to the use of that file mode, as otherwise the binary
++ could be read out by running it without privileges under ptrace.
++
++ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
++ is created.
++
++config GRKERNSEC_SETXID
++ bool "Enforce consistent multithreaded privileges"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
++ help
++ If you say Y here, a change from a root uid to a non-root uid
++ in a multithreaded application will cause the resulting uids,
++ gids, supplementary groups, and capabilities in that thread
++ to be propagated to the other threads of the process. In most
++ cases this is unnecessary, as glibc will emulate this behavior
++ on behalf of the application. Other libcs do not act in the
++ same way, allowing the other threads of the process to continue
++ running with root privileges. If the sysctl option is enabled,
++ a sysctl option with name "consistent_setxid" is created.
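++
++ As an illustrative sketch (not part of this patch), the hazard this
++ option addresses can be reproduced by a program that issues the raw
++ setuid syscall directly, bypassing glibc's signal-based propagation:
++
++   /* build with: gcc -pthread sketch.c; run as root */
++   #include <pthread.h>
++   #include <stdio.h>
++   #include <unistd.h>
++   #include <sys/syscall.h>
++
++   static void *worker(void *unused)
++   {
++           (void)unused;
++           sleep(2); /* outlives the main thread's uid change */
++           printf("worker uid: %ld\n", syscall(SYS_getuid));
++           return NULL;
++   }
++
++   int main(void)
++   {
++           pthread_t t;
++           pthread_create(&t, NULL, worker, NULL);
++           sleep(1);
++           syscall(SYS_setuid, 65534); /* raw syscall: affects only this thread */
++           printf("main uid: %ld\n", syscall(SYS_getuid));
++           pthread_join(t, NULL);
++           return 0;
++   }
++
++ Without this option (and without a libc that emulates the propagation),
++ the worker thread keeps uid 0 after the main thread has dropped to
++ uid 65534.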
++
++config GRKERNSEC_HARDEN_IPC
++ bool "Disallow access to overly-permissive IPC objects"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on SYSVIPC
++ help
++ If you say Y here, access to overly-permissive IPC objects (shared
++ memory, message queues, and semaphores) will be denied for processes
++ meeting either of the following criteria, beyond normal permission checks:
++ 1) If the IPC object is world-accessible and the euid doesn't match
++    that of the creator or current uid for the IPC object
++ 2) If the IPC object is group-accessible and the egid doesn't
++    match that of the creator or current gid for the IPC object
++ It's a common error to grant too much permission to these objects,
++ with impact ranging from denial of service and information leaking to
++ privilege escalation. This feature was developed in response to
++ research by Tim Brown:
++ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
++ who found hundreds of such insecure usages. Processes with
++ CAP_IPC_OWNER are still permitted to access these IPC objects.
++ If the sysctl option is enabled, a sysctl option with name
++ "harden_ipc" is created.
++
++config GRKERNSEC_TPE
++ bool "Trusted Path Execution (TPE)"
++ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++ help
++ If you say Y here, you will be able to choose a GID to add to the
++ supplementary groups of users you want to mark as "untrusted."
++ These users will not be able to execute any files that are not in
++ root-owned directories writable only by root. If the sysctl option
++ is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++ bool "Partially restrict all non-root users"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, all non-root users will be covered under
++ a weaker TPE restriction. This is separate from, and in addition to,
++ the main TPE options that you have selected elsewhere. Thus, if a
++ "trusted" GID is chosen, this restriction applies to even that GID.
++ Under this restriction, all non-root users will only be allowed to
++ execute files in directories they own that are not group or
++ world-writable, or in directories owned by root and writable only by
++ root. If the sysctl option is enabled, a sysctl option with name
++ "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_INVERT
++ bool "Invert GID option"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, the group you specify in the TPE configuration will
++ decide what group TPE restrictions will be *disabled* for. This
++ option is useful if you want TPE restrictions to be applied to most
++ users on the system. If the sysctl option is enabled, a sysctl option
++ with name "tpe_invert" is created. Unlike other sysctl options, this
++ entry will default to on for backward compatibility.
++
++config GRKERNSEC_TPE_GID
++ int
++ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
++ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
++
++config GRKERNSEC_TPE_UNTRUSTED_GID
++ int "GID for TPE-untrusted users"
++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *enabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
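++
++ A minimal usage sketch (the group name "tpeuntrusted" and the user
++ "someuser" are hypothetical; the GID matches the default above):
++ # groupadd -g 1005 tpeuntrusted
++ # usermod -aG tpeuntrusted someuser
++ someuser will then only be able to execute files located in
++ root-owned directories that are writable only by root.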
++
++config GRKERNSEC_TPE_TRUSTED_GID
++ int "GID for TPE-trusted users"
++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
++
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_BLACKHOLE
++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on NET
++ help
++ If you say Y here, neither TCP resets nor ICMP
++ destination-unreachable packets will be sent in response to packets
++ sent to ports for which no associated listening process exists.
++ This feature supports both IPv4 and IPv6 and exempts the
++ loopback interface from blackholing. Enabling this feature
++ makes a host more resilient to DoS attacks and reduces network
++ visibility against scanners.
++
++ The blackhole feature as-implemented is equivalent to the FreeBSD
++ blackhole feature, as it prevents RST responses to all packets, not
++ just SYNs. Under most application behavior this causes no
++ problems, but applications (like haproxy) may not close certain
++ connections in a way that cleanly terminates them on the remote
++ end, leaving the remote host in LAST_ACK state. Because of this
++ side-effect and to prevent intentional LAST_ACK DoSes, this
++ feature also adds automatic mitigation against such attacks.
++ The mitigation drastically reduces the amount of time a socket
++ can spend in LAST_ACK state. If you're using haproxy and not
++ all servers it connects to have this option enabled, consider
++ disabling this feature on the haproxy host.
++
++ If the sysctl option is enabled, two sysctl options with names
++ "ip_blackhole" and "lastack_retries" will be created.
++ While "ip_blackhole" takes the standard zero/non-zero on/off
++ toggle, "lastack_retries" uses the same kinds of values as
++ "tcp_retries1" and "tcp_retries2". The default value of 4
++ prevents a socket from lasting more than 45 seconds in LAST_ACK
++ state.
++
++config GRKERNSEC_NO_SIMULT_CONNECT
++ bool "Disable TCP Simultaneous Connect"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on NET
++ help
++ If you say Y here, a feature by Willy Tarreau will be enabled that
++ removes a weakness in Linux's strict implementation of TCP that
++ allows two clients to connect to each other without either entering
++ a listening state. The weakness allows an attacker to easily prevent
++ a client from connecting to a known server provided the source port
++ for the connection is guessed correctly.
++
++ As the weakness could be used to prevent an antivirus or IPS from
++ fetching updates, or prevent an SSL gateway from fetching a CRL,
++ it should be eliminated by enabling this option. Though Linux is
++ one of few operating systems supporting simultaneous connect, it
++ has no legitimate use in practice and is rarely supported by firewalls.
++
++config GRKERNSEC_SOCKET
++ bool "Socket restrictions"
++ depends on NET
++ help
++ If you say Y here, you will be able to choose from several options.
++ If you assign a GID on your system and add it to the supplementary
++ groups of users you want to restrict socket access to, this patch
++ will perform up to three things, based on the option(s) you choose.
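++
++ As an example sketch (the group name "nosocket" and the user
++ "someuser" are hypothetical; the GID and sysctl names match the
++ "Deny any sockets to group" sub-option below):
++ # groupadd -g 1004 nosocket
++ # usermod -aG nosocket someuser
++ # echo 1004 > /proc/sys/kernel/grsecurity/socket_all_gid
++ # echo 1 > /proc/sys/kernel/grsecurity/socket_all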
++
++config GRKERNSEC_SOCKET_ALL
++ bool "Deny any sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine or run server
++ applications from your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++ int "GID to deny all sockets for"
++ depends on GRKERNSEC_SOCKET_ALL
++ default 1004
++ help
++ Here you can choose the GID to disable socket access for. Remember to
++ add the users you want socket access disabled for to the GID
++ specified here. If the sysctl option is enabled, a sysctl option
++ with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++ bool "Deny client sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine, but will be
++ able to run servers. If this option is enabled, all users in the group
++ you specify will have to use passive mode when initiating FTP transfers
++ from the shell on your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++ int "GID to deny client sockets for"
++ depends on GRKERNSEC_SOCKET_CLIENT
++ default 1003
++ help
++ Here you can choose the GID to disable client socket access for.
++ Remember to add the users you want client socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++ bool "Deny server sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to run server applications from your machine. If the sysctl
++ option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++ int "GID to deny server sockets for"
++ depends on GRKERNSEC_SOCKET_SERVER
++ default 1002
++ help
++ Here you can choose the GID to disable server socket access for.
++ Remember to add the users you want server socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_server_gid" is created.
++
++endmenu
++
++menu "Physical Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DENYUSB
++ bool "Deny new USB connections after toggle"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on SYSCTL && USB_SUPPORT
++ help
++ If you say Y here, a new sysctl option with name "deny_new_usb"
++ will be created. Setting its value to 1 will prevent any new
++ USB devices from being recognized by the OS. Any attempted USB
++ device insertion will be logged. This option is intended to be
++ used against custom USB devices designed to exploit vulnerabilities
++ in various USB device drivers.
++
++ For greatest effectiveness, this sysctl should be set after any
++ relevant init scripts. This option is safe to enable in distros
++ as each user can choose whether or not to toggle the sysctl.
++
++config GRKERNSEC_DENYUSB_FORCE
++ bool "Reject all USB devices not connected at boot"
++ select USB
++ depends on GRKERNSEC_DENYUSB
++ help
++ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
++ that doesn't involve a sysctl entry. This option should only be
++ enabled if you're sure you want to deny all new USB connections
++ at runtime and don't want to modify init scripts. This should not
++ be enabled by distros. It forces the core USB code to be built
++ into the kernel image so that all devices connected at boot time
++ can be recognized and new USB device connections can be prevented
++ prior to init running.
++
++endmenu
++
++menu "Sysctl Support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++ bool "Sysctl support"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, you will be able to change the options that
++ grsecurity runs with at bootup, without having to recompile your
++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
++ to enable (1) or disable (0) various features. All the sysctl entries
++ are mutable until the "grsec_lock" entry is set to a non-zero value.
++ All features enabled in the kernel configuration are disabled at boot
++ if you do not say Y to the "Turn on features by default" option.
++ All options should be set at startup, and the grsec_lock entry should
++ be set to a non-zero value after all the options are set.
++ *THIS IS EXTREMELY IMPORTANT*
++
++config GRKERNSEC_SYSCTL_DISTRO
++ bool "Extra sysctl support for distro makers (READ HELP)"
++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
++ help
++ If you say Y here, additional sysctl options will be created
++ for features that affect processes running as root. Therefore,
++ it is critical when using this option that the grsec_lock entry be
++ enabled after boot. Only distros that ship prebuilt kernel packages
++ with this option enabled and that can ensure grsec_lock is set
++ after boot should use this option.
++ *Failure to set grsec_lock after boot makes all grsec features
++ this option covers useless*
++
++ Currently this option creates the following sysctl entries:
++ "Disable Privileged I/O": "disable_priv_io"
++
++config GRKERNSEC_SYSCTL_ON
++ bool "Turn on features by default"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_SYSCTL
++ help
++ If you say Y here, instead of having all features enabled in the
++ kernel configuration disabled at boot time, the features will be
++ enabled at boot time. It is recommended you say Y here unless
++ there is some reason you would want all sysctl-tunable features to
++ be disabled by default. As mentioned elsewhere, it is important
++ to enable the grsec_lock entry once you have finished modifying
++ the sysctl entries.
++
++endmenu
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++ int "Seconds in between log messages (minimum)"
++ default 10
++ help
++ This option allows you to enforce the number of seconds between
++ grsecurity log messages. The default should be suitable for most
++ people; however, if you choose to change it, choose a value small enough
++ to allow informative logs to be produced, but large enough to
++ prevent flooding.
++
++ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
++ any rate limiting on grsecurity log messages.
++
++config GRKERNSEC_FLOODBURST
++ int "Number of messages in a burst (maximum)"
++ default 6
++ help
++ This option allows you to choose the maximum number of messages allowed
++ within the flood time interval you chose in a separate option. The
++ default should be suitable for most people; however, if you find that
++ many of your logs are being interpreted as flooding, you may want to
++ raise this value.
++ ++ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable ++ any rate limiting on grsecurity log messages. ++ ++endmenu +diff --git a/grsecurity/Makefile b/grsecurity/Makefile +new file mode 100644 +index 0000000..30ababb +--- /dev/null ++++ b/grsecurity/Makefile +@@ -0,0 +1,54 @@ ++# grsecurity – access control and security hardening for Linux ++# All code in this directory and various hooks located throughout the Linux kernel are ++# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc. ++# http://www.grsecurity.net spender@grsecurity.net ++# ++# This program is free software; you can redistribute it and/or ++# modify it under the terms of the GNU General Public License version 2 ++# as published by the Free Software Foundation. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software ++# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ++ ++KBUILD_CFLAGS += -Werror ++ ++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ ++ grsec_mount.o grsec_sig.o grsec_sysctl.o \ ++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \ ++ grsec_usb.o grsec_ipc.o grsec_proc.o ++ ++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \ ++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ ++ gracl_learn.o grsec_log.o gracl_policy.o ++ifdef CONFIG_COMPAT ++obj-$(CONFIG_GRKERNSEC) += gracl_compat.o ++endif ++ ++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o ++ ++ifdef CONFIG_NET ++obj-y += grsec_sock.o ++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o ++endif ++ ++ifndef CONFIG_GRKERNSEC ++obj-y += grsec_disabled.o ++endif ++ ++ifdef CONFIG_GRKERNSEC_HIDESYM ++extra-y := grsec_hidesym.o ++$(obj)/grsec_hidesym.o: ++ @-chmod -f 500 /boot ++ @-chmod -f 500 /lib/modules ++ @-chmod -f 500 /lib64/modules ++ @-chmod -f 500 /lib32/modules ++ @-chmod -f 700 . 
++ @-chmod -f 700 $(objtree) ++ @echo ' grsec: protected kernel image paths' ++endif +diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c +new file mode 100644 +index 0000000..e56396f +--- /dev/null ++++ b/grsecurity/gracl.c +@@ -0,0 +1,2679 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/namei.h> ++#include <linux/mount.h> ++#include <linux/tty.h> ++#include <linux/proc_fs.h> ++#include <linux/lglock.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/types.h> ++#include <linux/sysctl.h> ++#include <linux/netdevice.h> ++#include <linux/ptrace.h> ++#include <linux/gracl.h> ++#include <linux/gralloc.h> ++#include <linux/security.h> ++#include <linux/grinternal.h> ++#include <linux/pid_namespace.h> ++#include <linux/stop_machine.h> ++#include <linux/fdtable.h> ++#include <linux/percpu.h> ++#include <linux/lglock.h> ++#include <linux/hugetlb.h> ++#include <linux/posix-timers.h> ++#include <linux/prefetch.h> ++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) ++#include <linux/magic.h> ++#include <linux/pagemap.h> ++#include "../fs/btrfs/async-thread.h" ++#include "../fs/btrfs/ctree.h" ++#include "../fs/btrfs/btrfs_inode.h" ++#endif ++#include "../fs/mount.h" ++ ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++ ++#define FOR_EACH_ROLE_START(role) \ ++ role = running_polstate.role_list; \ ++ while (role) { ++ ++#define FOR_EACH_ROLE_END(role) \ ++ role = role->prev; \ ++ } ++ ++extern struct path gr_real_root; ++ ++static struct gr_policy_state running_polstate; ++struct gr_policy_state *polstate = &running_polstate; ++extern struct gr_alloc_state *current_alloc_state; ++ ++extern char *gr_shared_page[4]; ++DEFINE_RWLOCK(gr_inode_lock); ++ ++static unsigned int gr_status __read_only = GR_STATUS_INIT; ++ ++#ifdef CONFIG_NET ++extern struct vfsmount *sock_mnt; ++#endif ++ ++extern struct vfsmount *pipe_mnt; ++extern struct vfsmount *shm_mnt; ++ ++#ifdef CONFIG_HUGETLBFS ++extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; ++#endif ++ ++extern u16 acl_sp_role_value; ++extern struct acl_object_label *fakefs_obj_rw; ++extern struct acl_object_label *fakefs_obj_rwx; ++ ++int gr_acl_is_enabled(void) ++{ ++ return (gr_status & GR_READY); ++} ++ ++void gr_enable_rbac_system(void) ++{ ++ pax_open_kernel(); ++ gr_status |= GR_READY; ++ pax_close_kernel(); ++} ++ ++int gr_rbac_disable(void *unused) ++{ ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ return 0; ++} ++ ++static inline dev_t __get_dev(const struct dentry *dentry) ++{ ++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) ++ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC) ++ return BTRFS_I(dentry->d_inode)->root->anon_dev; ++ else ++#endif ++ return dentry->d_sb->s_dev; ++} ++ ++dev_t gr_get_dev_from_dentry(struct dentry *dentry) ++{ ++ return __get_dev(dentry); ++} ++ ++static char gr_task_roletype_to_char(struct task_struct *task) ++{ ++ switch (task->role->roletype & ++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | ++ GR_ROLE_SPECIAL)) { ++ case GR_ROLE_DEFAULT: ++ return 'D'; ++ case GR_ROLE_USER: ++ return 'U'; ++ case GR_ROLE_GROUP: ++ return 'G'; ++ case GR_ROLE_SPECIAL: ++ return 'S'; ++ } ++ ++ return 'X'; ++} ++ ++char gr_roletype_to_char(void) ++{ ++ return gr_task_roletype_to_char(current); ++} ++ ++__inline__ int ++gr_acl_tpe_check(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) 
++ return 0; ++ if (current->role->roletype & GR_ROLE_TPE) ++ return 1; ++ else ++ return 0; ++} ++ ++int ++gr_handle_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) && ++ grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ !capable(CAP_SYS_RAWIO)) ++ return 1; ++#endif ++ return 0; ++} ++ ++int ++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) ++{ ++ if (likely(lena != lenb)) ++ return 0; ++ ++ return !memcmp(a, b, lena); ++} ++ ++static int prepend(char **buffer, int *buflen, const char *str, int namelen) ++{ ++ *buflen -= namelen; ++ if (*buflen < 0) ++ return -ENAMETOOLONG; ++ *buffer -= namelen; ++ memcpy(*buffer, str, namelen); ++ return 0; ++} ++ ++static int prepend_name(char **buffer, int *buflen, struct qstr *name) ++{ ++ return prepend(buffer, buflen, name->name, name->len); ++} ++ ++static int prepend_path(const struct path *path, struct path *root, ++ char **buffer, int *buflen) ++{ ++ struct dentry *dentry = path->dentry; ++ struct vfsmount *vfsmnt = path->mnt; ++ struct mount *mnt = real_mount(vfsmnt); ++ bool slash = false; ++ int error = 0; ++ ++ while (dentry != root->dentry || vfsmnt != root->mnt) { ++ struct dentry * parent; ++ ++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { ++ /* Global root? */ ++ if (!mnt_has_parent(mnt)) { ++ goto out; ++ } ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ vfsmnt = &mnt->mnt; ++ continue; ++ } ++ parent = dentry->d_parent; ++ prefetch(parent); ++ spin_lock(&dentry->d_lock); ++ error = prepend_name(buffer, buflen, &dentry->d_name); ++ spin_unlock(&dentry->d_lock); ++ if (!error) ++ error = prepend(buffer, buflen, "/", 1); ++ if (error) ++ break; ++ ++ slash = true; ++ dentry = parent; ++ } ++ ++out: ++ if (!error && !slash) ++ error = prepend(buffer, buflen, "/", 1); ++ ++ return error; ++} ++ ++/* this must be called with mount_lock and rename_lock held */ ++ ++static char *__our_d_path(const struct path *path, struct path *root, ++ char *buf, int buflen) ++{ ++ char *res = buf + buflen; ++ int error; ++ ++ prepend(&res, &buflen, "\0", 1); ++ error = prepend_path(path, root, &res, &buflen); ++ if (error) ++ return ERR_PTR(error); ++ ++ return res; ++} ++ ++static char * ++gen_full_path(struct path *path, struct path *root, char *buf, int buflen) ++{ ++ char *retval; ++ ++ retval = __our_d_path(path, root, buf, buflen); ++ if (unlikely(IS_ERR(retval))) ++ retval = strcpy(buf, "<path too long>"); ++ else if (unlikely(retval[1] == '/' && retval[2] == '\0')) ++ retval[1] = '\0'; ++ ++ return retval; ++} ++ ++static char * ++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ struct path path; ++ char *res; ++ ++ path.dentry = (struct dentry *)dentry; ++ path.mnt = (struct vfsmount *)vfsmnt; ++ ++ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called ++ by the RBAC system */ ++ res = gen_full_path(&path, &gr_real_root, buf, buflen); ++ ++ return res; ++} ++ ++static char * ++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ char *res; ++ struct path path; ++ struct path root; ++ struct task_struct *reaper = init_pid_ns.child_reaper; ++ ++ path.dentry = (struct dentry *)dentry; ++ path.mnt = (struct vfsmount *)vfsmnt; ++ ++ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the 
RBAC system */ ++ get_fs_root(reaper->fs, &root); ++ ++ read_seqlock_excl(&mount_lock); ++ write_seqlock(&rename_lock); ++ res = gen_full_path(&path, &root, buf, buflen); ++ write_sequnlock(&rename_lock); ++ read_sequnlock_excl(&mount_lock); ++ ++ path_put(&root); ++ return res; ++} ++ ++char * ++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ read_seqlock_excl(&mount_lock); ++ write_seqlock(&rename_lock); ++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++ write_sequnlock(&rename_lock); ++ read_sequnlock_excl(&mount_lock); ++ return ret; ++} ++ ++static char * ++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ char *buf; ++ int buflen; ++ ++ read_seqlock_excl(&mount_lock); ++ write_seqlock(&rename_lock); ++ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); ++ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6); ++ buflen = (int)(ret - buf); ++ if (buflen >= 5) ++ prepend(&ret, &buflen, "/proc", 5); ++ else ++ ret = strcpy(buf, "<path too long>"); ++ write_sequnlock(&rename_lock); ++ read_sequnlock_excl(&mount_lock); ++ return ret; ++} ++ ++char * ++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++__inline__ __u32 ++to_gr_audit(const __u32 reqmode) ++{ ++ /* masks off auditable permission flags, then shifts them to create ++ auditing flags, and adds the special case of append auditing if ++ we're requesting write */ ++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? 
GR_AUDIT_APPEND : 0)); ++} ++ ++struct acl_role_label * ++__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, ++ const gid_t gid) ++{ ++ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size); ++ struct acl_role_label *match; ++ struct role_allowed_ip *ipp; ++ unsigned int x; ++ u32 curr_ip = task->signal->saved_ip; ++ ++ match = state->acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == uid) ++ goto found; ++ } ++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) ++ break; ++ match = match->next; ++ } ++found: ++ if (match == NULL) { ++ try_group: ++ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size); ++ match = state->acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == gid) ++ goto found2; ++ } ++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) ++ break; ++ match = match->next; ++ } ++found2: ++ if (match == NULL) ++ match = state->default_role; ++ if (match->allowed_ips == NULL) ++ return match; ++ else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ match = state->default_role; ++ } ++ } else if (match->allowed_ips == NULL) { ++ return match; ++ } else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ goto try_group; ++ } ++ ++ return match; ++} ++ ++static struct acl_role_label * ++lookup_acl_role_label(const struct task_struct *task, const uid_t uid, ++ const gid_t gid) ++{ ++ return __lookup_acl_role_label(&running_polstate, task, uid, gid); ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if 
(match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++struct name_entry * ++__lookup_name_entry(const struct gr_policy_state *state, const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % state->name_set.n_size; ++ struct name_entry *match; ++ ++ match = state->name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) ++ match = match->next; ++ ++ return match; ++} ++ ++static struct name_entry * ++lookup_name_entry(const char *name) ++{ ++ return __lookup_name_entry(&running_polstate, name); ++} ++ ++static struct name_entry * ++lookup_name_entry_create(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % running_polstate.name_set.n_size; ++ struct name_entry *match; ++ ++ match = running_polstate.name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ !match->deleted)) ++ match = match->next; ++ ++ if (match && match->deleted) ++ return match; ++ ++ match = running_polstate.name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ match->deleted)) ++ match = match->next; ++ ++ if (match && !match->deleted) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct inodev_entry * ++lookup_inodev_entry(const ino_t ino, const dev_t dev) ++{ ++ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = running_polstate.inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != ino || match->nentry->device != dev)) ++ match = match->next; ++ ++ return match; ++} ++ ++void ++__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry) ++{ ++ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device, ++ state->inodev_set.i_size); ++ struct inodev_entry **curr; ++ ++ entry->prev = NULL; ++ ++ curr = &state->inodev_set.i_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = entry; ++ ++ entry->next = *curr; ++ *curr = entry; ++ ++ return; ++} ++ ++static void ++insert_inodev_entry(struct inodev_entry *entry) ++{ ++ __insert_inodev_entry(&running_polstate, entry); ++} ++ ++void ++insert_acl_obj_label(struct acl_object_label *obj, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = ++ gr_fhash(obj->inode, obj->device, subj->obj_hash_size); ++ struct acl_object_label **curr; ++ ++ obj->prev = NULL; ++ ++ curr = &subj->obj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++void 
++insert_acl_subj_label(struct acl_subject_label *obj, ++ struct acl_role_label *role) ++{ ++ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size); ++ struct acl_subject_label **curr; ++ ++ obj->prev = NULL; ++ ++ curr = &role->subj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++/* derived from glibc fnmatch() 0: match, 1: no match*/ ++ ++static int ++glob_match(const char *p, const char *n) ++{ ++ char c; ++ ++ while ((c = *p++) != '\0') { ++ switch (c) { ++ case '?': ++ if (*n == '\0') ++ return 1; ++ else if (*n == '/') ++ return 1; ++ break; ++ case '\\': ++ if (*n != c) ++ return 1; ++ break; ++ case '*': ++ for (c = *p++; c == '?' || c == '*'; c = *p++) { ++ if (*n == '/') ++ return 1; ++ else if (c == '?') { ++ if (*n == '\0') ++ return 1; ++ else ++ ++n; ++ } ++ } ++ if (c == '\0') { ++ return 0; ++ } else { ++ const char *endp; ++ ++ if ((endp = strchr(n, '/')) == NULL) ++ endp = n + strlen(n); ++ ++ if (c == '[') { ++ for (--p; n < endp; ++n) ++ if (!glob_match(p, n)) ++ return 0; ++ } else if (c == '/') { ++ while (*n != '\0' && *n != '/') ++ ++n; ++ if (*n == '/' && !glob_match(p, n + 1)) ++ return 0; ++ } else { ++ for (--p; n < endp; ++n) ++ if (*n == c && !glob_match(p, n)) ++ return 0; ++ } ++ ++ return 1; ++ } ++ case '[': ++ { ++ int not; ++ char cold; ++ ++ if (*n == '\0' || *n == '/') ++ return 1; ++ ++ not = (*p == '!' || *p == '^'); ++ if (not) ++ ++p; ++ ++ c = *p++; ++ for (;;) { ++ unsigned char fn = (unsigned char)*n; ++ ++ if (c == '\0') ++ return 1; ++ else { ++ if (c == fn) ++ goto matched; ++ cold = c; ++ c = *p++; ++ ++ if (c == '-' && *p != ']') { ++ unsigned char cend = *p++; ++ ++ if (cend == '\0') ++ return 1; ++ ++ if (cold <= fn && fn <= cend) ++ goto matched; ++ ++ c = *p++; ++ } ++ } ++ ++ if (c == ']') ++ break; ++ } ++ if (!not) ++ return 1; ++ break; ++ matched: ++ while (c != ']') { ++ if (c == '\0') ++ return 1; ++ ++ c = *p++; ++ } ++ if (not) ++ return 1; ++ } ++ break; ++ default: ++ if (c != *n) ++ return 1; ++ } ++ ++ ++n; ++ } ++ ++ if (*n == '\0') ++ return 0; ++ ++ if (*n == '/') ++ return 0; ++ ++ return 1; ++} ++ ++static struct acl_object_label * ++chk_glob_label(struct acl_object_label *globbed, ++ const struct dentry *dentry, const struct vfsmount *mnt, char **path) ++{ ++ struct acl_object_label *tmp; ++ ++ if (*path == NULL) ++ *path = gr_to_filename_nolock(dentry, mnt); ++ ++ tmp = globbed; ++ ++ while (tmp) { ++ if (!glob_match(tmp->filename, *path)) ++ return tmp; ++ tmp = tmp->next; ++ } ++ ++ return NULL; ++} ++ ++static struct acl_object_label * ++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ const ino_t curr_ino, const dev_t curr_dev, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ struct acl_subject_label *tmpsubj; ++ struct acl_object_label *retval; ++ struct acl_object_label *retval2; ++ ++ tmpsubj = (struct acl_subject_label *) subj; ++ read_lock(&gr_inode_lock); ++ do { ++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); ++ if (retval) { ++ if (checkglob && retval->globbed) { ++ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path); ++ if (retval2) ++ retval = retval2; ++ } ++ break; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++ read_unlock(&gr_inode_lock); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ 
struct dentry *curr_dentry, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ int newglob = checkglob; ++ ino_t inode; ++ dev_t device; ++ ++ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking ++ as we don't want a / * rule to match instead of the / object ++ don't do this for create lookups that call this function though, since they're looking up ++ on the parent and thus need globbing checks on all paths ++ */ ++ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB) ++ newglob = GR_NO_GLOB; ++ ++ spin_lock(&curr_dentry->d_lock); ++ inode = curr_dentry->d_inode->i_ino; ++ device = __get_dev(curr_dentry); ++ spin_unlock(&curr_dentry->d_lock); ++ ++ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob); ++} ++ ++#ifdef CONFIG_HUGETLBFS ++static inline bool ++is_hugetlbfs_mnt(const struct vfsmount *mnt) ++{ ++ int i; ++ for (i = 0; i < HUGE_MAX_HSTATE; i++) { ++ if (unlikely(hugetlbfs_vfsmount[i] == mnt)) ++ return true; ++ } ++ ++ return false; ++} ++#endif ++ ++static struct acl_object_label * ++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path, const int checkglob) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct mount *real_mnt = real_mount(mnt); ++ struct acl_object_label *retval; ++ struct dentry *parent; ++ ++ read_seqlock_excl(&mount_lock); ++ write_seqlock(&rename_lock); ++ ++ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt || ++#ifdef CONFIG_NET ++ mnt == sock_mnt || ++#endif ++#ifdef CONFIG_HUGETLBFS ++ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) || ++#endif ++ /* ignore Eric Biederman */ ++ IS_PRIVATE(l_dentry->d_inode))) { ++ retval = (subj->mode & GR_SHMEXEC) ? 
fakefs_obj_rwx : fakefs_obj_rw; ++ goto out; ++ } ++ ++ for (;;) { ++ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt) ++ break; ++ ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (!mnt_has_parent(real_mnt)) ++ break; ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = real_mnt->mnt_mountpoint; ++ real_mnt = real_mnt->mnt_parent; ++ mnt = &real_mnt->mnt; ++ continue; ++ } ++ ++ parent = dentry->d_parent; ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = parent; ++ } ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ ++ /* gr_real_root is pinned so we don't have to hold a reference */ ++ if (retval == NULL) ++ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob); ++out: ++ write_sequnlock(&rename_lock); ++ read_sequnlock_excl(&mount_lock); ++ ++ BUG_ON(retval == NULL); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path) ++{ ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB); ++} ++ ++struct acl_subject_label * ++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_role_label *role) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct mount *real_mnt = real_mount(mnt); ++ struct acl_subject_label *retval; ++ struct dentry *parent; ++ ++ read_seqlock_excl(&mount_lock); ++ write_seqlock(&rename_lock); ++ ++ for (;;) { ++ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt) ++ break; ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (!mnt_has_parent(real_mnt)) ++ break; ++ ++ spin_lock(&dentry->d_lock); ++ read_lock(&gr_inode_lock); ++ retval = ++ lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ spin_unlock(&dentry->d_lock); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = real_mnt->mnt_mountpoint; ++ real_mnt = real_mnt->mnt_parent; ++ mnt = &real_mnt->mnt; ++ continue; ++ } ++ ++ spin_lock(&dentry->d_lock); ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ parent = dentry->d_parent; ++ spin_unlock(&dentry->d_lock); ++ ++ if (retval != NULL) ++ goto out; ++ ++ dentry = parent; ++ } ++ ++ spin_lock(&dentry->d_lock); ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ __get_dev(dentry), role); ++ read_unlock(&gr_inode_lock); ++ spin_unlock(&dentry->d_lock); ++ ++ if (unlikely(retval == NULL)) { ++ /* gr_real_root is pinned, we don't need to hold a reference */ ++ read_lock(&gr_inode_lock); ++ retval = 
lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino, ++ __get_dev(gr_real_root.dentry), role); ++ read_unlock(&gr_inode_lock); ++ } ++out: ++ write_sequnlock(&rename_lock); ++ read_sequnlock_excl(&mount_lock); ++ ++ BUG_ON(retval == NULL); ++ ++ return retval; ++} ++ ++void ++assign_special_role(const char *rolename) ++{ ++ struct acl_object_label *obj; ++ struct acl_role_label *r; ++ struct acl_role_label *assigned = NULL; ++ struct task_struct *tsk; ++ struct file *filp; ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ assigned = r; ++ break; ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ if (!assigned) ++ return; ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ ++ tsk = current->real_parent; ++ if (tsk == NULL) ++ goto out_unlock; ++ ++ filp = tsk->exec_file; ++ if (filp == NULL) ++ goto out_unlock; ++ ++ tsk->is_writable = 0; ++ tsk->inherited = 0; ++ ++ tsk->acl_sp_role = 1; ++ tsk->acl_role_id = ++acl_sp_role_value; ++ tsk->role = assigned; ++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role); ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, ++ tsk->acl->filename, tsk->comm, task_pid_nr(tsk)); ++#endif ++ ++out_unlock: ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return; ++} ++ ++ ++static void ++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, ++ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, ++ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? 
gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip); ++ ++ return; ++} ++ ++static void ++gr_set_proc_res(struct task_struct *task) ++{ ++ struct acl_subject_label *proc; ++ unsigned short i; ++ ++ proc = task->acl; ++ ++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return; ++ ++ for (i = 0; i < RLIM_NLIMITS; i++) { ++ if (!(proc->resmask & (1U << i))) ++ continue; ++ ++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur; ++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max; ++ ++ if (i == RLIMIT_CPU) ++ update_rlimit_cpu(task, proc->res[i].rlim_cur); ++ } ++ ++ return; ++} ++ ++/* both of the below must be called with ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++*/ ++ ++struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename) ++{ ++ char *tmpname; ++ struct acl_subject_label *tmpsubj; ++ struct file *filp; ++ struct name_entry *nmatch; ++ ++ filp = task->exec_file; ++ if (filp == NULL) ++ return NULL; ++ ++ /* the following is to apply the correct subject ++ on binaries running when the RBAC system ++ is enabled, when the binaries have been ++ replaced or deleted since their execution ++ ----- ++ when the RBAC system starts, the inode/dev ++ from exec_file will be one the RBAC system ++ is unaware of. It only knows the inode/dev ++ of the present file on disk, or the absence ++ of it. ++ */ ++ ++ if (filename) ++ nmatch = __lookup_name_entry(state, filename); ++ else { ++ preempt_disable(); ++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt); ++ ++ nmatch = __lookup_name_entry(state, tmpname); ++ preempt_enable(); ++ } ++ tmpsubj = NULL; ++ if (nmatch) { ++ if (nmatch->deleted) ++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role); ++ else ++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role); ++ } ++ /* this also works for the reload case -- if we don't match a potentially inherited subject ++ then we fall back to a normal lookup based on the binary's ino/dev ++ */ ++ if (tmpsubj == NULL) ++ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role); ++ ++ return tmpsubj; ++} ++ ++static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename) ++{ ++ return __gr_get_subject_for_task(&running_polstate, task, filename); ++} ++ ++void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj) ++{ ++ struct acl_object_label *obj; ++ struct file *filp; ++ ++ filp = task->exec_file; ++ ++ task->acl = subj; ++ task->is_writable = 0; ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename); ++#endif ++} ++ ++static void 
gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj) ++{ ++ __gr_apply_subject_to_task(&running_polstate, task, subj); ++} ++ ++__u32 ++gr_search_file(const struct dentry * dentry, const __u32 mode, ++ const struct vfsmount * mnt) ++{ ++ __u32 retval = mode; ++ struct acl_subject_label *curracl; ++ struct acl_object_label *currobj; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ curracl = current->acl; ++ ++ currobj = chk_obj_label(dentry, mnt, curracl); ++ retval = currobj->mode & mode; ++ ++ /* if we're opening a specified transfer file for writing ++ (e.g. /dev/initctl), then transfer our role to init ++ */ ++ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE && ++ current->role->roletype & GR_ROLE_PERSIST)) { ++ struct task_struct *task = init_pid_ns.child_reaper; ++ ++ if (task->role != current->role) { ++ struct acl_subject_label *subj; ++ ++ task->acl_sp_role = 0; ++ task->acl_role_id = current->acl_role_id; ++ task->role = current->role; ++ rcu_read_lock(); ++ read_lock(&grsec_exec_file_lock); ++ subj = gr_get_subject_for_task(task, NULL); ++ gr_apply_subject_to_task(task, subj); ++ read_unlock(&grsec_exec_file_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG); ++ } ++ } ++ ++ if (unlikely ++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) ++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ retval = new_mode; ++ ++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) ++ new_mode |= GR_INHERIT; ++ ++ if (!(mode & GR_NOLEARN)) ++ gr_log_learn(dentry, mnt, new_mode); ++ } ++ ++ return retval; ++} ++ ++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt) ++{ ++ struct name_entry *match; ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *curracl; ++ char *path; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return NULL; ++ ++ preempt_disable(); ++ path = gr_to_filename_rbac(new_dentry, mnt); ++ match = lookup_name_entry_create(path); ++ ++ curracl = current->acl; ++ ++ if (match) { ++ read_lock(&gr_inode_lock); ++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); ++ read_unlock(&gr_inode_lock); ++ ++ if (matchpo) { ++ preempt_enable(); ++ return matchpo; ++ } ++ } ++ ++ // lookup parent ++ ++ matchpo = chk_obj_create_label(parent, mnt, curracl, path); ++ ++ preempt_enable(); ++ return matchpo; ++} ++ ++__u32 ++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, ++ const struct vfsmount * mnt, const __u32 mode) ++{ ++ struct acl_object_label *matchpo; ++ __u32 retval; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ matchpo = gr_get_create_object(new_dentry, parent, mnt); ++ ++ retval = matchpo->mode & mode; ++ ++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) ++ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ gr_log_learn(new_dentry, mnt, new_mode); ++ return new_mode; ++ } ++ ++ return retval; ++} ++ ++__u32 ++gr_check_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, const struct vfsmount * old_mnt) ++{ ++ struct acl_object_label *obj; ++ __u32 oldmode, newmode; ++ __u32 
needmode; ++ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ | ++ GR_DELETE | GR_INHERIT; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (GR_CREATE | GR_LINK); ++ ++ obj = chk_obj_label(old_dentry, old_mnt, current->acl); ++ oldmode = obj->mode; ++ ++ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt); ++ newmode = obj->mode; ++ ++ needmode = newmode & checkmodes; ++ ++ // old name for hardlink must have at least the permissions of the new name ++ if ((oldmode & needmode) != needmode) ++ goto bad; ++ ++ // if old name had restrictions/auditing, make sure the new name does as well ++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); ++ ++ // don't allow hardlinking of suid/sgid/fcapped files without permission ++ if (is_privileged_binary(old_dentry)) ++ needmode |= GR_SETID; ++ ++ if ((newmode & needmode) != needmode) ++ goto bad; ++ ++ // enforce minimum permissions ++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) ++ return newmode; ++bad: ++ needmode = oldmode; ++ if (is_privileged_binary(old_dentry)) ++ needmode |= GR_SETID; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK); ++ return (GR_CREATE | GR_LINK); ++ } else if (newmode & GR_SUPPRESS) ++ return GR_SUPPRESS; ++ else ++ return 0; ++} ++ ++int ++gr_check_hidden_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY) || !task)) ++ return 0; ++ ++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ task->acl != current->acl) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) ++{ ++ struct task_struct *p; ++ int ret = 0; ++ ++ if (unlikely(!(gr_status & GR_READY) || !pid)) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ p->acl != current->acl) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ ++ return ret; ++} ++ ++void ++gr_copy_label(struct task_struct *tsk) ++{ ++ struct task_struct *p = current; ++ ++ tsk->inherited = p->inherited; ++ tsk->acl_sp_role = 0; ++ tsk->acl_role_id = p->acl_role_id; ++ tsk->acl = p->acl; ++ tsk->role = p->role; ++ tsk->signal->used_accept = 0; ++ tsk->signal->curr_ip = p->signal->curr_ip; ++ tsk->signal->saved_ip = p->signal->saved_ip; ++ if (p->exec_file) ++ get_file(p->exec_file); ++ tsk->exec_file = p->exec_file; ++ tsk->is_writable = p->is_writable; ++ if (unlikely(p->signal->used_accept)) { ++ p->signal->curr_ip = 0; ++ p->signal->saved_ip = 0; ++ } ++ ++ return; ++} ++ ++extern int gr_process_kernel_setuid_ban(struct user_struct *user); ++ ++int ++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs) ++{ ++ unsigned int i; ++ __u16 num; ++ uid_t *uidlist; ++ uid_t curuid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ uid_t globalreal, globaleffective, globalfs; ++ ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) ++ struct user_struct *user; ++ ++ if (!uid_valid(real)) ++ goto skipit; ++ ++ /* find user based on global namespace */ ++ ++ globalreal = GR_GLOBAL_UID(real); ++ ++ 
user = find_user(make_kuid(&init_user_ns, globalreal)); ++ if (user == NULL) ++ goto skipit; ++ ++ if (gr_process_kernel_setuid_ban(user)) { ++ /* for find_user */ ++ free_uid(user); ++ return 1; ++ } ++ ++ /* for find_user */ ++ free_uid(user); ++ ++skipit: ++#endif ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_uid_change(real, effective, fs); ++ ++ num = current->acl->user_trans_num; ++ uidlist = current->acl->user_transitions; ++ ++ if (uidlist == NULL) ++ return 0; ++ ++ if (!uid_valid(real)) { ++ realok = 1; ++ globalreal = (uid_t)-1; ++ } else { ++ globalreal = GR_GLOBAL_UID(real); ++ } ++ if (!uid_valid(effective)) { ++ effectiveok = 1; ++ globaleffective = (uid_t)-1; ++ } else { ++ globaleffective = GR_GLOBAL_UID(effective); ++ } ++ if (!uid_valid(fs)) { ++ fsok = 1; ++ globalfs = (uid_t)-1; ++ } else { ++ globalfs = GR_GLOBAL_UID(fs); ++ } ++ ++ if (current->acl->user_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curuid = uidlist[i]; ++ if (globalreal == curuid) ++ realok = 1; ++ if (globaleffective == curuid) ++ effectiveok = 1; ++ if (globalfs == curuid) ++ fsok = 1; ++ } ++ } else if (current->acl->user_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curuid = uidlist[i]; ++ if (globalreal == curuid) ++ break; ++ if (globaleffective == curuid) ++ break; ++ if (globalfs == curuid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal); ++ return 1; ++ } ++} ++ ++int ++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs) ++{ ++ unsigned int i; ++ __u16 num; ++ gid_t *gidlist; ++ gid_t curgid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ gid_t globalreal, globaleffective, globalfs; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_gid_change(real, effective, fs); ++ ++ num = current->acl->group_trans_num; ++ gidlist = current->acl->group_transitions; ++ ++ if (gidlist == NULL) ++ return 0; ++ ++ if (!gid_valid(real)) { ++ realok = 1; ++ globalreal = (gid_t)-1; ++ } else { ++ globalreal = GR_GLOBAL_GID(real); ++ } ++ if (!gid_valid(effective)) { ++ effectiveok = 1; ++ globaleffective = (gid_t)-1; ++ } else { ++ globaleffective = GR_GLOBAL_GID(effective); ++ } ++ if (!gid_valid(fs)) { ++ fsok = 1; ++ globalfs = (gid_t)-1; ++ } else { ++ globalfs = GR_GLOBAL_GID(fs); ++ } ++ ++ if (current->acl->group_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curgid = gidlist[i]; ++ if (globalreal == curgid) ++ realok = 1; ++ if (globaleffective == curgid) ++ effectiveok = 1; ++ if (globalfs == curgid) ++ fsok = 1; ++ } ++ } else if (current->acl->group_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curgid = gidlist[i]; ++ if (globalreal == curgid) ++ break; ++ if (globaleffective == curgid) ++ break; ++ if (globalfs == curgid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
0 : globalfs) : globaleffective) : globalreal); ++ return 1; ++ } ++} ++ ++extern int gr_acl_is_capable(const int cap); ++ ++void ++gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid) ++{ ++ struct acl_role_label *role = task->role; ++ struct acl_subject_label *subj = NULL; ++ struct acl_object_label *obj; ++ struct file *filp; ++ uid_t uid; ++ gid_t gid; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ uid = GR_GLOBAL_UID(kuid); ++ gid = GR_GLOBAL_GID(kgid); ++ ++ filp = task->exec_file; ++ ++ /* kernel process, we'll give them the kernel role */ ++ if (unlikely(!filp)) { ++ task->role = running_polstate.kernel_role; ++ task->acl = running_polstate.kernel_role->root_label; ++ return; ++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) { ++ /* save the current ip at time of role lookup so that the proper ++ IP will be learned for role_allowed_ip */ ++ task->signal->saved_ip = task->signal->curr_ip; ++ role = lookup_acl_role_label(task, uid, gid); ++ } ++ ++ /* don't change the role if we're not a privileged process */ ++ if (role && task->role != role && ++ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) || ++ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID)))) ++ return; ++ ++ /* perform subject lookup in possibly new role ++ we can use this result below in the case where role == task->role ++ */ ++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role); ++ ++ /* if we changed uid/gid, but result in the same role ++ and are using inheritance, don't lose the inherited subject ++ if current subject is other than what normal lookup ++ would result in, we arrived via inheritance, don't ++ lose subject ++ */ ++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && ++ (subj == task->acl))) ++ task->acl = subj; ++ ++ /* leave task->inherited unaffected */ ++ ++ task->role = role; ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename); ++#endif ++ ++ gr_set_proc_res(task); ++ ++ return; ++} ++ ++int ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, ++ const int unsafe_flags) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *newacl; ++ struct acl_object_label *obj; ++ __u32 retmode; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ newacl = chk_subj_label(dentry, mnt, task->role); ++ ++ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then ++ did an exec ++ */ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) || ++ (task->parent->acl->mode & GR_POVERRIDE))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ goto skip_check; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) && ++ !(task->role->roletype & GR_ROLE_GOD) && 
++ !gr_search_file(dentry, GR_PTRACERD, mnt) && ++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ if (unsafe_flags & LSM_UNSAFE_SHARE) ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt); ++ else ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); ++ return -EACCES; ++ } ++ ++skip_check: ++ ++ obj = chk_obj_label(dentry, mnt, task->acl); ++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); ++ ++ if (!(task->acl->mode & GR_INHERITLEARN) && ++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { ++ if (obj->nested) ++ task->acl = obj->nested; ++ else ++ task->acl = newacl; ++ task->inherited = 0; ++ } else { ++ task->inherited = 1; ++ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) ++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); ++ } ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(dentry, mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename); ++#endif ++ return 0; ++} ++ ++/* always called with valid inodev ptr */ ++static void ++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev) ++{ ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *matchps; ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) ++ matchpo->mode |= GR_DELETED; ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ /* nested subjects aren't in the role's subj_hash table */ ++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) ++ matchpo->mode |= GR_DELETED; ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) ++ matchps->mode |= GR_DELETED; ++ FOR_EACH_ROLE_END(role) ++ ++ inodev->nentry->deleted = 1; ++ ++ return; ++} ++ ++void ++gr_handle_delete(const ino_t ino, const dev_t dev) ++{ ++ struct inodev_entry *inodev; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ write_lock(&gr_inode_lock); ++ inodev = lookup_inodev_entry(ino, dev); ++ if (inodev != NULL) ++ do_handle_delete(inodev, ino, dev); ++ write_unlock(&gr_inode_lock); ++ ++ return; ++} ++ ++static void ++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ subj->obj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ 
match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_obj_label(match, subj); ++ } ++ ++ return; ++} ++ ++static void ++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_role_label *role) ++{ ++ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ role->subj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_subj_label(match, role); ++ } ++ ++ return; ++} ++ ++static void ++update_inodev_entry(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice) ++{ ++ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = running_polstate.inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != oldinode || ++ match->nentry->device != olddevice || !match->nentry->deleted)) ++ match = match->next; ++ ++ if (match && (match->nentry->inode == oldinode) ++ && (match->nentry->device == olddevice) && ++ match->nentry->deleted) { ++ if (match->prev == NULL) { ++ running_polstate.inodev_set.i_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->nentry->inode = newinode; ++ match->nentry->device = newdevice; ++ match->nentry->deleted = 0; ++ ++ insert_inodev_entry(match); ++ } ++ ++ return; ++} ++ ++static void ++__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev) ++{ ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role); ++ ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if ((subj->inode == ino) && (subj->device == dev)) { ++ subj->inode = ino; ++ subj->device = dev; ++ } ++ /* nested subjects aren't in the role's subj_hash table */ ++ update_acl_obj_label(matchn->inode, matchn->device, ++ ino, dev, subj); ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ update_acl_obj_label(matchn->inode, matchn->device, ++ ino, dev, subj); ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_ROLE_END(role) ++ ++ update_inodev_entry(matchn->inode, matchn->device, ino, dev); ++ ++ return; ++} ++ ++static void ++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, ++ const struct vfsmount *mnt) ++{ ++ ino_t ino = dentry->d_inode->i_ino; ++ dev_t dev = __get_dev(dentry); ++ ++ __do_handle_create(matchn, ino, dev); ++ ++ return; ++} ++ ++void ++gr_handle_create(const struct dentry *dentry, const struct vfsmount 
*mnt) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ do_handle_create(matchn, dentry, mnt); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace) ++{ ++ struct name_entry *matchn; ++ struct inodev_entry *inodev; ++ struct inode *inode = new_dentry->d_inode; ++ ino_t old_ino = old_dentry->d_inode->i_ino; ++ dev_t old_dev = __get_dev(old_dentry); ++ ++ /* vfs_rename swaps the name and parent link for old_dentry and ++ new_dentry ++ at this point, old_dentry has the new name, parent link, and inode ++ for the renamed file ++ if a file is being replaced by a rename, new_dentry has the inode ++ and name for the replaced file ++ */ ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt)); ++ ++ /* we wouldn't have to check d_inode if it weren't for ++ NFS silly-renaming ++ */ ++ ++ write_lock(&gr_inode_lock); ++ if (unlikely(replace && inode)) { ++ ino_t new_ino = inode->i_ino; ++ dev_t new_dev = __get_dev(new_dentry); ++ ++ inodev = lookup_inodev_entry(new_ino, new_dev); ++ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode))) ++ do_handle_delete(inodev, new_ino, new_dev); ++ } ++ ++ inodev = lookup_inodev_entry(old_ino, old_dev); ++ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode))) ++ do_handle_delete(inodev, old_ino, old_dev); ++ ++ if (unlikely((unsigned long)matchn)) ++ do_handle_create(matchn, old_dentry, mnt); ++ ++ write_unlock(&gr_inode_lock); ++ preempt_enable(); ++ ++ return; ++} ++ ++#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC) ++static const unsigned long res_learn_bumps[GR_NLIMITS] = { ++ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP, ++ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP, ++ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP, ++ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP, ++ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP, ++ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP, ++ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP, ++ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP, ++ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP, ++ [RLIMIT_AS] = GR_RLIM_AS_BUMP, ++ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP, ++ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP, ++ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP, ++ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP, ++ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP, ++ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP ++}; ++ ++void ++gr_learn_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ struct acl_subject_label *acl; ++ const struct cred *cred; ++ ++ if (unlikely((gr_status & GR_READY) && ++ task->acl && (task->acl->mode & 
(GR_LEARN | GR_INHERITLEARN)))) ++ goto skip_reslog; ++ ++ gr_log_resource(task, res, wanted, gt); ++skip_reslog: ++ ++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS)) ++ return; ++ ++ acl = task->acl; ++ ++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || ++ !(acl->resmask & (1U << (unsigned short) res)))) ++ return; ++ ++ if (wanted >= acl->res[res].rlim_cur) { ++ unsigned long res_add; ++ ++ res_add = wanted + res_learn_bumps[res]; ++ ++ acl->res[res].rlim_cur = res_add; ++ ++ if (wanted > acl->res[res].rlim_max) ++ acl->res[res].rlim_max = res_add; ++ ++ /* only log the subject filename, since resource logging is supported for ++ single-subject learning only */ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename, ++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max, ++ "", (unsigned long) res, &task->signal->saved_ip); ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++EXPORT_SYMBOL_GPL(gr_learn_resource); ++#endif ++ ++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)) ++void ++pax_set_initial_flags(struct linux_binprm *bprm) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *proc; ++ unsigned long flags; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ flags = pax_get_flags(task); ++ ++ proc = task->acl; ++ ++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) ++ flags &= ~MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) ++ flags &= ~MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) ++ flags &= ~MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) ++ flags &= ~MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) ++ flags &= ~MF_PAX_MPROTECT; ++ ++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) ++ flags |= MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) ++ flags |= MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) ++ flags |= MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) ++ flags |= MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) ++ flags |= MF_PAX_MPROTECT; ++ ++ pax_set_flags(task, flags); ++ ++ return; ++} ++#endif ++ ++int ++gr_handle_proc_ptrace(struct task_struct *task) ++{ ++ struct file *filp; ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ filp = task->exec_file; ++ ++ while (task_pid_nr(tmp) > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } ++ ++ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 1; ++ } ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 0; ++ } ++#endif ++ ++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ ++ if (retmode & GR_NOPTRACE) ++ return 1; ++ ++ if 
(!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) ++ && (current->acl != task->acl || (current->acl != current->role->root_label ++ && task_pid_nr(current) != task_pid_nr(task)))) ++ return 1; ++ ++ return 0; ++} ++ ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ if (!(current->role->roletype & GR_ROLE_GOD)) ++ return; ++ ++ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n", ++ p->role->rolename, gr_task_roletype_to_char(p), ++ p->acl->filename); ++} ++ ++int ++gr_handle_ptrace(struct task_struct *task, const long request) ++{ ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { ++ read_lock(&tasklist_lock); ++ while (task_pid_nr(tmp) > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->real_parent; ++ } ++ ++ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { ++ read_unlock(&tasklist_lock); ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ read_unlock(&tasklist_lock); ++ } ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) ++ return 0; ++#endif ++ ++ read_lock(&grsec_exec_file_lock); ++ if (unlikely(!task->exec_file)) { ++ read_unlock(&grsec_exec_file_lock); ++ return 0; ++ } ++ ++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ ++ if (retmode & GR_NOPTRACE) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ if (retmode & GR_PTRACERD) { ++ switch (request) { ++ case PTRACE_SEIZE: ++ case PTRACE_POKETEXT: ++ case PTRACE_POKEDATA: ++ case PTRACE_POKEUSR: ++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) ++ case PTRACE_SETREGS: ++ case PTRACE_SETFPREGS: ++#endif ++#ifdef CONFIG_X86 ++ case PTRACE_SETFPXREGS: ++#endif ++#ifdef CONFIG_ALTIVEC ++ case PTRACE_SETVRREGS: ++#endif ++ return 1; ++ default: ++ return 0; ++ } ++ } else if (!(current->acl->mode & GR_POVERRIDE) && ++ !(current->role->roletype & GR_ROLE_GOD) && ++ (current->acl != task->acl)) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int is_writable_mmap(const struct file *filp) ++{ ++ struct task_struct *task = current; ++ struct acl_object_label *obj, *obj2; ++ ++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && ++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) { ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label); ++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role->root_label); ++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ 
++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++int ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++void ++gr_acl_handle_psacct(struct task_struct *task, const long code) ++{ ++ unsigned long runtime, cputime; ++ cputime_t utime, stime; ++ unsigned int wday, cday; ++ __u8 whr, chr; ++ __u8 wmin, cmin; ++ __u8 wsec, csec; ++ struct timespec timeval; ++ ++ if (unlikely(!(gr_status & GR_READY) || !task->acl || ++ !(task->acl->mode & GR_PROCACCT))) ++ return; ++ ++ do_posix_clock_monotonic_gettime(&timeval); ++ runtime = timeval.tv_sec - task->start_time.tv_sec; ++ wday = runtime / (60 * 60 * 24); ++ runtime -= wday * (60 * 60 * 24); ++ whr = runtime / (60 * 60); ++ runtime -= whr * (60 * 60); ++ wmin = runtime / 60; ++ runtime -= wmin * 60; ++ wsec = runtime; ++ ++ task_cputime(task, &utime, &stime); ++ cputime = cputime_to_secs(utime + stime); ++ cday = cputime / (60 * 60 * 24); ++ cputime -= cday * (60 * 60 * 24); ++ chr = cputime / (60 * 60); ++ cputime -= chr * (60 * 60); ++ cmin = cputime / 60; ++ cputime -= cmin * 60; ++ csec = cputime; ++ ++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code); ++ ++ return; ++} ++ ++#ifdef CONFIG_TASKSTATS ++int gr_is_taskstats_denied(int pid) ++{ ++ struct task_struct *task; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred; ++#endif ++ int ret = 0; ++ ++ /* restrict taskstats viewing to un-chrooted root users ++ who have the 'view' subject flag if the RBAC system is enabled ++ */ ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ task = find_task_by_vpid(pid); ++ if (task) { ++#ifdef CONFIG_GRKERNSEC_CHROOT ++ if (proc_is_chrooted(task)) ++ ret = -EACCES; ++#endif ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ cred = __task_cred(task); ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (gr_is_global_nonroot(cred->uid)) ++ ret = -EACCES; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid)) ++ ret = -EACCES; ++#endif ++#endif ++ if (gr_status & GR_READY) { ++ if 
(!(task->acl->mode & GR_VIEW)) ++ ret = -EACCES; ++ } ++ } else ++ ret = -ENOENT; ++ ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return ret; ++} ++#endif ++ ++/* AUXV entries are filled via a descendant of search_binary_handler ++ after we've already applied the subject for the target ++*/ ++int gr_acl_enable_at_secure(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & GR_ATSECURE) ++ return 1; ++ ++ return 0; ++} ++ ++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino) ++{ ++ struct task_struct *task = current; ++ struct dentry *dentry = file->f_path.dentry; ++ struct vfsmount *mnt = file->f_path.mnt; ++ struct acl_object_label *obj, *tmp; ++ struct acl_subject_label *subj; ++ unsigned int bufsize; ++ int is_not_root; ++ char *path; ++ dev_t dev = __get_dev(dentry); ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 1; ++ ++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return 1; ++ ++ /* ignore Eric Biederman */ ++ if (IS_PRIVATE(dentry->d_inode)) ++ return 1; ++ ++ subj = task->acl; ++ read_lock(&gr_inode_lock); ++ do { ++ obj = lookup_acl_obj_label(ino, dev, subj); ++ if (obj != NULL) { ++ read_unlock(&gr_inode_lock); ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ } ++ } while ((subj = subj->parent_subject)); ++ read_unlock(&gr_inode_lock); ++ ++ /* this is purely an optimization since we're looking for an object ++ for the directory we're doing a readdir on ++ if it's possible for any globbed object to match the entry we're ++ filling into the directory, then the object we find here will be ++ an anchor point with attached globbed objects ++ */ ++ obj = chk_obj_label_noglob(dentry, mnt, task->acl); ++ if (obj->globbed == NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ ++ is_not_root = ((obj->filename[0] == '/') && ++ (obj->filename[1] == '\0')) ? 0 : 1; ++ bufsize = PAGE_SIZE - namelen - is_not_root; ++ ++ /* check bufsize > PAGE_SIZE || bufsize == 0 */ ++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) ++ return 1; ++ ++ preempt_disable(); ++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ bufsize); ++ ++ bufsize = strlen(path); ++ ++ /* if base is "/", don't append an additional slash */ ++ if (is_not_root) ++ *(path + bufsize) = '/'; ++ memcpy(path + bufsize + is_not_root, name, namelen); ++ *(path + bufsize + namelen + is_not_root) = '\0'; ++ ++ tmp = obj->globbed; ++ while (tmp) { ++ if (!glob_match(tmp->filename, path)) { ++ preempt_enable(); ++ return (tmp->mode & GR_FIND) ? 1 : 0; ++ } ++ tmp = tmp->next; ++ } ++ preempt_enable(); ++ return (obj->mode & GR_FIND) ? 
1 : 0; ++} ++ ++void gr_put_exec_file(struct task_struct *task) ++{ ++ struct file *filp; ++ ++ write_lock(&grsec_exec_file_lock); ++ filp = task->exec_file; ++ task->exec_file = NULL; ++ write_unlock(&grsec_exec_file_lock); ++ ++ if (filp) ++ fput(filp); ++ ++ return; ++} ++ ++ ++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE ++EXPORT_SYMBOL_GPL(gr_acl_is_enabled); ++#endif ++#ifdef CONFIG_SECURITY ++EXPORT_SYMBOL_GPL(gr_check_user_change); ++EXPORT_SYMBOL_GPL(gr_check_group_change); ++#endif ++ +diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c +new file mode 100644 +index 0000000..18ffbbd +--- /dev/null ++++ b/grsecurity/gracl_alloc.c +@@ -0,0 +1,105 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++ ++static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL }; ++struct gr_alloc_state *current_alloc_state = &__current_alloc_state; ++ ++static __inline__ int ++alloc_pop(void) ++{ ++ if (current_alloc_state->alloc_stack_next == 1) ++ return 0; ++ ++ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]); ++ ++ current_alloc_state->alloc_stack_next--; ++ ++ return 1; ++} ++ ++static __inline__ int ++alloc_push(void *buf) ++{ ++ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size) ++ return 1; ++ ++ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf; ++ ++ current_alloc_state->alloc_stack_next++; ++ ++ return 0; ++} ++ ++void * ++acl_alloc(unsigned long len) ++{ ++ void *ret = NULL; ++ ++ if (!len || len > PAGE_SIZE) ++ goto out; ++ ++ ret = kmalloc(len, GFP_KERNEL); ++ ++ if (ret) { ++ if (alloc_push(ret)) { ++ kfree(ret); ++ ret = NULL; ++ } ++ } ++ ++out: ++ return ret; ++} ++ ++void * ++acl_alloc_num(unsigned long num, unsigned long len) ++{ ++ if (!len || (num > (PAGE_SIZE / len))) ++ return NULL; ++ ++ return acl_alloc(num * len); ++} ++ ++void ++acl_free_all(void) ++{ ++ if (!current_alloc_state->alloc_stack) ++ return; ++ ++ while (alloc_pop()) ; ++ ++ if (current_alloc_state->alloc_stack) { ++ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) ++ kfree(current_alloc_state->alloc_stack); ++ else ++ vfree(current_alloc_state->alloc_stack); ++ } ++ ++ current_alloc_state->alloc_stack = NULL; ++ current_alloc_state->alloc_stack_size = 1; ++ current_alloc_state->alloc_stack_next = 1; ++ ++ return; ++} ++ ++int ++acl_alloc_stack_init(unsigned long size) ++{ ++ if ((size * sizeof (void *)) <= PAGE_SIZE) ++ current_alloc_state->alloc_stack = ++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); ++ else ++ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *)); ++ ++ current_alloc_state->alloc_stack_size = size; ++ current_alloc_state->alloc_stack_next = 1; ++ ++ if (!current_alloc_state->alloc_stack) ++ return 0; ++ else ++ return 1; ++} +diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c +new file mode 100644 +index 0000000..1a94c11 +--- /dev/null ++++ b/grsecurity/gracl_cap.c +@@ -0,0 +1,127 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++extern const char *captab_log[]; ++extern int captab_log_entries; ++ ++int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap) ++{ ++ struct acl_subject_label *curracl; ++ ++ if 
(!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = task->acl; ++ ++ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, GR_GLOBAL_UID(cred->uid), ++ GR_GLOBAL_GID(cred->gid), task->exec_file ? ++ gr_to_filename(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : curracl->filename, ++ curracl->filename, 0UL, ++ 0UL, "", (unsigned long) cap, &task->signal->saved_ip); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap) ++{ ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ kernel_cap_t cap_audit = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = task->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ cap_audit = curracl->cap_invert_audit; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ if (cap_raised(curracl->cap_invert_audit, cap)) ++ cap_raise(cap_audit, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) { ++ if (cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]); ++ return 1; ++ } ++ ++ /* only learn the capability use if the process has the capability in the ++ general case, the two uses in sys.c of gr_learn_cap are an exception ++ to this rule to ensure any role transition involves what the full-learned ++ policy believes in a privileged process ++ */ ++ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap)) ++ return 1; ++ ++ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); ++ ++ return 0; ++} ++ ++int ++gr_acl_is_capable(const int cap) ++{ ++ return gr_task_acl_is_capable(current, current_cred(), cap); ++} ++ ++int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap) ++{ ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = task->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_acl_is_capable_nolog(const int cap) ++{ ++ return gr_task_acl_is_capable_nolog(current, cap); ++} ++ +diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c 
+new file mode 100644 +index 0000000..ca25605 +--- /dev/null ++++ b/grsecurity/gracl_compat.c +@@ -0,0 +1,270 @@ ++#include <linux/kernel.h> ++#include <linux/gracl.h> ++#include <linux/compat.h> ++#include <linux/gracl_compat.h> ++ ++#include <asm/uaccess.h> ++ ++int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap) ++{ ++ struct gr_arg_wrapper_compat uwrapcompat; ++ ++ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat))) ++ return -EFAULT; ++ ++ if (((uwrapcompat.version != GRSECURITY_VERSION) && ++ (uwrapcompat.version != 0x2901)) || ++ (uwrapcompat.size != sizeof(struct gr_arg_compat))) ++ return -EINVAL; ++ ++ uwrap->arg = compat_ptr(uwrapcompat.arg); ++ uwrap->version = uwrapcompat.version; ++ uwrap->size = sizeof(struct gr_arg); ++ ++ return 0; ++} ++ ++int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg) ++{ ++ struct gr_arg_compat argcompat; ++ ++ if (copy_from_user(&argcompat, buf, sizeof(argcompat))) ++ return -EFAULT; ++ ++ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table); ++ arg->role_db.num_pointers = argcompat.role_db.num_pointers; ++ arg->role_db.num_roles = argcompat.role_db.num_roles; ++ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children; ++ arg->role_db.num_subjects = argcompat.role_db.num_subjects; ++ arg->role_db.num_objects = argcompat.role_db.num_objects; ++ ++ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw)); ++ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt)); ++ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum)); ++ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role)); ++ arg->sprole_pws = compat_ptr(argcompat.sprole_pws); ++ arg->segv_device = argcompat.segv_device; ++ arg->segv_inode = argcompat.segv_inode; ++ arg->segv_uid = argcompat.segv_uid; ++ arg->num_sprole_pws = argcompat.num_sprole_pws; ++ arg->mode = argcompat.mode; ++ ++ return 0; ++} ++ ++int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp) ++{ ++ struct acl_object_label_compat objcompat; ++ ++ if (copy_from_user(&objcompat, userp, sizeof(objcompat))) ++ return -EFAULT; ++ ++ obj->filename = compat_ptr(objcompat.filename); ++ obj->inode = objcompat.inode; ++ obj->device = objcompat.device; ++ obj->mode = objcompat.mode; ++ ++ obj->nested = compat_ptr(objcompat.nested); ++ obj->globbed = compat_ptr(objcompat.globbed); ++ ++ obj->prev = compat_ptr(objcompat.prev); ++ obj->next = compat_ptr(objcompat.next); ++ ++ return 0; ++} ++ ++int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp) ++{ ++ unsigned int i; ++ struct acl_subject_label_compat subjcompat; ++ ++ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat))) ++ return -EFAULT; ++ ++ subj->filename = compat_ptr(subjcompat.filename); ++ subj->inode = subjcompat.inode; ++ subj->device = subjcompat.device; ++ subj->mode = subjcompat.mode; ++ subj->cap_mask = subjcompat.cap_mask; ++ subj->cap_lower = subjcompat.cap_lower; ++ subj->cap_invert_audit = subjcompat.cap_invert_audit; ++ ++ for (i = 0; i < GR_NLIMITS; i++) { ++ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY) ++ subj->res[i].rlim_cur = RLIM_INFINITY; ++ else ++ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur; ++ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY) ++ subj->res[i].rlim_max = RLIM_INFINITY; ++ else ++ subj->res[i].rlim_max = subjcompat.res[i].rlim_max; ++ } ++ subj->resmask = subjcompat.resmask; ++ ++ subj->user_trans_type = subjcompat.user_trans_type; ++ 
subj->group_trans_type = subjcompat.group_trans_type; ++ subj->user_transitions = compat_ptr(subjcompat.user_transitions); ++ subj->group_transitions = compat_ptr(subjcompat.group_transitions); ++ subj->user_trans_num = subjcompat.user_trans_num; ++ subj->group_trans_num = subjcompat.group_trans_num; ++ ++ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families)); ++ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto)); ++ subj->ip_type = subjcompat.ip_type; ++ subj->ips = compat_ptr(subjcompat.ips); ++ subj->ip_num = subjcompat.ip_num; ++ subj->inaddr_any_override = subjcompat.inaddr_any_override; ++ ++ subj->crashes = subjcompat.crashes; ++ subj->expires = subjcompat.expires; ++ ++ subj->parent_subject = compat_ptr(subjcompat.parent_subject); ++ subj->hash = compat_ptr(subjcompat.hash); ++ subj->prev = compat_ptr(subjcompat.prev); ++ subj->next = compat_ptr(subjcompat.next); ++ ++ subj->obj_hash = compat_ptr(subjcompat.obj_hash); ++ subj->obj_hash_size = subjcompat.obj_hash_size; ++ subj->pax_flags = subjcompat.pax_flags; ++ ++ return 0; ++} ++ ++int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp) ++{ ++ struct acl_role_label_compat rolecompat; ++ ++ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat))) ++ return -EFAULT; ++ ++ role->rolename = compat_ptr(rolecompat.rolename); ++ role->uidgid = rolecompat.uidgid; ++ role->roletype = rolecompat.roletype; ++ ++ role->auth_attempts = rolecompat.auth_attempts; ++ role->expires = rolecompat.expires; ++ ++ role->root_label = compat_ptr(rolecompat.root_label); ++ role->hash = compat_ptr(rolecompat.hash); ++ ++ role->prev = compat_ptr(rolecompat.prev); ++ role->next = compat_ptr(rolecompat.next); ++ ++ role->transitions = compat_ptr(rolecompat.transitions); ++ role->allowed_ips = compat_ptr(rolecompat.allowed_ips); ++ role->domain_children = compat_ptr(rolecompat.domain_children); ++ role->domain_child_num = rolecompat.domain_child_num; ++ ++ role->umask = rolecompat.umask; ++ ++ role->subj_hash = compat_ptr(rolecompat.subj_hash); ++ role->subj_hash_size = rolecompat.subj_hash_size; ++ ++ return 0; ++} ++ ++int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) ++{ ++ struct role_allowed_ip_compat roleip_compat; ++ ++ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat))) ++ return -EFAULT; ++ ++ roleip->addr = roleip_compat.addr; ++ roleip->netmask = roleip_compat.netmask; ++ ++ roleip->prev = compat_ptr(roleip_compat.prev); ++ roleip->next = compat_ptr(roleip_compat.next); ++ ++ return 0; ++} ++ ++int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp) ++{ ++ struct role_transition_compat trans_compat; ++ ++ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat))) ++ return -EFAULT; ++ ++ trans->rolename = compat_ptr(trans_compat.rolename); ++ ++ trans->prev = compat_ptr(trans_compat.prev); ++ trans->next = compat_ptr(trans_compat.next); ++ ++ return 0; ++ ++} ++ ++int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) ++{ ++ struct gr_hash_struct_compat hash_compat; ++ ++ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat))) ++ return -EFAULT; ++ ++ hash->table = compat_ptr(hash_compat.table); ++ hash->nametable = compat_ptr(hash_compat.nametable); ++ hash->first = compat_ptr(hash_compat.first); ++ ++ hash->table_size = hash_compat.table_size; ++ hash->used_size = hash_compat.used_size; ++ ++ 
hash->type = hash_compat.type; ++ ++ return 0; ++} ++ ++int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp) ++{ ++ compat_uptr_t ptrcompat; ++ ++ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat))) ++ return -EFAULT; ++ ++ *(void **)ptr = compat_ptr(ptrcompat); ++ ++ return 0; ++} ++ ++int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp) ++{ ++ struct acl_ip_label_compat ip_compat; ++ ++ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat))) ++ return -EFAULT; ++ ++ ip->iface = compat_ptr(ip_compat.iface); ++ ip->addr = ip_compat.addr; ++ ip->netmask = ip_compat.netmask; ++ ip->low = ip_compat.low; ++ ip->high = ip_compat.high; ++ ip->mode = ip_compat.mode; ++ ip->type = ip_compat.type; ++ ++ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto)); ++ ++ ip->prev = compat_ptr(ip_compat.prev); ++ ip->next = compat_ptr(ip_compat.next); ++ ++ return 0; ++} ++ ++int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) ++{ ++ struct sprole_pw_compat pw_compat; ++ ++ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat))) ++ return -EFAULT; ++ ++ pw->rolename = compat_ptr(pw_compat.rolename); ++ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt)); ++ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum)); ++ ++ return 0; ++} ++ ++size_t get_gr_arg_wrapper_size_compat(void) ++{ ++ return sizeof(struct gr_arg_wrapper_compat); ++} ++ +diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c +new file mode 100644 +index 0000000..a89b1f4 +--- /dev/null ++++ b/grsecurity/gracl_fs.c +@@ -0,0 +1,437 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/types.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/stat.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++umode_t ++gr_acl_umask(void) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ return current->role->umask; ++} ++ ++__u32 ++gr_acl_handle_hidden_file(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ __u32 mode; ++ ++ if (unlikely(d_is_negative(dentry))) ++ return GR_FIND; ++ ++ mode = ++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt); ++ ++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return mode; ++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_FIND))) ++ return 0; ++ ++ return GR_FIND; ++} ++ ++__u32 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, ++ int acc_mode) ++{ ++ __u32 reqmode = GR_FIND; ++ __u32 mode; ++ ++ if (unlikely(d_is_negative(dentry))) ++ return reqmode; ++ ++ if (acc_mode & MAY_APPEND) ++ reqmode |= GR_APPEND; ++ else if (acc_mode & MAY_WRITE) ++ reqmode |= GR_WRITE; ++ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? 
" appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_creat(const struct dentry * dentry, ++ const struct dentry * p_dentry, ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode, ++ const int imode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ if (acc_mode & MAY_APPEND) ++ reqmode |= GR_APPEND; ++ // if a directory was required or the directory already exists, then ++ // don't count this open as a read ++ if ((acc_mode & MAY_READ) && ++ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)))) ++ reqmode |= GR_READ; ++ if ((open_flags & O_CREAT) && ++ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)))) ++ reqmode |= GR_SETID; ++ ++ mode = ++ gr_check_create(dentry, p_dentry, p_mnt, ++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, ++ const int fmode) ++{ ++ __u32 mode, reqmode = GR_FIND; ++ ++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_EXEC; ++ if (fmode & S_IWOTH) ++ reqmode |= GR_WRITE; ++ if (fmode & S_IROTH) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? " executing" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? 
" executing" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, ++ umode_t *modeptr) ++{ ++ umode_t mode; ++ ++ *modeptr &= ~gr_acl_umask(); ++ mode = *modeptr; ++ ++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) ++ return 1; ++ ++ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) && ++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) { ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, ++ GR_CHMOD_ACL_MSG); ++ } else { ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); ++ } ++} ++ ++__u32 ++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, ++ GR_UNIXCONNECT_ACL_MSG); ++} ++ ++/* hardlinks require at minimum create and link permission, ++ any additional privilege required is based on the ++ privilege of the file being linked to ++*/ ++__u32 ++gr_acl_handle_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, ++ const struct vfsmount * old_mnt, const struct filename *to) ++{ ++ __u32 mode; ++ __u32 needmode = GR_CREATE | GR_LINK; ++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; ++ ++ mode = ++ gr_check_link(new_dentry, parent_dentry, parent_mnt, 
old_dentry, ++ old_mnt); ++ ++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_symlink(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, const struct filename *from) ++{ ++ __u32 needmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ mode = ++ gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_CREATE | GR_AUDIT_CREATE | ++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); ++ ++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { ++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return (GR_WRITE | GR_CREATE); ++} ++ ++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_mknod(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const int mode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)))) ++ reqmode |= GR_SETID; ++ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ reqmode, GR_MKNOD_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt) ++{ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); ++} ++ ++#define RENAME_CHECK_SUCCESS(old, new) \ ++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ ++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) ++ ++int ++gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const struct filename *newname) ++{ ++ __u32 comp1, comp2; ++ int error = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (d_is_negative(new_dentry)) { ++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | ++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); ++ comp2 = gr_search_file(old_dentry, 
GR_READ | GR_WRITE | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, old_mnt); ++ } else { ++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | ++ GR_CREATE | GR_DELETE | ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, parent_mnt); ++ comp2 = ++ gr_search_file(old_dentry, ++ GR_READ | GR_WRITE | GR_AUDIT_READ | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); ++ } ++ ++ if (RENAME_CHECK_SUCCESS(comp1, comp2) && ++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name); ++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) ++ && !(comp2 & GR_SUPPRESS)) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name); ++ error = -EACCES; ++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) ++ error = -EACCES; ++ ++ return error; ++} ++ ++void ++gr_acl_handle_exit(void) ++{ ++ u16 id; ++ char *rolename; ++ ++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() && ++ !(current->role->roletype & GR_ROLE_PERSIST))) { ++ id = current->acl_role_id; ++ rolename = current->role->rolename; ++ gr_set_acls(1); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); ++ } ++ ++ gr_put_exec_file(current); ++ return; ++} ++ ++int ++gr_acl_handle_procpidmem(const struct task_struct *task) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (task != current && task->acl->mode & GR_PROTPROCFD) ++ return -EACCES; ++ ++ return 0; ++} +diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c +new file mode 100644 +index 0000000..f056b81 +--- /dev/null ++++ b/grsecurity/gracl_ip.c +@@ -0,0 +1,386 @@ ++#include <linux/kernel.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/skbuff.h> ++#include <linux/ip.h> ++#include <linux/udp.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/netdevice.h> ++#include <linux/inetdevice.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++#define GR_BIND 0x01 ++#define GR_CONNECT 0x02 ++#define GR_INVERT 0x04 ++#define GR_BINDOVERRIDE 0x08 ++#define GR_CONNECTOVERRIDE 0x10 ++#define GR_SOCK_FAMILY 0x20 ++ ++static const char * gr_protocols[IPPROTO_MAX] = { ++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", ++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", ++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1", ++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", ++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", ++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", ++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", ++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", ++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", ++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", ++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", ++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", ++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", ++ "aris", "scps", "qnx", "a/n", "ipcomp", 
"snp", "compaq-peer", "ipx-in-ip", ++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", ++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", ++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", ++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", ++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", ++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", ++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", ++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", ++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", ++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", ++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", ++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", ++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", ++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", ++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", ++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", ++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", ++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", ++ }; ++ ++static const char * gr_socktypes[SOCK_MAX] = { ++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", ++ "unknown:7", "unknown:8", "unknown:9", "packet" ++ }; ++ ++static const char * gr_sockfamilies[AF_MAX+1] = { ++ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25", ++ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash", ++ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28", ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf" ++ }; ++ ++const char * ++gr_proto_to_name(unsigned char proto) ++{ ++ return gr_protocols[proto]; ++} ++ ++const char * ++gr_socktype_to_name(unsigned char type) ++{ ++ return gr_socktypes[type]; ++} ++ ++const char * ++gr_sockfamily_to_name(unsigned char family) ++{ ++ return gr_sockfamilies[family]; ++} ++ ++extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; ++ ++int ++gr_search_socket(const int domain, const int type, const int protocol) ++{ ++ struct acl_subject_label *curr; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ goto exit; ++ ++ if ((domain < 0) || (type < 0) || (protocol < 0) || ++ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX)) ++ goto exit; // let the kernel handle it ++ ++ curr = current->acl; ++ ++ if (curr->sock_families[domain / 32] & 
++		/* the family is allowed; if this is PF_INET, allow it only if
++		   the extra sock type/protocol checks pass */
++		if (domain == PF_INET)
++			goto inet_check;
++		goto exit;
++	} else {
++		if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++			__u32 fakeip = 0;
++			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
++				       gr_to_filename(current->exec_file->f_path.dentry,
++				       current->exec_file->f_path.mnt) :
++				       curr->filename, curr->filename,
++				       &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
++				       &current->signal->saved_ip);
++			goto exit;
++		}
++		goto exit_fail;
++	}
++
++inet_check:
++	/* the rest of this checking is for IPv4 only */
++	if (!curr->ips)
++		goto exit;
++
++	if ((curr->ip_type & (1U << type)) &&
++	    (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
++		goto exit;
++
++	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++		/* we don't place acls on raw sockets, and sometimes
++		   dgram/ip sockets are opened for ioctl and not
++		   bind/connect, so we'll fake a bind learn log */
++		if (type == SOCK_RAW || type == SOCK_PACKET) {
++			__u32 fakeip = 0;
++			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
++				       gr_to_filename(current->exec_file->f_path.dentry,
++				       current->exec_file->f_path.mnt) :
++				       curr->filename, curr->filename,
++				       &fakeip, 0, type,
++				       protocol, GR_CONNECT, &current->signal->saved_ip);
++		} else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++			__u32 fakeip = 0;
++			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
++				       gr_to_filename(current->exec_file->f_path.dentry,
++				       current->exec_file->f_path.mnt) :
++				       curr->filename, curr->filename,
++				       &fakeip, 0, type,
++				       protocol, GR_BIND, &current->signal->saved_ip);
++		}
++		/* we'll log when they use connect or bind */
++		goto exit;
++	}
++
++exit_fail:
++	if (domain == PF_INET)
++		gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
++			    gr_socktype_to_name(type), gr_proto_to_name(protocol));
++	else if (rcu_access_pointer(net_families[domain]) != NULL)
++		gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
++				gr_socktype_to_name(type), protocol);
++
++	return 0;
++exit:
++	return 1;
++}
++
++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
++{
++	if ((ip->mode & mode) &&
++	    (ip_port >= ip->low) &&
++	    (ip_port <= ip->high) &&
++	    ((ntohl(ip_addr) & our_netmask) ==
++	     (ntohl(our_addr) & our_netmask))
++	    && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
++	    && (ip->type & (1U << type))) {
++		if (ip->mode & GR_INVERT)
++			return 2; // specifically denied
++		else
++			return 1; // allowed
++	}
++
++	return 0; // not specifically allowed, may continue parsing
++}
++
++static int
++gr_search_connectbind(const int full_mode, struct sock *sk,
++		      struct sockaddr_in *addr, const int type)
++{
++	char iface[IFNAMSIZ] = {0};
++	struct acl_subject_label *curr;
++	struct acl_ip_label *ip;
++	struct inet_sock *isk;
++	struct net_device *dev;
++	struct in_device *idev;
++	unsigned long i;
++	int ret;
++	int mode = full_mode & (GR_BIND | GR_CONNECT);
++	__u32 ip_addr = 0;
++	__u32 our_addr;
++	__u32 our_netmask;
++	char *p;
++	__u16 ip_port = 0;
++	const struct cred *cred = current_cred();
++
++	if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++		return 0;
++
++	curr = current->acl;
++	isk = inet_sk(sk);
++
++	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
++	if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
++		addr->sin_addr.s_addr = curr->inaddr_any_override;
++	if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
++		struct sockaddr_in saddr;
++		int err;
++
++		saddr.sin_family = AF_INET;
++		saddr.sin_addr.s_addr = curr->inaddr_any_override;
++		saddr.sin_port = isk->inet_sport;
++
++		err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++		if (err)
++			return err;
++
++		err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++		if (err)
++			return err;
++	}
++
++	if (!curr->ips)
++		return 0;
++
++	ip_addr = addr->sin_addr.s_addr;
++	ip_port = ntohs(addr->sin_port);
++
++	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++		security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++			       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++			       GR_GLOBAL_GID(cred->gid), current->exec_file ?
++			       gr_to_filename(current->exec_file->f_path.dentry,
++			       current->exec_file->f_path.mnt) :
++			       curr->filename, curr->filename,
++			       &ip_addr, ip_port, type,
++			       sk->sk_protocol, mode, &current->signal->saved_ip);
++		return 0;
++	}
++
++	for (i = 0; i < curr->ip_num; i++) {
++		ip = *(curr->ips + i);
++		if (ip->iface != NULL) {
++			strncpy(iface, ip->iface, IFNAMSIZ - 1);
++			p = strchr(iface, ':');
++			if (p != NULL)
++				*p = '\0';
++			dev = dev_get_by_name(sock_net(sk), iface);
++			if (dev == NULL)
++				continue;
++			idev = in_dev_get(dev);
++			if (idev == NULL) {
++				dev_put(dev);
++				continue;
++			}
++			rcu_read_lock();
++			for_ifa(idev) {
++				if (!strcmp(ip->iface, ifa->ifa_label)) {
++					our_addr = ifa->ifa_address;
++					our_netmask = 0xffffffff;
++					ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++					if (ret == 1) {
++						rcu_read_unlock();
++						in_dev_put(idev);
++						dev_put(dev);
++						return 0;
++					} else if (ret == 2) {
++						rcu_read_unlock();
++						in_dev_put(idev);
++						dev_put(dev);
++						goto denied;
++					}
++				}
++			} endfor_ifa(idev);
++			rcu_read_unlock();
++			in_dev_put(idev);
++			dev_put(dev);
++		} else {
++			our_addr = ip->addr;
++			our_netmask = ip->netmask;
++			ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++			if (ret == 1)
++				return 0;
++			else if (ret == 2)
++				goto denied;
++		}
++	}
++
++denied:
++	if (mode == GR_BIND)
++		gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++	else if (mode == GR_CONNECT)
++		gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++
++	return -EACCES;
++}
++
++int
++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
++{
++	/* always allow disconnection of dgram sockets with connect */
++	if (addr->sin_family == AF_UNSPEC)
++		return 0;
++	return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
++{
++	return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int gr_search_listen(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	struct sockaddr_in addr;
++
++	addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++	addr.sin_port = inet_sk(sk)->inet_sport;
++
++	return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int gr_search_accept(struct socket *sock)
++{
++	struct sock *sk = sock->sk;
++	struct sockaddr_in addr;
++
++	addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++	addr.sin_port = inet_sk(sk)->inet_sport;
++
++	return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++	if (addr)
++		return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++	else {
++		struct sockaddr_in sin;
++		const struct inet_sock *inet = inet_sk(sk);
++
++		sin.sin_addr.s_addr = inet->inet_daddr;
++		sin.sin_port = inet->inet_dport;
++
++		return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++	}
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++	struct sockaddr_in sin;
++
++	if (unlikely(skb->len < sizeof (struct udphdr)))
++		return 0; // skip this packet
++
++	sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
++	sin.sin_port = udp_hdr(skb)->source;
++
++	return
gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); ++} +diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c +new file mode 100644 +index 0000000..25f54ef +--- /dev/null ++++ b/grsecurity/gracl_learn.c +@@ -0,0 +1,207 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/poll.h> ++#include <linux/string.h> ++#include <linux/file.h> ++#include <linux/types.h> ++#include <linux/vmalloc.h> ++#include <linux/grinternal.h> ++ ++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, ++ size_t count, loff_t *ppos); ++extern int gr_acl_is_enabled(void); ++ ++static DECLARE_WAIT_QUEUE_HEAD(learn_wait); ++static int gr_learn_attached; ++ ++/* use a 512k buffer */ ++#define LEARN_BUFFER_SIZE (512 * 1024) ++ ++static DEFINE_SPINLOCK(gr_learn_lock); ++static DEFINE_MUTEX(gr_learn_user_mutex); ++ ++/* we need to maintain two buffers, so that the kernel context of grlearn ++ uses a semaphore around the userspace copying, and the other kernel contexts ++ use a spinlock when copying into the buffer, since they cannot sleep ++*/ ++static char *learn_buffer; ++static char *learn_buffer_user; ++static int learn_buffer_len; ++static int learn_buffer_user_len; ++ ++static ssize_t ++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) ++{ ++ DECLARE_WAITQUEUE(wait, current); ++ ssize_t retval = 0; ++ ++ add_wait_queue(&learn_wait, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ do { ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ if (learn_buffer_len) ++ break; ++ spin_unlock(&gr_learn_lock); ++ mutex_unlock(&gr_learn_user_mutex); ++ if (file->f_flags & O_NONBLOCK) { ++ retval = -EAGAIN; ++ goto out; ++ } ++ if (signal_pending(current)) { ++ retval = -ERESTARTSYS; ++ goto out; ++ } ++ ++ schedule(); ++ } while (1); ++ ++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); ++ learn_buffer_user_len = learn_buffer_len; ++ retval = learn_buffer_len; ++ learn_buffer_len = 0; ++ ++ spin_unlock(&gr_learn_lock); ++ ++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) ++ retval = -EFAULT; ++ ++ mutex_unlock(&gr_learn_user_mutex); ++out: ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&learn_wait, &wait); ++ return retval; ++} ++ ++static unsigned int ++poll_learn(struct file * file, poll_table * wait) ++{ ++ poll_wait(file, &learn_wait, wait); ++ ++ if (learn_buffer_len) ++ return (POLLIN | POLLRDNORM); ++ ++ return 0; ++} ++ ++void ++gr_clear_learn_entries(void) ++{ ++ char *tmp; ++ ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ if (tmp) ++ vfree(tmp); ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ mutex_unlock(&gr_learn_user_mutex); ++ ++ return; ++} ++ ++void ++gr_add_learn_entry(const char *fmt, ...) 
++{ ++ va_list args; ++ unsigned int len; ++ ++ if (!gr_learn_attached) ++ return; ++ ++ spin_lock(&gr_learn_lock); ++ ++ /* leave a gap at the end so we know when it's "full" but don't have to ++ compute the exact length of the string we're trying to append ++ */ ++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ return; ++ } ++ if (learn_buffer == NULL) { ++ spin_unlock(&gr_learn_lock); ++ return; ++ } ++ ++ va_start(args, fmt); ++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); ++ va_end(args); ++ ++ learn_buffer_len += len + 1; ++ ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ ++ return; ++} ++ ++static int ++open_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ && gr_learn_attached) ++ return -EBUSY; ++ if (file->f_mode & FMODE_READ) { ++ int retval = 0; ++ mutex_lock(&gr_learn_user_mutex); ++ if (learn_buffer == NULL) ++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer_user == NULL) ++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ if (learn_buffer_user == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 1; ++out_error: ++ mutex_unlock(&gr_learn_user_mutex); ++ return retval; ++ } ++ return 0; ++} ++ ++static int ++close_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ) { ++ char *tmp = NULL; ++ mutex_lock(&gr_learn_user_mutex); ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ if (tmp) ++ vfree(tmp); ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 0; ++ mutex_unlock(&gr_learn_user_mutex); ++ } ++ ++ return 0; ++} ++ ++const struct file_operations grsec_fops = { ++ .read = read_learn, ++ .write = write_grsec_handler, ++ .open = open_learn, ++ .release = close_learn, ++ .poll = poll_learn, ++}; +diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c +new file mode 100644 +index 0000000..3f8ade0 +--- /dev/null ++++ b/grsecurity/gracl_policy.c +@@ -0,0 +1,1782 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/namei.h> ++#include <linux/mount.h> ++#include <linux/tty.h> ++#include <linux/proc_fs.h> ++#include <linux/lglock.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> ++#include <linux/types.h> ++#include <linux/sysctl.h> ++#include <linux/netdevice.h> ++#include <linux/ptrace.h> ++#include <linux/gracl.h> ++#include <linux/gralloc.h> ++#include <linux/security.h> ++#include <linux/grinternal.h> ++#include <linux/pid_namespace.h> ++#include <linux/stop_machine.h> ++#include <linux/fdtable.h> ++#include <linux/percpu.h> ++#include <linux/lglock.h> ++#include <linux/hugetlb.h> ++#include <linux/posix-timers.h> ++#include "../fs/mount.h" ++ ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++ ++extern struct gr_policy_state *polstate; ++ ++#define FOR_EACH_ROLE_START(role) \ ++ role = polstate->role_list; \ ++ while (role) { ++ ++#define FOR_EACH_ROLE_END(role) \ ++ role = role->prev; \ ++ } ++ ++struct path gr_real_root; ++ ++extern 
struct gr_alloc_state *current_alloc_state; ++ ++u16 acl_sp_role_value; ++ ++static DEFINE_MUTEX(gr_dev_mutex); ++ ++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); ++extern void gr_clear_learn_entries(void); ++ ++struct gr_arg *gr_usermode __read_only; ++unsigned char *gr_system_salt __read_only; ++unsigned char *gr_system_sum __read_only; ++ ++static unsigned int gr_auth_attempts = 0; ++static unsigned long gr_auth_expires = 0UL; ++ ++struct acl_object_label *fakefs_obj_rw; ++struct acl_object_label *fakefs_obj_rwx; ++ ++extern int gr_init_uidset(void); ++extern void gr_free_uidset(void); ++extern void gr_remove_uid(uid_t uid); ++extern int gr_find_uid(uid_t uid); ++ ++extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename); ++extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj); ++extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb); ++extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry); ++extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid); ++extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj); ++extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role); ++extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name); ++extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt); ++extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role); ++extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role); ++extern void assign_special_role(const char *rolename); ++extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role); ++extern int gr_rbac_disable(void *unused); ++extern void gr_enable_rbac_system(void); ++ ++static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp) ++{ ++ if (copy_from_user(obj, userp, sizeof(struct acl_object_label))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp) ++{ ++ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp) ++{ ++ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp) ++{ ++ if (copy_from_user(role, userp, sizeof(struct acl_role_label))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) ++{ ++ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) ++{ ++ if (copy_from_user(pw, userp + 
idx, sizeof(struct sprole_pw))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) ++{ ++ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp) ++{ ++ if (copy_from_user(trans, userp, sizeof(struct role_transition))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp) ++{ ++ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap) ++{ ++ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper))) ++ return -EFAULT; ++ ++ if (((uwrap->version != GRSECURITY_VERSION) && ++ (uwrap->version != 0x2901)) || ++ (uwrap->size != sizeof(struct gr_arg))) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg) ++{ ++ if (copy_from_user(arg, buf, sizeof (struct gr_arg))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static size_t get_gr_arg_wrapper_size_normal(void) ++{ ++ return sizeof(struct gr_arg_wrapper); ++} ++ ++#ifdef CONFIG_COMPAT ++extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap); ++extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg); ++extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp); ++extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp); ++extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp); ++extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp); ++extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp); ++extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp); ++extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp); ++extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp); ++extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp); ++extern size_t get_gr_arg_wrapper_size_compat(void); ++ ++int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only; ++int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only; ++int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only; ++int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only; ++int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only; ++int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only; ++int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only; ++int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only; ++int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only; ++int (* copy_role_transition)(struct 
role_transition *trans, const struct role_transition *userp) __read_only; ++int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only; ++size_t (* get_gr_arg_wrapper_size)(void) __read_only; ++ ++#else ++#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal ++#define copy_gr_arg copy_gr_arg_normal ++#define copy_gr_hash_struct copy_gr_hash_struct_normal ++#define copy_acl_object_label copy_acl_object_label_normal ++#define copy_acl_subject_label copy_acl_subject_label_normal ++#define copy_acl_role_label copy_acl_role_label_normal ++#define copy_acl_ip_label copy_acl_ip_label_normal ++#define copy_pointer_from_array copy_pointer_from_array_normal ++#define copy_sprole_pw copy_sprole_pw_normal ++#define copy_role_transition copy_role_transition_normal ++#define copy_role_allowed_ip copy_role_allowed_ip_normal ++#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal ++#endif ++ ++static struct acl_subject_label * ++lookup_subject_map(const struct acl_subject_label *userp) ++{ ++ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size); ++ struct subject_map *match; ++ ++ match = polstate->subj_map_set.s_hash[index]; ++ ++ while (match && match->user != userp) ++ match = match->next; ++ ++ if (match != NULL) ++ return match->kernel; ++ else ++ return NULL; ++} ++ ++static void ++insert_subj_map_entry(struct subject_map *subjmap) ++{ ++ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size); ++ struct subject_map **curr; ++ ++ subjmap->prev = NULL; ++ ++ curr = &polstate->subj_map_set.s_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = subjmap; ++ ++ subjmap->next = *curr; ++ *curr = subjmap; ++ ++ return; ++} ++ ++static void ++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) ++{ ++ unsigned int index = ++ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size); ++ struct acl_role_label **curr; ++ struct acl_role_label *tmp, *tmp2; ++ ++ curr = &polstate->acl_role_set.r_hash[index]; ++ ++ /* simple case, slot is empty, just set it to our role */ ++ if (*curr == NULL) { ++ *curr = role; ++ } else { ++ /* example: ++ 1 -> 2 -> 3 (adding 2 -> 3 to here) ++ 2 -> 3 ++ */ ++ /* first check to see if we can already be reached via this slot */ ++ tmp = *curr; ++ while (tmp && tmp != role) ++ tmp = tmp->next; ++ if (tmp == role) { ++ /* we don't need to add ourselves to this slot's chain */ ++ return; ++ } ++ /* we need to add ourselves to this chain, two cases */ ++ if (role->next == NULL) { ++ /* simple case, append the current chain to our role */ ++ role->next = *curr; ++ *curr = role; ++ } else { ++ /* 1 -> 2 -> 3 -> 4 ++ 2 -> 3 -> 4 ++ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here) ++ */ ++ /* trickier case: walk our role's chain until we find ++ the role for the start of the current slot's chain */ ++ tmp = role; ++ tmp2 = *curr; ++ while (tmp->next && tmp->next != tmp2) ++ tmp = tmp->next; ++ if (tmp->next == tmp2) { ++ /* from example above, we found 3, so just ++ replace this slot's chain with ours */ ++ *curr = role; ++ } else { ++ /* we didn't find a subset of our role's chain ++ in the current slot's chain, so append their ++ chain to ours, and set us as the first role in ++ the slot's chain ++ ++ we could fold this case with the case above, ++ but making it explicit for clarity ++ */ ++ tmp->next = tmp2; ++ *curr = role; ++ } ++ } ++ } ++ ++ return; ++} ++ ++static void ++insert_acl_role_label(struct acl_role_label *role) ++{ ++ int i; ++ 
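++	/* each role is linked onto two structures: the flat role_list
++	   (chained through ->prev and used for whole-policy walks) and the
++	   uid/gid hash chains (chained through ->next); domain roles are
++	   hashed once per domain child below */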
++ if (polstate->role_list == NULL) { ++ polstate->role_list = role; ++ role->prev = NULL; ++ } else { ++ role->prev = polstate->role_list; ++ polstate->role_list = role; ++ } ++ ++ /* used for hash chains */ ++ role->next = NULL; ++ ++ if (role->roletype & GR_ROLE_DOMAIN) { ++ for (i = 0; i < role->domain_child_num; i++) ++ __insert_acl_role_label(role, role->domain_children[i]); ++ } else ++ __insert_acl_role_label(role, role->uidgid); ++} ++ ++static int ++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted) ++{ ++ struct name_entry **curr, *nentry; ++ struct inodev_entry *ientry; ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % polstate->name_set.n_size; ++ ++ curr = &polstate->name_set.n_hash[index]; ++ ++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) ++ curr = &((*curr)->next); ++ ++ if (*curr != NULL) ++ return 1; ++ ++ nentry = acl_alloc(sizeof (struct name_entry)); ++ if (nentry == NULL) ++ return 0; ++ ientry = acl_alloc(sizeof (struct inodev_entry)); ++ if (ientry == NULL) ++ return 0; ++ ientry->nentry = nentry; ++ ++ nentry->key = key; ++ nentry->name = name; ++ nentry->inode = inode; ++ nentry->device = device; ++ nentry->len = len; ++ nentry->deleted = deleted; ++ ++ nentry->prev = NULL; ++ curr = &polstate->name_set.n_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = nentry; ++ nentry->next = *curr; ++ *curr = nentry; ++ ++ /* insert us into the table searchable by inode/dev */ ++ __insert_inodev_entry(polstate, ientry); ++ ++ return 1; ++} ++ ++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ ++ ++static void * ++create_table(__u32 * len, int elementsize) ++{ ++ unsigned int table_sizes[] = { ++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, ++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, ++ 4194301, 8388593, 16777213, 33554393, 67108859 ++ }; ++ void *newtable = NULL; ++ unsigned int pwr = 0; ++ ++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && ++ table_sizes[pwr] <= *len) ++ pwr++; ++ ++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize)) ++ return newtable; ++ ++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) ++ newtable = ++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); ++ else ++ newtable = vmalloc(table_sizes[pwr] * elementsize); ++ ++ *len = table_sizes[pwr]; ++ ++ return newtable; ++} ++ ++static int ++init_variables(const struct gr_arg *arg, bool reload) ++{ ++ struct task_struct *reaper = init_pid_ns.child_reaper; ++ unsigned int stacksize; ++ ++ polstate->subj_map_set.s_size = arg->role_db.num_subjects; ++ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; ++ polstate->name_set.n_size = arg->role_db.num_objects; ++ polstate->inodev_set.i_size = arg->role_db.num_objects; ++ ++ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size || ++ !polstate->name_set.n_size || !polstate->inodev_set.i_size) ++ return 1; ++ ++ if (!reload) { ++ if (!gr_init_uidset()) ++ return 1; ++ } ++ ++ /* set up the stack that holds allocation info */ ++ ++ stacksize = arg->role_db.num_pointers + 5; ++ ++ if (!acl_alloc_stack_init(stacksize)) ++ return 1; ++ ++ if (!reload) { ++ /* grab reference for the real root dentry and vfsmount */ ++ get_fs_root(reaper->fs, &gr_real_root); ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Obtained real root device=%d, 
inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino); ++#endif ++ ++ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL); ++ if (fakefs_obj_rw == NULL) ++ return 1; ++ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE; ++ ++ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL); ++ if (fakefs_obj_rwx == NULL) ++ return 1; ++ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; ++ } ++ ++ polstate->subj_map_set.s_hash = ++ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *)); ++ polstate->acl_role_set.r_hash = ++ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *)); ++ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *)); ++ polstate->inodev_set.i_hash = ++ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *)); ++ ++ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash || ++ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash) ++ return 1; ++ ++ memset(polstate->subj_map_set.s_hash, 0, ++ sizeof(struct subject_map *) * polstate->subj_map_set.s_size); ++ memset(polstate->acl_role_set.r_hash, 0, ++ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size); ++ memset(polstate->name_set.n_hash, 0, ++ sizeof (struct name_entry *) * polstate->name_set.n_size); ++ memset(polstate->inodev_set.i_hash, 0, ++ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size); ++ ++ return 0; ++} ++ ++/* free information not needed after startup ++ currently contains user->kernel pointer mappings for subjects ++*/ ++ ++static void ++free_init_variables(void) ++{ ++ __u32 i; ++ ++ if (polstate->subj_map_set.s_hash) { ++ for (i = 0; i < polstate->subj_map_set.s_size; i++) { ++ if (polstate->subj_map_set.s_hash[i]) { ++ kfree(polstate->subj_map_set.s_hash[i]); ++ polstate->subj_map_set.s_hash[i] = NULL; ++ } ++ } ++ ++ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <= ++ PAGE_SIZE) ++ kfree(polstate->subj_map_set.s_hash); ++ else ++ vfree(polstate->subj_map_set.s_hash); ++ } ++ ++ return; ++} ++ ++static void ++free_variables(bool reload) ++{ ++ struct acl_subject_label *s; ++ struct acl_role_label *r; ++ struct task_struct *task, *task2; ++ unsigned int x; ++ ++ if (!reload) { ++ gr_clear_learn_entries(); ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(task2, task) { ++ task->acl_sp_role = 0; ++ task->acl_role_id = 0; ++ task->inherited = 0; ++ task->acl = NULL; ++ task->role = NULL; ++ } while_each_thread(task2, task); ++ read_unlock(&tasklist_lock); ++ ++ kfree(fakefs_obj_rw); ++ fakefs_obj_rw = NULL; ++ kfree(fakefs_obj_rwx); ++ fakefs_obj_rwx = NULL; ++ ++ /* release the reference to the real root dentry and vfsmount */ ++ path_put(&gr_real_root); ++ memset(&gr_real_root, 0, sizeof(gr_real_root)); ++ } ++ ++ /* free all object hash tables */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (r->subj_hash == NULL) ++ goto next_role; ++ FOR_EACH_SUBJECT_START(r, s, x) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_SUBJECT_END(s, x) ++ FOR_EACH_NESTED_SUBJECT_START(r, s) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_NESTED_SUBJECT_END(s) ++ if ((r->subj_hash_size * sizeof (struct 
acl_subject_label *)) <= PAGE_SIZE) ++ kfree(r->subj_hash); ++ else ++ vfree(r->subj_hash); ++ r->subj_hash = NULL; ++next_role: ++ FOR_EACH_ROLE_END(r) ++ ++ acl_free_all(); ++ ++ if (polstate->acl_role_set.r_hash) { ++ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <= ++ PAGE_SIZE) ++ kfree(polstate->acl_role_set.r_hash); ++ else ++ vfree(polstate->acl_role_set.r_hash); ++ } ++ if (polstate->name_set.n_hash) { ++ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <= ++ PAGE_SIZE) ++ kfree(polstate->name_set.n_hash); ++ else ++ vfree(polstate->name_set.n_hash); ++ } ++ ++ if (polstate->inodev_set.i_hash) { ++ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <= ++ PAGE_SIZE) ++ kfree(polstate->inodev_set.i_hash); ++ else ++ vfree(polstate->inodev_set.i_hash); ++ } ++ ++ if (!reload) ++ gr_free_uidset(); ++ ++ memset(&polstate->name_set, 0, sizeof (struct name_db)); ++ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db)); ++ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db)); ++ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db)); ++ ++ polstate->default_role = NULL; ++ polstate->kernel_role = NULL; ++ polstate->role_list = NULL; ++ ++ return; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied); ++ ++static int alloc_and_copy_string(char **name, unsigned int maxlen) ++{ ++ unsigned int len = strnlen_user(*name, maxlen); ++ char *tmp; ++ ++ if (!len || len >= maxlen) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, *name, len)) ++ return -EFAULT; ++ ++ tmp[len-1] = '\0'; ++ *name = tmp; ++ ++ return 0; ++} ++ ++static int ++copy_user_glob(struct acl_object_label *obj) ++{ ++ struct acl_object_label *g_tmp, **guser; ++ int error; ++ ++ if (obj->globbed == NULL) ++ return 0; ++ ++ guser = &obj->globbed; ++ while (*guser) { ++ g_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label)); ++ if (g_tmp == NULL) ++ return -ENOMEM; ++ ++ if (copy_acl_object_label(g_tmp, *guser)) ++ return -EFAULT; ++ ++ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX); ++ if (error) ++ return error; ++ ++ *guser = g_tmp; ++ guser = &(g_tmp->next); ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, ++ struct acl_role_label *role) ++{ ++ struct acl_object_label *o_tmp; ++ int ret; ++ ++ while (userp) { ++ if ((o_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_acl_object_label(o_tmp, userp)) ++ return -EFAULT; ++ ++ userp = o_tmp->prev; ++ ++ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX); ++ if (ret) ++ return ret; ++ ++ insert_acl_obj_label(o_tmp, subj); ++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode, ++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 
1 : 0)) ++ return -ENOMEM; ++ ++ ret = copy_user_glob(o_tmp); ++ if (ret) ++ return ret; ++ ++ if (o_tmp->nested) { ++ int already_copied; ++ ++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied); ++ if (IS_ERR(o_tmp->nested)) ++ return PTR_ERR(o_tmp->nested); ++ ++ /* insert into nested subject list if we haven't copied this one yet ++ to prevent duplicate entries */ ++ if (!already_copied) { ++ o_tmp->nested->next = role->hash->first; ++ role->hash->first = o_tmp->nested; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++static __u32 ++count_user_subjs(struct acl_subject_label *userp) ++{ ++ struct acl_subject_label s_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_acl_subject_label(&s_tmp, userp)) ++ break; ++ ++ userp = s_tmp.prev; ++ } ++ ++ return num; ++} ++ ++static int ++copy_user_allowedips(struct acl_role_label *rolep) ++{ ++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; ++ ++ ruserip = rolep->allowed_ips; ++ ++ while (ruserip) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_allowed_ip *) ++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_role_allowed_ip(rtmp, ruserip)) ++ return -EFAULT; ++ ++ ruserip = rtmp->prev; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->allowed_ips = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!ruserip) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_transitions(struct acl_role_label *rolep) ++{ ++ struct role_transition *rusertp, *rtmp = NULL, *rlast; ++ int error; ++ ++ rusertp = rolep->transitions; ++ ++ while (rusertp) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_transition *) ++ acl_alloc(sizeof (struct role_transition))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_role_transition(rtmp, rusertp)) ++ return -EFAULT; ++ ++ rusertp = rtmp->prev; ++ ++ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN); ++ if (error) ++ return error; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->transitions = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!rusertp) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static __u32 count_user_objs(const struct acl_object_label __user *userp) ++{ ++ struct acl_object_label o_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_acl_object_label(&o_tmp, userp)) ++ break; ++ ++ userp = o_tmp.prev; ++ num++; ++ } ++ ++ return num; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied) ++{ ++ struct acl_subject_label *s_tmp = NULL, *s_tmp2; ++ __u32 num_objs; ++ struct acl_ip_label **i_tmp, *i_utmp2; ++ struct gr_hash_struct ghash; ++ struct subject_map *subjmap; ++ unsigned int i_num; ++ int err; ++ ++ if (already_copied != NULL) ++ *already_copied = 0; ++ ++ s_tmp = lookup_subject_map(userp); ++ ++ /* we've already copied this subject into the kernel, just return ++ the reference to it, and don't copy it over again ++ */ ++ if (s_tmp) { ++ if (already_copied != NULL) ++ *already_copied = 1; ++ return(s_tmp); ++ } ++ ++ if ((s_tmp = (struct acl_subject_label *) ++ acl_alloc(sizeof (struct acl_subject_label))) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); ++ if (subjmap == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap->user = userp; ++ subjmap->kernel = s_tmp; ++ insert_subj_map_entry(subjmap); ++ ++ if (copy_acl_subject_label(s_tmp, userp)) ++ return ERR_PTR(-EFAULT); 
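++	/* s_tmp is now a raw copy of the userspace subject: its embedded
++	   pointers (filename, user/group transition tables, object lists,
++	   parent subject, ip labels) still point into userspace and are
++	   replaced with kernel copies one by one below */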
++ ++ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX); ++ if (err) ++ return ERR_PTR(err); ++ ++ if (!strcmp(s_tmp->filename, "/")) ++ role->root_label = s_tmp; ++ ++ if (copy_gr_hash_struct(&ghash, s_tmp->hash)) ++ return ERR_PTR(-EFAULT); ++ ++ /* copy user and group transition tables */ ++ ++ if (s_tmp->user_trans_num) { ++ uid_t *uidlist; ++ ++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t)); ++ if (uidlist == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->user_transitions = uidlist; ++ } ++ ++ if (s_tmp->group_trans_num) { ++ gid_t *gidlist; ++ ++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t)); ++ if (gidlist == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->group_transitions = gidlist; ++ } ++ ++ /* set up object hash table */ ++ num_objs = count_user_objs(ghash.first); ++ ++ s_tmp->obj_hash_size = num_objs; ++ s_tmp->obj_hash = ++ (struct acl_object_label **) ++ create_table(&(s_tmp->obj_hash_size), sizeof(void *)); ++ ++ if (!s_tmp->obj_hash) ++ return ERR_PTR(-ENOMEM); ++ ++ memset(s_tmp->obj_hash, 0, ++ s_tmp->obj_hash_size * ++ sizeof (struct acl_object_label *)); ++ ++ /* add in objects */ ++ err = copy_user_objs(ghash.first, s_tmp, role); ++ ++ if (err) ++ return ERR_PTR(err); ++ ++ /* set pointer for parent subject */ ++ if (s_tmp->parent_subject) { ++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL); ++ ++ if (IS_ERR(s_tmp2)) ++ return s_tmp2; ++ ++ s_tmp->parent_subject = s_tmp2; ++ } ++ ++ /* add in ip acls */ ++ ++ if (!s_tmp->ip_num) { ++ s_tmp->ips = NULL; ++ goto insert; ++ } ++ ++ i_tmp = ++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num, ++ sizeof (struct acl_ip_label *)); ++ ++ if (!i_tmp) ++ return ERR_PTR(-ENOMEM); ++ ++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { ++ *(i_tmp + i_num) = ++ (struct acl_ip_label *) ++ acl_alloc(sizeof (struct acl_ip_label)); ++ if (!*(i_tmp + i_num)) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips)) ++ return ERR_PTR(-EFAULT); ++ ++ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2)) ++ return ERR_PTR(-EFAULT); ++ ++ if ((*(i_tmp + i_num))->iface == NULL) ++ continue; ++ ++ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ); ++ if (err) ++ return ERR_PTR(err); ++ } ++ ++ s_tmp->ips = i_tmp; ++ ++insert: ++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode, ++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 
1 : 0)) ++ return ERR_PTR(-ENOMEM); ++ ++ return s_tmp; ++} ++ ++static int ++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label s_pre; ++ struct acl_subject_label * ret; ++ int err; ++ ++ while (userp) { ++ if (copy_acl_subject_label(&s_pre, userp)) ++ return -EFAULT; ++ ++ ret = do_copy_user_subj(userp, role, NULL); ++ ++ err = PTR_ERR(ret); ++ if (IS_ERR(ret)) ++ return err; ++ ++ insert_acl_subj_label(ret, role); ++ ++ userp = s_pre.prev; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_acl(struct gr_arg *arg) ++{ ++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2; ++ struct acl_subject_label *subj_list; ++ struct sprole_pw *sptmp; ++ struct gr_hash_struct *ghash; ++ uid_t *domainlist; ++ unsigned int r_num; ++ int err = 0; ++ __u16 i; ++ __u32 num_subjs; ++ ++ /* we need a default and kernel role */ ++ if (arg->role_db.num_roles < 2) ++ return -EINVAL; ++ ++ /* copy special role authentication info from userspace */ ++ ++ polstate->num_sprole_pws = arg->num_sprole_pws; ++ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *)); ++ ++ if (!polstate->acl_special_roles && polstate->num_sprole_pws) ++ return -ENOMEM; ++ ++ for (i = 0; i < polstate->num_sprole_pws; i++) { ++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); ++ if (!sptmp) ++ return -ENOMEM; ++ if (copy_sprole_pw(sptmp, i, arg->sprole_pws)) ++ return -EFAULT; ++ ++ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN); ++ if (err) ++ return err; ++ ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG ++ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename); ++#endif ++ ++ polstate->acl_special_roles[i] = sptmp; ++ } ++ ++ r_utmp = (struct acl_role_label **) arg->role_db.r_table; ++ ++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { ++ r_tmp = acl_alloc(sizeof (struct acl_role_label)); ++ ++ if (!r_tmp) ++ return -ENOMEM; ++ ++ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp)) ++ return -EFAULT; ++ ++ if (copy_acl_role_label(r_tmp, r_utmp2)) ++ return -EFAULT; ++ ++ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN); ++ if (err) ++ return err; ++ ++ if (!strcmp(r_tmp->rolename, "default") ++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) { ++ polstate->default_role = r_tmp; ++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { ++ polstate->kernel_role = r_tmp; ++ } ++ ++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_gr_hash_struct(ghash, r_tmp->hash)) ++ return -EFAULT; ++ ++ r_tmp->hash = ghash; ++ ++ num_subjs = count_user_subjs(r_tmp->hash->first); ++ ++ r_tmp->subj_hash_size = num_subjs; ++ r_tmp->subj_hash = ++ (struct acl_subject_label **) ++ create_table(&(r_tmp->subj_hash_size), sizeof(void *)); ++ ++ if (!r_tmp->subj_hash) ++ return -ENOMEM; ++ ++ err = copy_user_allowedips(r_tmp); ++ if (err) ++ return err; ++ ++ /* copy domain info */ ++ if (r_tmp->domain_children != NULL) { ++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t)); ++ if (domainlist == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) ++ return -EFAULT; ++ ++ r_tmp->domain_children = domainlist; ++ } ++ ++ err = copy_user_transitions(r_tmp); ++ if (err) ++ return err; ++ ++ memset(r_tmp->subj_hash, 0, ++ r_tmp->subj_hash_size * ++ sizeof (struct acl_subject_label *)); ++ ++ /* acquire the list of 
subjects, then NULL out ++ the list prior to parsing the subjects for this role, ++ as during this parsing the list is replaced with a list ++ of *nested* subjects for the role ++ */ ++ subj_list = r_tmp->hash->first; ++ ++ /* set nested subject list to null */ ++ r_tmp->hash->first = NULL; ++ ++ err = copy_user_subjs(subj_list, r_tmp); ++ ++ if (err) ++ return err; ++ ++ insert_acl_role_label(r_tmp); ++ } ++ ++ if (polstate->default_role == NULL || polstate->kernel_role == NULL) ++ return -EINVAL; ++ ++ return err; ++} ++ ++static int gracl_reload_apply_policies(void *reload) ++{ ++ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload; ++ struct task_struct *task, *task2; ++ struct acl_role_label *role, *rtmp; ++ struct acl_subject_label *subj; ++ const struct cred *cred; ++ int role_applied; ++ int ret = 0; ++ ++ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state)); ++ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state)); ++ ++ /* first make sure we'll be able to apply the new policy cleanly */ ++ do_each_thread(task2, task) { ++ if (task->exec_file == NULL) ++ continue; ++ role_applied = 0; ++ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) { ++ /* preserve special roles */ ++ FOR_EACH_ROLE_START(role) ++ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) { ++ rtmp = task->role; ++ task->role = role; ++ role_applied = 1; ++ break; ++ } ++ FOR_EACH_ROLE_END(role) ++ } ++ if (!role_applied) { ++ cred = __task_cred(task); ++ rtmp = task->role; ++ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid)); ++ } ++ /* this handles non-nested inherited subjects, nested subjects will still ++ be dropped currently */ ++ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename); ++ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL); ++ /* change the role back so that we've made no modifications to the policy */ ++ task->role = rtmp; ++ ++ if (subj == NULL || task->tmpacl == NULL) { ++ ret = -EINVAL; ++ goto out; ++ } ++ } while_each_thread(task2, task); ++ ++ /* now actually apply the policy */ ++ ++ do_each_thread(task2, task) { ++ if (task->exec_file) { ++ role_applied = 0; ++ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) { ++ /* preserve special roles */ ++ FOR_EACH_ROLE_START(role) ++ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) { ++ task->role = role; ++ role_applied = 1; ++ break; ++ } ++ FOR_EACH_ROLE_END(role) ++ } ++ if (!role_applied) { ++ cred = __task_cred(task); ++ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid)); ++ } ++ /* this handles non-nested inherited subjects, nested subjects will still ++ be dropped currently */ ++ if (!reload_state->oldmode && task->inherited) ++ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename); ++ else { ++ /* looked up and tagged to the task previously */ ++ subj = task->tmpacl; ++ } ++ /* subj will be non-null */ ++ __gr_apply_subject_to_task(polstate, task, subj); ++ if (reload_state->oldmode) { ++ task->acl_role_id = 0; ++ task->acl_sp_role = 0; ++ task->inherited = 0; ++ } ++ } else { ++ // it's a kernel process ++ task->role = polstate->kernel_role; ++ task->acl = polstate->kernel_role->root_label; ++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN ++ task->acl->mode &= ~GR_PROCFIND; ++#endif ++ 
} ++ } while_each_thread(task2, task); ++ ++ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state)); ++ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state)); ++ ++out: ++ ++ return ret; ++} ++ ++static int gracl_reload(struct gr_arg *args, unsigned char oldmode) ++{ ++ struct gr_reload_state new_reload_state = { }; ++ int err; ++ ++ new_reload_state.oldpolicy_ptr = polstate; ++ new_reload_state.oldalloc_ptr = current_alloc_state; ++ new_reload_state.oldmode = oldmode; ++ ++ current_alloc_state = &new_reload_state.newalloc; ++ polstate = &new_reload_state.newpolicy; ++ ++ /* everything relevant is now saved off, copy in the new policy */ ++ if (init_variables(args, true)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); ++ err = -ENOMEM; ++ goto error; ++ } ++ ++ err = copy_user_acl(args); ++ free_init_variables(); ++ if (err) ++ goto error; ++ /* the new policy is copied in, with the old policy available via saved_state ++ first go through applying roles, making sure to preserve special roles ++ then apply new subjects, making sure to preserve inherited and nested subjects, ++ though currently only inherited subjects will be preserved ++ */ ++ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL); ++ if (err) ++ goto error; ++ ++ /* we've now applied the new policy, so restore the old policy state to free it */ ++ polstate = &new_reload_state.oldpolicy; ++ current_alloc_state = &new_reload_state.oldalloc; ++ free_variables(true); ++ ++ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied ++ to running_polstate/current_alloc_state inside stop_machine ++ */ ++ err = 0; ++ goto out; ++error: ++ /* on error of loading the new policy, we'll just keep the previous ++ policy set around ++ */ ++ free_variables(true); ++ ++ /* doesn't affect runtime, but maintains consistent state */ ++out: ++ polstate = new_reload_state.oldpolicy_ptr; ++ current_alloc_state = new_reload_state.oldalloc_ptr; ++ ++ return err; ++} ++ ++static int ++gracl_init(struct gr_arg *args) ++{ ++ int error = 0; ++ ++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN); ++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN); ++ ++ if (init_variables(args, false)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); ++ error = -ENOMEM; ++ goto out; ++ } ++ ++ error = copy_user_acl(args); ++ free_init_variables(); ++ if (error) ++ goto out; ++ ++ error = gr_set_acls(0); ++ if (error) ++ goto out; ++ ++ gr_enable_rbac_system(); ++ ++ return 0; ++ ++out: ++ free_variables(false); ++ return error; ++} ++ ++static int ++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, ++ unsigned char **sum) ++{ ++ struct acl_role_label *r; ++ struct role_allowed_ip *ipp; ++ struct role_transition *trans; ++ unsigned int i; ++ int found = 0; ++ u32 curr_ip = current->signal->curr_ip; ++ ++ current->signal->saved_ip = curr_ip; ++ ++ /* check transition table */ ++ ++ for (trans = current->role->transitions; trans; trans = trans->next) { ++ if (!strcmp(rolename, trans->rolename)) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ return 0; ++ ++ /* handle special roles that do not require authentication ++ and check ip */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ found = 0; ++ if (r->allowed_ips != NULL) { ++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { ++ if ((ntohl(curr_ip) & ipp->netmask) == ++ 
(ntohl(ipp->addr) & ipp->netmask))
++ found = 1;
++ }
++ } else
++ found = 2;
++ if (!found)
++ return 0;
++
++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
++ *salt = NULL;
++ *sum = NULL;
++ return 1;
++ }
++ }
++ FOR_EACH_ROLE_END(r)
++
++ for (i = 0; i < polstate->num_sprole_pws; i++) {
++ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
++ *salt = polstate->acl_special_roles[i]->salt;
++ *sum = polstate->acl_special_roles[i]->sum;
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++int gr_check_secure_terminal(struct task_struct *task)
++{
++ struct task_struct *p, *p2, *p3;
++ struct files_struct *files;
++ struct fdtable *fdt;
++ struct file *our_file = NULL, *file;
++ int i;
++
++ if (task->signal->tty == NULL)
++ return 1;
++
++ files = get_files_struct(task);
++ if (files != NULL) {
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
++ get_file(file);
++ our_file = file;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ }
++
++ if (our_file == NULL)
++ return 1;
++
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ files = get_files_struct(p);
++ if (files == NULL ||
++ (p->signal && p->signal->tty == task->signal->tty)) {
++ if (files != NULL)
++ put_files_struct(files);
++ continue;
++ }
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
++ p3 = task;
++ while (task_pid_nr(p3) > 0) {
++ if (p3 == p)
++ break;
++ p3 = p3->real_parent;
++ }
++ if (p3 == p)
++ break;
++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
++ gr_handle_alertkill(p);
++ rcu_read_unlock();
++ put_files_struct(files);
++ read_unlock(&tasklist_lock);
++ fput(our_file);
++ return 0;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++
++ fput(our_file);
++ return 1;
++}
++
++ssize_t
++write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
++{
++ struct gr_arg_wrapper uwrap;
++ unsigned char *sprole_salt = NULL;
++ unsigned char *sprole_sum = NULL;
++ int error = 0;
++ int error2 = 0;
++ size_t req_count = 0;
++ unsigned char oldmode = 0;
++
++ mutex_lock(&gr_dev_mutex);
++
++ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
++ error = -EPERM;
++ goto out;
++ }
++
++#ifdef CONFIG_COMPAT
++ pax_open_kernel();
++ if (is_compat_task()) {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
++ copy_gr_arg = &copy_gr_arg_compat;
++ copy_acl_object_label = &copy_acl_object_label_compat;
++ copy_acl_subject_label = &copy_acl_subject_label_compat;
++ copy_acl_role_label = &copy_acl_role_label_compat;
++ copy_acl_ip_label = &copy_acl_ip_label_compat;
++ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
++ copy_role_transition = &copy_role_transition_compat;
++ copy_sprole_pw = &copy_sprole_pw_compat;
++ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
++ copy_pointer_from_array = &copy_pointer_from_array_compat;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
++ } else {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
++ copy_gr_arg = &copy_gr_arg_normal;
++ copy_acl_object_label = &copy_acl_object_label_normal;
++ copy_acl_subject_label = &copy_acl_subject_label_normal;
++ copy_acl_role_label = &copy_acl_role_label_normal;
++ copy_acl_ip_label = &copy_acl_ip_label_normal;
++ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
++ copy_role_transition = &copy_role_transition_normal;
++ copy_sprole_pw = &copy_sprole_pw_normal;
++ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
++ copy_pointer_from_array = &copy_pointer_from_array_normal;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
++ }
++ pax_close_kernel();
++#endif
++
++ req_count = get_gr_arg_wrapper_size();
++
++ if (count != req_count) {
++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
++ error = -EINVAL;
++ goto out;
++ }
++
++
++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
++ gr_auth_expires = 0;
++ gr_auth_attempts = 0;
++ }
++
++ error = copy_gr_arg_wrapper(buf, &uwrap);
++ if (error)
++ goto out;
++
++ error = copy_gr_arg(uwrap.arg, gr_usermode);
++ if (error)
++ goto out;
++
++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(gr_auth_expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ /* if non-root trying to do anything other than use a special role,
++ do not attempt authentication, do not count towards authentication
++ locking
++ */
++
++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
++ gr_is_global_nonroot(current_uid())) {
++ error = -EPERM;
++ goto out;
++ }
++
++ /* ensure pw and special role name are null terminated */
++
++ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++ /* Okay.
++ * We have enough of the argument structure (we have yet
++ * to copy_from_user the tables themselves). Copy the tables
++ * only if we need them, i.e. for loading operations.
++ 
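*
++ * The commands dispatched below are: GR_STATUS (query whether the
++ * RBAC system is enabled), GR_SHUTDOWN (password-checked disable),
++ * GR_ENABLE (initial policy load via gracl_init), GR_RELOAD and
++ * GR_OLDRELOAD (password-checked live reload via gracl_reload),
++ * GR_SEGVMOD (clear crash/ban state for a subject or uid),
++ * GR_SPROLE and GR_SPROLEPAM (special role authentication), and
++ * GR_UNSPROLE (drop the current special role).
++ 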
*/ ++ ++ switch (gr_usermode->mode) { ++ case GR_STATUS: ++ if (gr_acl_is_enabled()) { ++ error = 1; ++ if (!gr_check_secure_terminal(current)) ++ error = 3; ++ } else ++ error = 2; ++ goto out; ++ case GR_SHUTDOWN: ++ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ stop_machine(gr_rbac_disable, NULL, NULL); ++ free_variables(false); ++ memset(gr_usermode, 0, sizeof(struct gr_arg)); ++ memset(gr_system_salt, 0, GR_SALT_LEN); ++ memset(gr_system_sum, 0, GR_SHA_LEN); ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); ++ } else if (gr_acl_is_enabled()) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); ++ error = -EPERM; ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); ++ error = -EAGAIN; ++ } ++ break; ++ case GR_ENABLE: ++ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode))) ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); ++ else { ++ if (gr_acl_is_enabled()) ++ error = -EAGAIN; ++ else ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); ++ } ++ break; ++ case GR_OLDRELOAD: ++ oldmode = 1; ++ case GR_RELOAD: ++ if (!gr_acl_is_enabled()) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); ++ error = -EAGAIN; ++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ error2 = gracl_reload(gr_usermode, oldmode); ++ if (!error2) ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); ++ else { ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ error = error2; ++ } ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ error = -EPERM; ++ } ++ break; ++ case GR_SEGVMOD: ++ if (unlikely(!gr_acl_is_enabled())) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); ++ if (gr_usermode->segv_device && gr_usermode->segv_inode) { ++ struct acl_subject_label *segvacl; ++ segvacl = ++ lookup_acl_subj_label(gr_usermode->segv_inode, ++ gr_usermode->segv_device, ++ current->role); ++ if (segvacl) { ++ segvacl->crashes = 0; ++ segvacl->expires = 0; ++ } ++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { ++ gr_remove_uid(gr_usermode->segv_uid); ++ } ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); ++ error = -EPERM; ++ } ++ break; ++ case GR_SPROLE: ++ case GR_SPROLEPAM: ++ if (unlikely(!gr_acl_is_enabled())) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) { ++ current->role->expires = 0; ++ current->role->auth_attempts = 0; ++ } ++ ++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(current->role->expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ if (lookup_special_role_auth ++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum) ++ && ((!sprole_salt && !sprole_sum) ++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { ++ char *p = ""; ++ assign_special_role(gr_usermode->sp_role); ++ read_lock(&tasklist_lock); ++ if (current->real_parent) ++ p = current->real_parent->role->rolename; ++ read_unlock(&tasklist_lock); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, ++ p, acl_sp_role_value); ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); ++ error = -EPERM; ++ 
if(!(current->role->auth_attempts++)) ++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ goto out; ++ } ++ break; ++ case GR_UNSPROLE: ++ if (unlikely(!gr_acl_is_enabled())) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->roletype & GR_ROLE_SPECIAL) { ++ char *p = ""; ++ int i = 0; ++ ++ read_lock(&tasklist_lock); ++ if (current->real_parent) { ++ p = current->real_parent->role->rolename; ++ i = current->real_parent->acl_role_id; ++ } ++ read_unlock(&tasklist_lock); ++ ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); ++ gr_set_acls(1); ++ } else { ++ error = -EPERM; ++ goto out; ++ } ++ break; ++ default: ++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); ++ error = -EINVAL; ++ break; ++ } ++ ++ if (error != -EPERM) ++ goto out; ++ ++ if(!(gr_auth_attempts++)) ++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ out: ++ mutex_unlock(&gr_dev_mutex); ++ ++ if (!error) ++ error = req_count; ++ ++ return error; ++} ++ ++int ++gr_set_acls(const int type) ++{ ++ struct task_struct *task, *task2; ++ struct acl_role_label *role = current->role; ++ struct acl_subject_label *subj; ++ __u16 acl_role_id = current->acl_role_id; ++ const struct cred *cred; ++ int ret; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ do_each_thread(task2, task) { ++ /* check to see if we're called from the exit handler, ++ if so, only replace ACLs that have inherited the admin ++ ACL */ ++ ++ if (type && (task->role != role || ++ task->acl_role_id != acl_role_id)) ++ continue; ++ ++ task->acl_role_id = 0; ++ task->acl_sp_role = 0; ++ task->inherited = 0; ++ ++ if (task->exec_file) { ++ cred = __task_cred(task); ++ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid)); ++ subj = __gr_get_subject_for_task(polstate, task, NULL); ++ if (subj == NULL) { ++ ret = -EINVAL; ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task)); ++ return ret; ++ } ++ __gr_apply_subject_to_task(polstate, task, subj); ++ } else { ++ // it's a kernel process ++ task->role = polstate->kernel_role; ++ task->acl = polstate->kernel_role->root_label; ++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN ++ task->acl->mode &= ~GR_PROCFIND; ++#endif ++ } ++ } while_each_thread(task2, task); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 0; ++} +diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c +new file mode 100644 +index 0000000..39645c9 +--- /dev/null ++++ b/grsecurity/gracl_res.c +@@ -0,0 +1,68 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grinternal.h> ++ ++static const char *restab_log[] = { ++ [RLIMIT_CPU] = "RLIMIT_CPU", ++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE", ++ [RLIMIT_DATA] = "RLIMIT_DATA", ++ [RLIMIT_STACK] = "RLIMIT_STACK", ++ [RLIMIT_CORE] = "RLIMIT_CORE", ++ [RLIMIT_RSS] = "RLIMIT_RSS", ++ [RLIMIT_NPROC] = "RLIMIT_NPROC", ++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE", ++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", ++ [RLIMIT_AS] = "RLIMIT_AS", ++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS", ++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING", ++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE", ++ [RLIMIT_NICE] = "RLIMIT_NICE", ++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO", ++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME", ++ 
[GR_CRASH_RES] = "RLIMIT_CRASH" ++}; ++ ++void ++gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ const struct cred *cred; ++ unsigned long rlim; ++ ++ if (!gr_acl_is_enabled() && !grsec_resource_logging) ++ return; ++ ++ // not yet supported resource ++ if (unlikely(!restab_log[res])) ++ return; ++ ++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME) ++ rlim = task_rlimit_max(task, res); ++ else ++ rlim = task_rlimit(task, res); ++ ++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim))) ++ return; ++ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ ++ if (res == RLIMIT_NPROC && ++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || ++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE))) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_MEMLOCK && ++ cap_raised(cred->cap_effective, CAP_IPC_LOCK)) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE)) ++ goto out_rcu_unlock; ++ rcu_read_unlock(); ++ ++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim); ++ ++ return; ++out_rcu_unlock: ++ rcu_read_unlock(); ++ return; ++} +diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c +new file mode 100644 +index 0000000..2040e61 +--- /dev/null ++++ b/grsecurity/gracl_segv.c +@@ -0,0 +1,313 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/slab.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/timer.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) ++#include <linux/magic.h> ++#include <linux/pagemap.h> ++#include "../fs/btrfs/async-thread.h" ++#include "../fs/btrfs/ctree.h" ++#include "../fs/btrfs/btrfs_inode.h" ++#endif ++ ++static struct crash_uid *uid_set; ++static unsigned short uid_used; ++static DEFINE_SPINLOCK(gr_uid_lock); ++extern rwlock_t gr_inode_lock; ++extern struct acl_subject_label * ++ lookup_acl_subj_label(const ino_t inode, const dev_t dev, ++ struct acl_role_label *role); ++ ++static inline dev_t __get_dev(const struct dentry *dentry) ++{ ++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) ++ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC) ++ return BTRFS_I(dentry->d_inode)->root->anon_dev; ++ else ++#endif ++ return dentry->d_sb->s_dev; ++} ++ ++int ++gr_init_uidset(void) ++{ ++ uid_set = ++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); ++ uid_used = 0; ++ ++ return uid_set ? 
1 : 0; ++} ++ ++void ++gr_free_uidset(void) ++{ ++ if (uid_set) { ++ struct crash_uid *tmpset; ++ spin_lock(&gr_uid_lock); ++ tmpset = uid_set; ++ uid_set = NULL; ++ uid_used = 0; ++ spin_unlock(&gr_uid_lock); ++ if (tmpset) ++ kfree(tmpset); ++ } ++ ++ return; ++} ++ ++int ++gr_find_uid(const uid_t uid) ++{ ++ struct crash_uid *tmp = uid_set; ++ uid_t buid; ++ int low = 0, high = uid_used - 1, mid; ++ ++ while (high >= low) { ++ mid = (low + high) >> 1; ++ buid = tmp[mid].uid; ++ if (buid == uid) ++ return mid; ++ if (buid > uid) ++ high = mid - 1; ++ if (buid < uid) ++ low = mid + 1; ++ } ++ ++ return -1; ++} ++ ++static __inline__ void ++gr_insertsort(void) ++{ ++ unsigned short i, j; ++ struct crash_uid index; ++ ++ for (i = 1; i < uid_used; i++) { ++ index = uid_set[i]; ++ j = i; ++ while ((j > 0) && uid_set[j - 1].uid > index.uid) { ++ uid_set[j] = uid_set[j - 1]; ++ j--; ++ } ++ uid_set[j] = index; ++ } ++ ++ return; ++} ++ ++static __inline__ void ++gr_insert_uid(const kuid_t kuid, const unsigned long expires) ++{ ++ int loc; ++ uid_t uid = GR_GLOBAL_UID(kuid); ++ ++ if (uid_used == GR_UIDTABLE_MAX) ++ return; ++ ++ loc = gr_find_uid(uid); ++ ++ if (loc >= 0) { ++ uid_set[loc].expires = expires; ++ return; ++ } ++ ++ uid_set[uid_used].uid = uid; ++ uid_set[uid_used].expires = expires; ++ uid_used++; ++ ++ gr_insertsort(); ++ ++ return; ++} ++ ++void ++gr_remove_uid(const unsigned short loc) ++{ ++ unsigned short i; ++ ++ for (i = loc + 1; i < uid_used; i++) ++ uid_set[i - 1] = uid_set[i]; ++ ++ uid_used--; ++ ++ return; ++} ++ ++int ++gr_check_crash_uid(const kuid_t kuid) ++{ ++ int loc; ++ int ret = 0; ++ uid_t uid; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ uid = GR_GLOBAL_UID(kuid); ++ ++ spin_lock(&gr_uid_lock); ++ loc = gr_find_uid(uid); ++ ++ if (loc < 0) ++ goto out_unlock; ++ ++ if (time_before_eq(uid_set[loc].expires, get_seconds())) ++ gr_remove_uid(loc); ++ else ++ ret = 1; ++ ++out_unlock: ++ spin_unlock(&gr_uid_lock); ++ return ret; ++} ++ ++static __inline__ int ++proc_is_setxid(const struct cred *cred) ++{ ++ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) || ++ !uid_eq(cred->uid, cred->fsuid)) ++ return 1; ++ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) || ++ !gid_eq(cred->gid, cred->fsgid)) ++ return 1; ++ ++ return 0; ++} ++ ++extern int gr_fake_force_sig(int sig, struct task_struct *t); ++ ++void ++gr_handle_crash(struct task_struct *task, const int sig) ++{ ++ struct acl_subject_label *curr; ++ struct task_struct *tsk, *tsk2; ++ const struct cred *cred; ++ const struct cred *cred2; ++ ++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) ++ return; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curr = task->acl; ++ ++ if (!(curr->resmask & (1U << GR_CRASH_RES))) ++ return; ++ ++ if (time_before_eq(curr->expires, get_seconds())) { ++ curr->expires = 0; ++ curr->crashes = 0; ++ } ++ ++ curr->crashes++; ++ ++ if (!curr->expires) ++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) { ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) { ++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ spin_lock(&gr_uid_lock); ++ gr_insert_uid(cred->uid, curr->expires); ++ spin_unlock(&gr_uid_lock); ++ curr->expires = 0; ++ curr->crashes = 0; ++ 
read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ cred2 = __task_cred(tsk); ++ if (tsk != task && uid_eq(cred2->uid, cred->uid)) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } else { ++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ do_each_thread(tsk2, tsk) { ++ if (likely(tsk != task)) { ++ // if this thread has the same subject as the one that triggered ++ // RES_CRASH and it's the same binary, kill it ++ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file)) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ } ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++int ++gr_check_crash_exec(const struct file *filp) ++{ ++ struct acl_subject_label *curr; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ read_lock(&gr_inode_lock); ++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino, ++ __get_dev(filp->f_path.dentry), ++ current->role); ++ read_unlock(&gr_inode_lock); ++ ++ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) || ++ (!curr->crashes && !curr->expires)) ++ return 0; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) ++ return 1; ++ else if (time_before_eq(curr->expires, get_seconds())) { ++ curr->crashes = 0; ++ curr->expires = 0; ++ } ++ ++ return 0; ++} ++ ++void ++gr_handle_alertkill(struct task_struct *task) ++{ ++ struct acl_subject_label *curracl; ++ __u32 curr_ip; ++ struct task_struct *p, *p2; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curracl = task->acl; ++ curr_ip = task->signal->curr_ip; ++ ++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ if (p->signal->curr_ip == curr_ip) ++ gr_fake_force_sig(SIGKILL, p); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ } else if (curracl->mode & GR_KILLPROC) ++ gr_fake_force_sig(SIGKILL, task); ++ ++ return; ++} +diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c +new file mode 100644 +index 0000000..98011b0 +--- /dev/null ++++ b/grsecurity/gracl_shm.c +@@ -0,0 +1,40 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/ipc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const kuid_t cuid, const int shmid) ++{ ++ struct task_struct *task; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ task = find_task_by_vpid(shm_cprid); ++ ++ if (unlikely(!task)) ++ task = find_task_by_vpid(shm_lapid); ++ ++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) || ++ (task_pid_nr(task) == shm_lapid)) && ++ (task->acl->mode & GR_PROTSHM) && ++ (task->acl != current->acl))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 1; ++} +diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c +new file mode 100644 +index 0000000..bc0be01 +--- /dev/null 
++++ b/grsecurity/grsec_chdir.c +@@ -0,0 +1,19 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ if ((grsec_enable_chdir && grsec_enable_group && ++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && ++ !grsec_enable_group)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); ++ } ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c +new file mode 100644 +index 0000000..baa635c +--- /dev/null ++++ b/grsecurity/grsec_chroot.c +@@ -0,0 +1,387 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/mount.h> ++#include <linux/types.h> ++#include "../fs/mount.h" ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD ++int gr_init_ran; ++#endif ++ ++void gr_set_chroot_entries(struct task_struct *task, const struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry && ++ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root ++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD ++ && gr_init_ran ++#endif ++ ) ++ task->gr_is_chrooted = 1; ++ else { ++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD ++ if (task_pid_nr(task) == 1 && !gr_init_ran) ++ gr_init_ran = 1; ++#endif ++ task->gr_is_chrooted = 0; ++ } ++ ++ task->gr_chroot_dentry = path->dentry; ++#endif ++ return; ++} ++ ++void gr_clear_chroot_entries(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC ++ task->gr_is_chrooted = 0; ++ task->gr_chroot_dentry = NULL; ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_unix(const pid_t pid) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ struct task_struct *p; ++ ++ if (unlikely(!grsec_enable_chroot_unix)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ p = find_task_by_vpid_unrestricted(pid); ++ if (unlikely(p && !have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++int ++gr_handle_chroot_nice(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && (niceval < task_nice(p)) ++ && proc_is_chrooted(current)) { ++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p)); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ struct task_struct *p; ++ int ret = 0; ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if (!have_same_root(current, p)) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ return ret; ++#endif ++ 
return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
++ return 0;
++
++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
++ !have_same_root(current, p)) {
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++ struct path path, currentroot;
++ int ret = 0;
++
++ path.dentry = (struct dentry *)u_dentry;
++ path.mnt = (struct vfsmount *)u_mnt;
++ get_fs_root(current->fs, &currentroot);
++ if (path_is_under(&path, &currentroot))
++ ret = 1;
++ path_put(&currentroot);
++
++ return ret;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_fhandle(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else {
++ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ struct task_struct *p;
++ time_t starttime;
++
++ if (unlikely(!grsec_enable_chroot_shmat))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++
++ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
++ starttime = p->start_time.tv_sec;
++ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
++ if (have_same_root(current, p)) {
++ goto allow;
++ } else {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ }
++ /* creator exited, pid reuse, fall through to next check */
++ }
++ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
++ if (unlikely(!have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ }
++
++allow:
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++#endif
++ return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && 
proc_is_chrooted(current)) { ++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_pivot(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ if (grsec_enable_chroot_double && proc_is_chrooted(current) && ++ !gr_is_outside_chroot(dentry, mnt)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++extern const char *captab_log[]; ++extern int captab_log_entries; ++ ++int ++gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) { ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; ++ if (cap_raised(chroot_caps, cap)) { ++ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) { ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]); ++ } ++ return 0; ++ } ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_chroot_is_capable(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ return gr_task_chroot_is_capable(current, current_cred(), cap); ++#endif ++ return 1; ++} ++ ++int ++gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) { ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; ++ if (cap_raised(chroot_caps, cap)) { ++ return 0; ++ } ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_chroot_is_capable_nolog(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ return gr_task_chroot_is_capable_nolog(current, cap); ++#endif ++ return 1; ++} ++ ++int ++gr_handle_chroot_sysctl(const int op) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) && ++ proc_is_chrooted(current)) ++ return -EACCES; ++#endif ++ return 0; ++} ++ ++void ++gr_handle_chroot_chdir(const struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ if (grsec_enable_chroot_chdir) ++ set_fs_pwd(current->fs, path); ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ /* allow chmod +s on directories, but not files */ ++ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) && ++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && ++ proc_is_chrooted(current)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c +new file mode 100644 +index 0000000..1e028d7 +--- /dev/null ++++ b/grsecurity/grsec_disabled.c +@@ -0,0 +1,439 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/kdev_t.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/ip.h> ++#include <linux/skbuff.h> ++#include <linux/sysctl.h> ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++void ++pax_set_initial_flags(struct linux_binprm *bprm) 
++{ ++ return; ++} ++#endif ++ ++#ifdef CONFIG_SYSCTL ++__u32 ++gr_handle_sysctl(const struct ctl_table * table, const int op) ++{ ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_TASKSTATS ++int gr_is_taskstats_denied(int pid) ++{ ++ return 0; ++} ++#endif ++ ++int ++gr_acl_is_enabled(void) ++{ ++ return 0; ++} ++ ++int ++gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap) ++{ ++ return 0; ++} ++ ++void ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) ++{ ++ return; ++} ++ ++int ++gr_handle_rawio(const struct inode *inode) ++{ ++ return 0; ++} ++ ++void ++gr_acl_handle_psacct(struct task_struct *task, const long code) ++{ ++ return; ++} ++ ++int ++gr_handle_ptrace(struct task_struct *task, const long request) ++{ ++ return 0; ++} ++ ++int ++gr_handle_proc_ptrace(struct task_struct *task) ++{ ++ return 0; ++} ++ ++int ++gr_set_acls(const int type) ++{ ++ return 0; ++} ++ ++int ++gr_check_hidden_task(const struct task_struct *tsk) ++{ ++ return 0; ++} ++ ++int ++gr_check_protected_task(const struct task_struct *task) ++{ ++ return 0; ++} ++ ++int ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) ++{ ++ return 0; ++} ++ ++void ++gr_copy_label(struct task_struct *tsk) ++{ ++ return; ++} ++ ++void ++gr_set_pax_flags(struct task_struct *task) ++{ ++ return; ++} ++ ++int ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, ++ const int unsafe_share) ++{ ++ return 0; ++} ++ ++void ++gr_handle_delete(const ino_t ino, const dev_t dev) ++{ ++ return; ++} ++ ++void ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return; ++} ++ ++void ++gr_handle_crash(struct task_struct *task, const int sig) ++{ ++ return; ++} ++ ++int ++gr_check_crash_exec(const struct file *filp) ++{ ++ return 0; ++} ++ ++int ++gr_check_crash_uid(const kuid_t uid) ++{ ++ return 0; ++} ++ ++void ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace) ++{ ++ return; ++} ++ ++int ++gr_search_socket(const int family, const int type, const int protocol) ++{ ++ return 1; ++} ++ ++int ++gr_search_connectbind(const int mode, const struct socket *sock, ++ const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++void ++gr_handle_alertkill(struct task_struct *task) ++{ ++ return; ++} ++ ++__u32 ++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_hidden_file(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, ++ int acc_mode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++int ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot, ++ unsigned int *vm_flags) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_truncate(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_access(const struct dentry * dentry, ++ const struct vfsmount * mnt, const int fmode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_chmod(const 
struct dentry * dentry, const struct vfsmount * mnt, ++ umode_t *mode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++void ++grsecurity_init(void) ++{ ++ return; ++} ++ ++umode_t gr_acl_umask(void) ++{ ++ return 0; ++} ++ ++__u32 ++gr_acl_handle_mknod(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const int mode) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_mkdir(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_symlink(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, const struct filename *from) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, ++ const struct vfsmount * old_mnt, const struct filename *to) ++{ ++ return 1; ++} ++ ++int ++gr_acl_handle_rename(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct inode *old_parent_inode, ++ const struct vfsmount *old_mnt, const struct filename *newname) ++{ ++ return 0; ++} ++ ++int ++gr_acl_handle_filldir(const struct file *file, const char *name, ++ const int namelen, const ino_t ino) ++{ ++ return 1; ++} ++ ++int ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const kuid_t cuid, const int shmid) ++{ ++ return 1; ++} ++ ++int ++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++int ++gr_search_accept(const struct socket *sock) ++{ ++ return 0; ++} ++ ++int ++gr_search_listen(const struct socket *sock) ++{ ++ return 0; ++} ++ ++int ++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++__u32 ++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_creat(const struct dentry * dentry, ++ const struct dentry * p_dentry, ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode, ++ const int imode) ++{ ++ return 1; ++} ++ ++void ++gr_acl_handle_exit(void) ++{ ++ return; ++} ++ ++int ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) ++{ ++ return 1; ++} ++ ++void ++gr_set_role_label(const kuid_t uid, const kgid_t gid) ++{ ++ return; ++} ++ ++int ++gr_acl_handle_procpidmem(const struct task_struct *task) ++{ ++ return 0; ++} ++ ++int ++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) ++{ ++ return 0; ++} ++ ++int ++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) ++{ ++ return 0; ++} ++ ++int ++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs) ++{ ++ return 0; ++} ++ ++int ++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs) ++{ ++ return 0; ++} ++ ++int gr_acl_enable_at_secure(void) ++{ ++ return 0; ++} ++ ++dev_t gr_get_dev_from_dentry(struct dentry *dentry) ++{ ++ return dentry->d_sb->s_dev; ++} ++ 
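++/*
++ * Note on the stub convention used throughout this file: when
++ * CONFIG_GRKERNSEC is disabled, every grsecurity hook must leave the
++ * kernel behaving as if the patch were absent. Handlers returning
++ * __u32 act as permission results and therefore return 1 (allow),
++ * while predicate-style checks ("is this denied/restricted?") return 0.
++ * A sketch of how a VFS call site is assumed to consume the __u32
++ * handlers (the caller shown here is illustrative, not part of this
++ * hunk):
++ *
++ *	error = -EACCES;
++ *	if (!gr_acl_handle_unlink(dentry, mnt))
++ *		goto out;	(denied by policy; never taken with these stubs)
++ */
++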
++void gr_put_exec_file(struct task_struct *task) ++{ ++ return; ++} ++ ++#ifdef CONFIG_SECURITY ++EXPORT_SYMBOL_GPL(gr_check_user_change); ++EXPORT_SYMBOL_GPL(gr_check_group_change); ++#endif +diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c +new file mode 100644 +index 0000000..14638ff +--- /dev/null ++++ b/grsecurity/grsec_exec.c +@@ -0,0 +1,188 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/binfmts.h> ++#include <linux/fs.h> ++#include <linux/types.h> ++#include <linux/grdefs.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/capability.h> ++#include <linux/module.h> ++#include <linux/compat.h> ++ ++#include <asm/uaccess.h> ++ ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++static char gr_exec_arg_buf[132]; ++static DEFINE_MUTEX(gr_exec_arg_mutex); ++#endif ++ ++struct user_arg_ptr { ++#ifdef CONFIG_COMPAT ++ bool is_compat; ++#endif ++ union { ++ const char __user *const __user *native; ++#ifdef CONFIG_COMPAT ++ const compat_uptr_t __user *compat; ++#endif ++ } ptr; ++}; ++ ++extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr); ++ ++void ++gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv) ++{ ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ char *grarg = gr_exec_arg_buf; ++ unsigned int i, x, execlen = 0; ++ char c; ++ ++ if (!((grsec_enable_execlog && grsec_enable_group && ++ in_group_p(grsec_audit_gid)) ++ || (grsec_enable_execlog && !grsec_enable_group))) ++ return; ++ ++ mutex_lock(&gr_exec_arg_mutex); ++ memset(grarg, 0, sizeof(gr_exec_arg_buf)); ++ ++ for (i = 0; i < bprm->argc && execlen < 128; i++) { ++ const char __user *p; ++ unsigned int len; ++ ++ p = get_user_arg_ptr(argv, i); ++ if (IS_ERR(p)) ++ goto log; ++ ++ len = strnlen_user(p, 128 - execlen); ++ if (len > 128 - execlen) ++ len = 128 - execlen; ++ else if (len > 0) ++ len--; ++ if (copy_from_user(grarg + execlen, p, len)) ++ goto log; ++ ++ /* rewrite unprintable characters */ ++ for (x = 0; x < len; x++) { ++ c = *(grarg + execlen + x); ++ if (c < 32 || c > 126) ++ *(grarg + execlen + x) = ' '; ++ } ++ ++ execlen += len; ++ *(grarg + execlen) = ' '; ++ *(grarg + execlen + 1) = '\0'; ++ execlen++; ++ } ++ ++ log: ++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry, ++ bprm->file->f_path.mnt, grarg); ++ mutex_unlock(&gr_exec_arg_mutex); ++#endif ++ return; ++} ++ ++#ifdef CONFIG_GRKERNSEC ++extern int gr_acl_is_capable(const int cap); ++extern int gr_acl_is_capable_nolog(const int cap); ++extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap); ++extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap); ++extern int gr_chroot_is_capable(const int cap); ++extern int gr_chroot_is_capable_nolog(const int cap); ++extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap); ++extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap); ++#endif ++ ++const char *captab_log[] = { ++ "CAP_CHOWN", ++ "CAP_DAC_OVERRIDE", ++ "CAP_DAC_READ_SEARCH", ++ "CAP_FOWNER", ++ "CAP_FSETID", ++ "CAP_KILL", ++ "CAP_SETGID", ++ "CAP_SETUID", ++ "CAP_SETPCAP", ++ "CAP_LINUX_IMMUTABLE", ++ "CAP_NET_BIND_SERVICE", ++ "CAP_NET_BROADCAST", ++ "CAP_NET_ADMIN", ++ "CAP_NET_RAW", ++ "CAP_IPC_LOCK", ++ "CAP_IPC_OWNER", ++ "CAP_SYS_MODULE", ++ "CAP_SYS_RAWIO", ++ "CAP_SYS_CHROOT", ++ "CAP_SYS_PTRACE", ++ "CAP_SYS_PACCT", ++ "CAP_SYS_ADMIN", ++ 
"CAP_SYS_BOOT", ++ "CAP_SYS_NICE", ++ "CAP_SYS_RESOURCE", ++ "CAP_SYS_TIME", ++ "CAP_SYS_TTY_CONFIG", ++ "CAP_MKNOD", ++ "CAP_LEASE", ++ "CAP_AUDIT_WRITE", ++ "CAP_AUDIT_CONTROL", ++ "CAP_SETFCAP", ++ "CAP_MAC_OVERRIDE", ++ "CAP_MAC_ADMIN", ++ "CAP_SYSLOG", ++ "CAP_WAKE_ALARM", ++ "CAP_BLOCK_SUSPEND" ++}; ++ ++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]); ++ ++int gr_is_capable(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap)) ++ return 1; ++ return 0; ++#else ++ return 1; ++#endif ++} ++ ++int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap)) ++ return 1; ++ return 0; ++#else ++ return 1; ++#endif ++} ++ ++int gr_is_capable_nolog(const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap)) ++ return 1; ++ return 0; ++#else ++ return 1; ++#endif ++} ++ ++int gr_task_is_capable_nolog(const struct task_struct *task, const int cap) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap)) ++ return 1; ++ return 0; ++#else ++ return 1; ++#endif ++} ++ ++EXPORT_SYMBOL_GPL(gr_is_capable); ++EXPORT_SYMBOL_GPL(gr_is_capable_nolog); ++EXPORT_SYMBOL_GPL(gr_task_is_capable); ++EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog); +diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c +new file mode 100644 +index 0000000..06cc6ea +--- /dev/null ++++ b/grsecurity/grsec_fifo.c +@@ -0,0 +1,24 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt, ++ const struct dentry *dir, const int flag, const int acc_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_FIFO ++ const struct cred *cred = current_cred(); ++ ++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) && ++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) && ++ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) && ++ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) { ++ if (!inode_permission(dentry->d_inode, acc_mode)) ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid)); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c +new file mode 100644 +index 0000000..8ca18bf +--- /dev/null ++++ b/grsecurity/grsec_fork.c +@@ -0,0 +1,23 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/errno.h> ++ ++void ++gr_log_forkfail(const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) { ++ switch (retval) { ++ case -EAGAIN: ++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN"); ++ break; ++ case -ENOMEM: ++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM"); ++ break; ++ } ++ } ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c +new file mode 100644 +index 0000000..b7cb191 +--- /dev/null ++++ b/grsecurity/grsec_init.c +@@ -0,0 +1,286 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/gracl.h> ++#include <linux/slab.h> ++#include <linux/vmalloc.h> 
++#include <linux/percpu.h> ++#include <linux/module.h> ++ ++int grsec_enable_ptrace_readexec; ++int grsec_enable_setxid; ++int grsec_enable_symlinkown; ++kgid_t grsec_symlinkown_gid; ++int grsec_enable_brute; ++int grsec_enable_link; ++int grsec_enable_dmesg; ++int grsec_enable_harden_ptrace; ++int grsec_enable_harden_ipc; ++int grsec_enable_fifo; ++int grsec_enable_execlog; ++int grsec_enable_signal; ++int grsec_enable_forkfail; ++int grsec_enable_audit_ptrace; ++int grsec_enable_time; ++int grsec_enable_group; ++kgid_t grsec_audit_gid; ++int grsec_enable_chdir; ++int grsec_enable_mount; ++int grsec_enable_rofs; ++int grsec_deny_new_usb; ++int grsec_enable_chroot_findtask; ++int grsec_enable_chroot_mount; ++int grsec_enable_chroot_shmat; ++int grsec_enable_chroot_fchdir; ++int grsec_enable_chroot_double; ++int grsec_enable_chroot_pivot; ++int grsec_enable_chroot_chdir; ++int grsec_enable_chroot_chmod; ++int grsec_enable_chroot_mknod; ++int grsec_enable_chroot_nice; ++int grsec_enable_chroot_execlog; ++int grsec_enable_chroot_caps; ++int grsec_enable_chroot_sysctl; ++int grsec_enable_chroot_unix; ++int grsec_enable_tpe; ++kgid_t grsec_tpe_gid; ++int grsec_enable_blackhole; ++#ifdef CONFIG_IPV6_MODULE ++EXPORT_SYMBOL_GPL(grsec_enable_blackhole); ++#endif ++int grsec_lastack_retries; ++int grsec_enable_tpe_all; ++int grsec_enable_tpe_invert; ++int grsec_enable_socket_all; ++kgid_t grsec_socket_all_gid; ++int grsec_enable_socket_client; ++kgid_t grsec_socket_client_gid; ++int grsec_enable_socket_server; ++kgid_t grsec_socket_server_gid; ++int grsec_resource_logging; ++int grsec_disable_privio; ++int grsec_enable_log_rwxmaps; ++int grsec_lock; ++ ++DEFINE_SPINLOCK(grsec_alert_lock); ++unsigned long grsec_alert_wtime = 0; ++unsigned long grsec_alert_fyet = 0; ++ ++DEFINE_SPINLOCK(grsec_audit_lock); ++ ++DEFINE_RWLOCK(grsec_exec_file_lock); ++ ++char *gr_shared_page[4]; ++ ++char *gr_alert_log_fmt; ++char *gr_audit_log_fmt; ++char *gr_alert_log_buf; ++char *gr_audit_log_buf; ++ ++extern struct gr_arg *gr_usermode; ++extern unsigned char *gr_system_salt; ++extern unsigned char *gr_system_sum; ++ ++void __init ++grsecurity_init(void) ++{ ++ int j; ++ /* create the per-cpu shared pages */ ++ ++#ifdef CONFIG_X86 ++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36); ++#endif ++ ++ for (j = 0; j < 4; j++) { ++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long)); ++ if (gr_shared_page[j] == NULL) { ++ panic("Unable to allocate grsecurity shared page"); ++ return; ++ } ++ } ++ ++ /* allocate log buffers */ ++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL); ++ if (!gr_alert_log_fmt) { ++ panic("Unable to allocate grsecurity alert log format buffer"); ++ return; ++ } ++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL); ++ if (!gr_audit_log_fmt) { ++ panic("Unable to allocate grsecurity audit log format buffer"); ++ return; ++ } ++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL); ++ if (!gr_alert_log_buf) { ++ panic("Unable to allocate grsecurity alert log buffer"); ++ return; ++ } ++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL); ++ if (!gr_audit_log_buf) { ++ panic("Unable to allocate grsecurity audit log buffer"); ++ return; ++ } ++ ++ /* allocate memory for authentication structure */ ++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL); ++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL); ++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL); ++ ++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) { ++ panic("Unable to allocate grsecurity 
authentication structure"); ++ return; ++ } ++ ++#ifdef CONFIG_GRKERNSEC_IO ++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO) ++ grsec_disable_privio = 1; ++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON) ++ grsec_disable_privio = 1; ++#else ++ grsec_disable_privio = 0; ++#endif ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ /* for backward compatibility, tpe_invert always defaults to on if ++ enabled in the kernel ++ */ ++ grsec_enable_tpe_invert = 1; ++#endif ++ ++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) ++#ifndef CONFIG_GRKERNSEC_SYSCTL ++ grsec_lock = 1; ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ grsec_enable_log_rwxmaps = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP ++ grsec_enable_group = 1; ++ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID); ++#endif ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ grsec_enable_ptrace_readexec = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ grsec_enable_chdir = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ grsec_enable_harden_ptrace = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_IPC ++ grsec_enable_harden_ipc = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ grsec_enable_mount = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_LINK ++ grsec_enable_link = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ grsec_enable_brute = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_DMESG ++ grsec_enable_dmesg = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ grsec_enable_blackhole = 1; ++ grsec_lastack_retries = 4; ++#endif ++#ifdef CONFIG_GRKERNSEC_FIFO ++ grsec_enable_fifo = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ grsec_enable_execlog = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ grsec_enable_setxid = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ grsec_enable_signal = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ grsec_enable_forkfail = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_TIME ++ grsec_enable_time = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ grsec_resource_logging = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ grsec_enable_chroot_findtask = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ grsec_enable_chroot_unix = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ grsec_enable_chroot_mount = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ grsec_enable_chroot_fchdir = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ grsec_enable_chroot_shmat = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ grsec_enable_audit_ptrace = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ grsec_enable_chroot_double = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ grsec_enable_chroot_pivot = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ grsec_enable_chroot_chdir = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ grsec_enable_chroot_chmod = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ grsec_enable_chroot_mknod = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ grsec_enable_chroot_nice = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ grsec_enable_chroot_execlog = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ grsec_enable_chroot_caps = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ grsec_enable_chroot_sysctl = 1; ++#endif ++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN ++ grsec_enable_symlinkown = 1; ++ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID); ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE ++ grsec_enable_tpe = 1; ++ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID); ++#ifdef CONFIG_GRKERNSEC_TPE_ALL 
++ grsec_enable_tpe_all = 1; ++#endif ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ grsec_enable_socket_all = 1; ++ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID); ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ grsec_enable_socket_client = 1; ++ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID); ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ grsec_enable_socket_server = 1; ++ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID); ++#endif ++#endif ++#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE ++ grsec_deny_new_usb = 1; ++#endif ++ ++ return; ++} +diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c +new file mode 100644 +index 0000000..1773300 +--- /dev/null ++++ b/grsecurity/grsec_ipc.c +@@ -0,0 +1,48 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/ipc.h> ++#include <linux/ipc_namespace.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_HARDEN_IPC ++ int write; ++ int orig_granted_mode; ++ kuid_t euid; ++ kgid_t egid; ++ ++ if (!grsec_enable_harden_ipc) ++ return 1; ++ ++ euid = current_euid(); ++ egid = current_egid(); ++ ++ write = requested_mode & 00002; ++ orig_granted_mode = ipcp->mode; ++ ++ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid)) ++ orig_granted_mode >>= 6; ++ else { ++ /* if likely wrong permissions, lock to user */ ++ if (orig_granted_mode & 0007) ++ orig_granted_mode = 0; ++ /* otherwise do an egid-only check */ ++ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid)) ++ orig_granted_mode >>= 3; ++ /* otherwise, no access */ ++ else ++ orig_granted_mode = 0; ++ } ++ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) && ++ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) { ++ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid)); ++ return 0; ++ } ++#endif ++ return 1; ++} +diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c +new file mode 100644 +index 0000000..5e05e20 +--- /dev/null ++++ b/grsecurity/grsec_link.c +@@ -0,0 +1,58 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++ ++int gr_handle_symlink_owner(const struct path *link, const struct inode *target) ++{ ++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN ++ const struct inode *link_inode = link->dentry->d_inode; ++ ++ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) && ++ /* ignore root-owned links, e.g. 
/proc/self */ ++ gr_is_global_nonroot(link_inode->i_uid) && target && ++ !uid_eq(link_inode->i_uid, target->i_uid)) { ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid); ++ return 1; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_follow_link(const struct inode *parent, ++ const struct inode *inode, ++ const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_LINK ++ const struct cred *cred = current_cred(); ++ ++ if (grsec_enable_link && S_ISLNK(inode->i_mode) && ++ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) && ++ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) { ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_hardlink(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ struct inode *inode, const int mode, const struct filename *to) ++{ ++#ifdef CONFIG_GRKERNSEC_LINK ++ const struct cred *cred = current_cred(); ++ ++ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) && ++ (!S_ISREG(mode) || is_privileged_binary(dentry) || ++ (inode_permission(inode, MAY_READ | MAY_WRITE))) && ++ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) { ++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c +new file mode 100644 +index 0000000..dbe0a6b +--- /dev/null ++++ b/grsecurity/grsec_log.c +@@ -0,0 +1,341 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/tty.h> ++#include <linux/fs.h> ++#include <linux/mm.h> ++#include <linux/grinternal.h> ++ ++#ifdef CONFIG_TREE_PREEMPT_RCU ++#define DISABLE_PREEMPT() preempt_disable() ++#define ENABLE_PREEMPT() preempt_enable() ++#else ++#define DISABLE_PREEMPT() ++#define ENABLE_PREEMPT() ++#endif ++ ++#define BEGIN_LOCKS(x) \ ++ DISABLE_PREEMPT(); \ ++ rcu_read_lock(); \ ++ read_lock(&tasklist_lock); \ ++ read_lock(&grsec_exec_file_lock); \ ++ if (x != GR_DO_AUDIT) \ ++ spin_lock(&grsec_alert_lock); \ ++ else \ ++ spin_lock(&grsec_audit_lock) ++ ++#define END_LOCKS(x) \ ++ if (x != GR_DO_AUDIT) \ ++ spin_unlock(&grsec_alert_lock); \ ++ else \ ++ spin_unlock(&grsec_audit_lock); \ ++ read_unlock(&grsec_exec_file_lock); \ ++ read_unlock(&tasklist_lock); \ ++ rcu_read_unlock(); \ ++ ENABLE_PREEMPT(); \ ++ if (x == GR_DONT_AUDIT) \ ++ gr_handle_alertkill(current) ++ ++enum { ++ FLOODING, ++ NO_FLOODING ++}; ++ ++extern char *gr_alert_log_fmt; ++extern char *gr_audit_log_fmt; ++extern char *gr_alert_log_buf; ++extern char *gr_audit_log_buf; ++ ++static int gr_log_start(int audit) ++{ ++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT; ++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt; ++ char *buf = (audit == GR_DO_AUDIT) ? 
gr_audit_log_buf : gr_alert_log_buf; ++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0) ++ unsigned long curr_secs = get_seconds(); ++ ++ if (audit == GR_DO_AUDIT) ++ goto set_fmt; ++ ++ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) { ++ grsec_alert_wtime = curr_secs; ++ grsec_alert_fyet = 0; ++ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME) ++ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { ++ grsec_alert_fyet++; ++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { ++ grsec_alert_wtime = curr_secs; ++ grsec_alert_fyet++; ++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); ++ return FLOODING; ++ } ++ else return FLOODING; ++ ++set_fmt: ++#endif ++ memset(buf, 0, PAGE_SIZE); ++ if (current->signal->curr_ip && gr_acl_is_enabled()) { ++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) "); ++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename); ++ } else if (current->signal->curr_ip) { ++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: "); ++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip); ++ } else if (gr_acl_is_enabled()) { ++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) "); ++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename); ++ } else { ++ sprintf(fmt, "%s%s", loglevel, "grsec: "); ++ strcpy(buf, fmt); ++ } ++ ++ return NO_FLOODING; ++} ++ ++static void gr_log_middle(int audit, const char *msg, va_list ap) ++ __attribute__ ((format (printf, 2, 0))); ++ ++static void gr_log_middle(int audit, const char *msg, va_list ap) ++{ ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++ unsigned int len = strlen(buf); ++ ++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); ++ ++ return; ++} ++ ++static void gr_log_middle_varargs(int audit, const char *msg, ...) ++ __attribute__ ((format (printf, 2, 3))); ++ ++static void gr_log_middle_varargs(int audit, const char *msg, ...) ++{ ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++ unsigned int len = strlen(buf); ++ va_list ap; ++ ++ va_start(ap, msg); ++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); ++ va_end(ap); ++ ++ return; ++} ++ ++static void gr_log_end(int audit, int append_default) ++{ ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; ++ if (append_default) { ++ struct task_struct *task = current; ++ struct task_struct *parent = task->real_parent; ++ const struct cred *cred = __task_cred(task); ++ const struct cred *pcred = __task_cred(parent); ++ unsigned int len = strlen(buf); ++ ++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); ++ } ++ ++ printk("%s\n", buf); ++ ++ return; ++} ++ ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...) ++{ ++ int logtype; ++ char *result = (audit == GR_DO_AUDIT) ? 
"successful" : "denied"; ++ char *str1 = NULL, *str2 = NULL, *str3 = NULL; ++ void *voidptr = NULL; ++ int num1 = 0, num2 = 0; ++ unsigned long ulong1 = 0, ulong2 = 0; ++ struct dentry *dentry = NULL; ++ struct vfsmount *mnt = NULL; ++ struct file *file = NULL; ++ struct task_struct *task = NULL; ++ struct vm_area_struct *vma = NULL; ++ const struct cred *cred, *pcred; ++ va_list ap; ++ ++ BEGIN_LOCKS(audit); ++ logtype = gr_log_start(audit); ++ if (logtype == FLOODING) { ++ END_LOCKS(audit); ++ return; ++ } ++ va_start(ap, argtypes); ++ switch (argtypes) { ++ case GR_TTYSNIFF: ++ task = va_arg(ap, struct task_struct *); ++ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent)); ++ break; ++ case GR_SYSCTL_HIDDEN: ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, str1); ++ break; ++ case GR_RBAC: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_RBAC_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); ++ break; ++ case GR_STR_RBAC: ++ str1 = va_arg(ap, char *); ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_RBAC_MODE2: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ str2 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); ++ break; ++ case GR_RBAC_MODE3: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ str2 = va_arg(ap, char *); ++ str3 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); ++ break; ++ case GR_FILENAME: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_STR_FILENAME: ++ str1 = va_arg(ap, char *); ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); ++ break; ++ case GR_FILENAME_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); ++ break; ++ case GR_FILENAME_TWO_INT: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ num1 = va_arg(ap, int); ++ num2 = va_arg(ap, int); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); ++ break; ++ case GR_FILENAME_TWO_INT_STR: ++ dentry = va_arg(ap, struct dentry *); ++ mnt = va_arg(ap, struct vfsmount *); ++ num1 = va_arg(ap, int); ++ num2 = va_arg(ap, int); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); ++ break; ++ case GR_TEXTREL: ++ file = va_arg(ap, struct file *); ++ ulong1 = va_arg(ap, unsigned long); ++ ulong2 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, file ? 
gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2); ++ break; ++ case GR_PTRACE: ++ task = va_arg(ap, struct task_struct *); ++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task)); ++ break; ++ case GR_RESOURCE: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ulong1 = va_arg(ap, unsigned long); ++ str1 = va_arg(ap, char *); ++ ulong2 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); ++ break; ++ case GR_CAP: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ str1 = va_arg(ap, char *); ++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); ++ break; ++ case GR_SIG: ++ str1 = va_arg(ap, char *); ++ voidptr = va_arg(ap, void *); ++ gr_log_middle_varargs(audit, msg, str1, voidptr); ++ break; ++ case GR_SIG2: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ num1 = va_arg(ap, int); ++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); ++ break; ++ case GR_CRASH1: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ulong1 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1); ++ break; ++ case GR_CRASH2: ++ task = va_arg(ap, struct task_struct *); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ulong1 = va_arg(ap, unsigned long); ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1); ++ break; ++ case GR_RWXMAP: ++ file = 
va_arg(ap, struct file *); ++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>"); ++ break; ++ case GR_RWXMAPVMA: ++ vma = va_arg(ap, struct vm_area_struct *); ++ if (vma->vm_file) ++ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt); ++ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ++ str1 = "<stack>"; ++ else if (vma->vm_start <= current->mm->brk && ++ vma->vm_end >= current->mm->start_brk) ++ str1 = "<heap>"; ++ else ++ str1 = "<anonymous mapping>"; ++ gr_log_middle_varargs(audit, msg, str1); ++ break; ++ case GR_PSACCT: ++ { ++ unsigned int wday, cday; ++ __u8 whr, chr; ++ __u8 wmin, cmin; ++ __u8 wsec, csec; ++ char cur_tty[64] = { 0 }; ++ char parent_tty[64] = { 0 }; ++ ++ task = va_arg(ap, struct task_struct *); ++ wday = va_arg(ap, unsigned int); ++ cday = va_arg(ap, unsigned int); ++ whr = va_arg(ap, int); ++ chr = va_arg(ap, int); ++ wmin = va_arg(ap, int); ++ cmin = va_arg(ap, int); ++ wsec = va_arg(ap, int); ++ csec = va_arg(ap, int); ++ ulong1 = va_arg(ap, unsigned long); ++ cred = __task_cred(task); ++ pcred = __task_cred(task->real_parent); ++ ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid)); ++ } ++ break; ++ default: ++ gr_log_middle(audit, msg, ap); ++ } ++ va_end(ap); ++ // these don't need DEFAULTSECARGS printed on the end ++ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2) ++ gr_log_end(audit, 0); ++ else ++ gr_log_end(audit, 1); ++ END_LOCKS(audit); ++} +diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c +new file mode 100644 +index 0000000..0e39d8c +--- /dev/null ++++ b/grsecurity/grsec_mem.c +@@ -0,0 +1,48 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/mman.h> ++#include <linux/module.h> ++#include <linux/grinternal.h> ++ ++void gr_handle_msr_write(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG); ++ return; ++} ++EXPORT_SYMBOL_GPL(gr_handle_msr_write); ++ ++void ++gr_handle_ioperm(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG); ++ return; ++} ++ ++void ++gr_handle_iopl(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG); ++ return; ++} ++ ++void ++gr_handle_mem_readwrite(u64 from, u64 to) ++{ ++ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to); ++ return; ++} ++ ++void ++gr_handle_vm86(void) ++{ ++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG); ++ return; ++} ++ ++void ++gr_log_badprocpid(const char *entry) ++{ ++ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry); ++ return; ++} +diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c +new file mode 100644 +index 0000000..cd9e124 +--- /dev/null ++++ b/grsecurity/grsec_mount.c +@@ -0,0 +1,65 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mount.h> ++#include <linux/major.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_remount(const char 
*devname, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none"); ++#endif ++ return; ++} ++ ++void ++gr_log_unmount(const char *devname, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none"); ++#endif ++ return; ++} ++ ++void ++gr_log_mount(const char *from, const char *to, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to); ++#endif ++ return; ++} ++ ++int ++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ struct inode *inode = dentry->d_inode; ++ ++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) && ++ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c +new file mode 100644 +index 0000000..6ee9d50 +--- /dev/null ++++ b/grsecurity/grsec_pax.c +@@ -0,0 +1,45 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++#include <linux/grsecurity.h> ++ ++void ++gr_log_textrel(struct vm_area_struct * vma) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); ++#endif ++ return; ++} ++ ++void gr_log_ptgnustack(struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file); ++#endif ++ return; ++} ++ ++void ++gr_log_rwxmmap(struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file); ++#endif ++ return; ++} ++ ++void ++gr_log_rwxmprotect(struct vm_area_struct *vma) ++{ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (grsec_enable_log_rwxmaps) ++ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma); ++#endif ++ return; ++} +diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c +new file mode 100644 +index 0000000..2005a3a +--- /dev/null ++++ b/grsecurity/grsec_proc.c +@@ -0,0 +1,20 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int gr_proc_is_restricted(void) ++{ ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred = current_cred(); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID)) ++ return -EACCES; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid)) ++ return -EACCES; ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_ptrace.c 
b/grsecurity/grsec_ptrace.c +new file mode 100644 +index 0000000..f7f29aa +--- /dev/null ++++ b/grsecurity/grsec_ptrace.c +@@ -0,0 +1,30 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/security.h> ++ ++void ++gr_audit_ptrace(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ if (grsec_enable_audit_ptrace) ++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task); ++#endif ++ return; ++} ++ ++int ++gr_ptrace_readexec(struct file *file, int unsafe_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ const struct dentry *dentry = file->f_path.dentry; ++ const struct vfsmount *mnt = file->f_path.mnt; ++ ++ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && ++ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c +new file mode 100644 +index 0000000..3860c7e +--- /dev/null ++++ b/grsecurity/grsec_sig.c +@@ -0,0 +1,236 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/delay.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/hardirq.h> ++ ++char *signames[] = { ++ [SIGSEGV] = "Segmentation fault", ++ [SIGILL] = "Illegal instruction", ++ [SIGABRT] = "Abort", ++ [SIGBUS] = "Invalid alignment/Bus error" ++}; ++ ++void ++gr_log_signal(const int sig, const void *addr, const struct task_struct *t) ++{ ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || ++ (sig == SIGABRT) || (sig == SIGBUS))) { ++ if (task_pid_nr(t) == task_pid_nr(current)) { ++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr); ++ } else { ++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); ++ } ++ } ++#endif ++ return; ++} ++ ++int ++gr_handle_signal(const struct task_struct *p, const int sig) ++{ ++#ifdef CONFIG_GRKERNSEC ++ /* ignore the 0 signal for protected task checks */ ++ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) { ++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); ++ return -EPERM; ++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) { ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++#ifdef CONFIG_GRKERNSEC ++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t); ++ ++int gr_fake_force_sig(int sig, struct task_struct *t) ++{ ++ unsigned long int flags; ++ int ret, blocked, ignored; ++ struct k_sigaction *action; ++ ++ spin_lock_irqsave(&t->sighand->siglock, flags); ++ action = &t->sighand->action[sig-1]; ++ ignored = action->sa.sa_handler == SIG_IGN; ++ blocked = sigismember(&t->blocked, sig); ++ if (blocked || ignored) { ++ action->sa.sa_handler = SIG_DFL; ++ if (blocked) { ++ sigdelset(&t->blocked, sig); ++ recalc_sigpending_and_wake(t); ++ } ++ } ++ if (action->sa.sa_handler == SIG_DFL) ++ t->signal->flags &= ~SIGNAL_UNKILLABLE; ++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t); ++ ++ spin_unlock_irqrestore(&t->sighand->siglock, flags); ++ ++ return ret; ++} ++#endif ++ ++#define GR_USER_BAN_TIME (15 * 60) ++#define GR_DAEMON_BRUTE_TIME (30 * 60) ++ ++void gr_handle_brute_attach(int dumpable) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct task_struct *p = current; ++ kuid_t uid = GLOBAL_ROOT_UID; ++ int daemon = 0; ++ ++ if 
(!grsec_enable_brute) ++ return; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) { ++ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; ++ p->real_parent->brute = 1; ++ daemon = 1; ++ } else { ++ const struct cred *cred = __task_cred(p), *cred2; ++ struct task_struct *tsk, *tsk2; ++ ++ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) { ++ struct user_struct *user; ++ ++ uid = cred->uid; ++ ++ /* this is put upon execution past expiration */ ++ user = find_user(uid); ++ if (user == NULL) ++ goto unlock; ++ user->suid_banned = 1; ++ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME; ++ if (user->suid_ban_expires == ~0UL) ++ user->suid_ban_expires--; ++ ++ /* only kill other threads of the same binary, from the same user */ ++ do_each_thread(tsk2, tsk) { ++ cred2 = __task_cred(tsk); ++ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file)) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ } ++ } ++unlock: ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ if (gr_is_global_nonroot(uid)) ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60); ++ else if (daemon) ++ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); ++ ++#endif ++ return; ++} ++ ++void gr_handle_brute_check(void) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct task_struct *p = current; ++ ++ if (unlikely(p->brute)) { ++ if (!grsec_enable_brute) ++ p->brute = 0; ++ else if (time_before(get_seconds(), p->brute_expires)) ++ msleep(30 * 1000); ++ } ++#endif ++ return; ++} ++ ++void gr_handle_kernel_exploit(void) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ const struct cred *cred; ++ struct task_struct *tsk, *tsk2; ++ struct user_struct *user; ++ kuid_t uid; ++ ++ if (in_irq() || in_serving_softirq() || in_nmi()) ++ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context"); ++ ++ uid = current_uid(); ++ ++ if (gr_is_global_root(uid)) ++ panic("grsec: halting the system due to suspicious kernel crash caused by root"); ++ else { ++ /* kill all the processes of this user, hold a reference ++ to their creds struct, and prevent them from creating ++ another process until system reset ++ */ ++ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", ++ GR_GLOBAL_UID(uid)); ++ /* we intentionally leak this ref */ ++ user = get_uid(current->cred->user); ++ if (user) ++ user->kernel_banned = 1; ++ ++ /* kill all processes of this user */ ++ read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ cred = __task_cred(tsk); ++ if (uid_eq(cred->uid, uid)) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } ++#endif ++} ++ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++static bool suid_ban_expired(struct user_struct *user) ++{ ++ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) { ++ user->suid_banned = 0; ++ user->suid_ban_expires = 0; ++ free_uid(user); ++ return true; ++ } ++ ++ return false; ++} ++#endif ++ ++int gr_process_kernel_exec_ban(void) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(current->cred->user->kernel_banned)) ++ return -EPERM; ++#endif ++ return 0; ++} ++ ++int 
gr_process_kernel_setuid_ban(struct user_struct *user) ++{ ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ if (unlikely(user->kernel_banned)) ++ gr_fake_force_sig(SIGKILL, current); ++#endif ++ return 0; ++} ++ ++int gr_process_suid_exec_ban(const struct linux_binprm *bprm) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct user_struct *user = current->cred->user; ++ if (unlikely(user->suid_banned)) { ++ if (suid_ban_expired(user)) ++ return 0; ++ /* disallow execution of suid binaries only */ ++ else if (!uid_eq(bprm->cred->euid, current->cred->uid)) ++ return -EPERM; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c +new file mode 100644 +index 0000000..c0aef3a +--- /dev/null ++++ b/grsecurity/grsec_sock.c +@@ -0,0 +1,244 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/ip.h> ++#include <net/sock.h> ++#include <net/inet_sock.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr); ++ ++EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg); ++EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg); ++ ++#ifdef CONFIG_UNIX_MODULE ++EXPORT_SYMBOL_GPL(gr_acl_handle_unix); ++EXPORT_SYMBOL_GPL(gr_acl_handle_mknod); ++EXPORT_SYMBOL_GPL(gr_handle_chroot_unix); ++EXPORT_SYMBOL_GPL(gr_handle_create); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++#define gr_conn_table_size 32749 ++struct conn_table_entry { ++ struct conn_table_entry *next; ++ struct signal_struct *sig; ++}; ++ ++struct conn_table_entry *gr_conn_table[gr_conn_table_size]; ++DEFINE_SPINLOCK(gr_conn_table_lock); ++ ++extern const char * gr_socktype_to_name(unsigned char type); ++extern const char * gr_proto_to_name(unsigned char proto); ++extern const char * gr_sockfamily_to_name(unsigned char family); ++ ++static __inline__ int ++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size) ++{ ++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); ++} ++ ++static __inline__ int ++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, ++ __u16 sport, __u16 dport) ++{ ++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr && ++ sig->gr_sport == sport && sig->gr_dport == dport)) ++ return 1; ++ else ++ return 0; ++} ++ ++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent) ++{ ++ struct conn_table_entry **match; ++ unsigned int index; ++ ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr, ++ sig->gr_sport, sig->gr_dport, ++ gr_conn_table_size); ++ ++ newent->sig = sig; ++ ++ match = &gr_conn_table[index]; ++ newent->next = *match; ++ *match = newent; ++ ++ return; ++} ++ ++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig) ++{ ++ struct conn_table_entry *match, *last = NULL; ++ unsigned int index; ++ ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr, ++ sig->gr_sport, sig->gr_dport, ++ gr_conn_table_size); ++ ++ match = gr_conn_table[index]; ++ while (match && !conn_match(match->sig, ++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport, ++ sig->gr_dport)) { ++ last = match; ++ match = match->next; ++ } ++ ++ if (match) { ++ if (last) ++ last->next = match->next; ++ else ++ gr_conn_table[index] = NULL; ++ kfree(match); ++ } ++ ++ return; ++} ++ ++static struct signal_struct * 
gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, ++ __u16 sport, __u16 dport) ++{ ++ struct conn_table_entry *match; ++ unsigned int index; ++ ++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); ++ ++ match = gr_conn_table[index]; ++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport)) ++ match = match->next; ++ ++ if (match) ++ return match->sig; ++ else ++ return NULL; ++} ++ ++#endif ++ ++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct signal_struct *sig = task->signal; ++ struct conn_table_entry *newent; ++ ++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC); ++ if (newent == NULL) ++ return; ++ /* no bh lock needed since we are called with bh disabled */ ++ spin_lock(&gr_conn_table_lock); ++ gr_del_task_from_ip_table_nolock(sig); ++ sig->gr_saddr = inet->inet_rcv_saddr; ++ sig->gr_daddr = inet->inet_daddr; ++ sig->gr_sport = inet->inet_sport; ++ sig->gr_dport = inet->inet_dport; ++ gr_add_to_task_ip_table_nolock(sig, newent); ++ spin_unlock(&gr_conn_table_lock); ++#endif ++ return; ++} ++ ++void gr_del_task_from_ip_table(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC ++ spin_lock_bh(&gr_conn_table_lock); ++ gr_del_task_from_ip_table_nolock(task->signal); ++ spin_unlock_bh(&gr_conn_table_lock); ++#endif ++ return; ++} ++ ++void ++gr_attach_curr_ip(const struct sock *sk) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct signal_struct *p, *set; ++ const struct inet_sock *inet = inet_sk(sk); ++ ++ if (unlikely(sk->sk_protocol != IPPROTO_TCP)) ++ return; ++ ++ set = current->signal; ++ ++ spin_lock_bh(&gr_conn_table_lock); ++ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr, ++ inet->inet_dport, inet->inet_sport); ++ if (unlikely(p != NULL)) { ++ set->curr_ip = p->curr_ip; ++ set->used_accept = 1; ++ gr_del_task_from_ip_table_nolock(p); ++ spin_unlock_bh(&gr_conn_table_lock); ++ return; ++ } ++ spin_unlock_bh(&gr_conn_table_lock); ++ ++ set->curr_ip = inet->inet_daddr; ++ set->used_accept = 1; ++#endif ++ return; ++} ++ ++int ++gr_handle_sock_all(const int family, const int type, const int protocol) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) && ++ (family != AF_UNIX)) { ++ if (family == AF_INET) ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol)); ++ else ++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_sock_server(const struct sockaddr *sck) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ if (grsec_enable_socket_server && ++ in_group_p(grsec_socket_server_gid) && ++ sck && (sck->sa_family != AF_UNIX) && ++ (sck->sa_family != AF_LOCAL)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_sock_server_other(const struct sock *sck) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ if (grsec_enable_socket_server && ++ in_group_p(grsec_socket_server_gid) && ++ sck && (sck->sk_family != AF_UNIX) && ++ (sck->sk_family != AF_LOCAL)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_sock_client(const struct sockaddr *sck) ++{ ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) && ++ 
sck && (sck->sa_family != AF_UNIX) && ++ (sck->sa_family != AF_LOCAL)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} +diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c +new file mode 100644 +index 0000000..8159888 +--- /dev/null ++++ b/grsecurity/grsec_sysctl.c +@@ -0,0 +1,479 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/sysctl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op) ++{ ++#ifdef CONFIG_GRKERNSEC_SYSCTL ++ if (dirname == NULL || name == NULL) ++ return 0; ++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) { ++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB) ++static int __maybe_unused __read_only one = 1; ++#endif ++ ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \ ++ defined(CONFIG_GRKERNSEC_DENYUSB) ++struct ctl_table grsecurity_table[] = { ++#ifdef CONFIG_GRKERNSEC_SYSCTL ++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO ++#ifdef CONFIG_GRKERNSEC_IO ++ { ++ .procname = "disable_priv_io", ++ .data = &grsec_disable_privio, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#endif ++#ifdef CONFIG_GRKERNSEC_LINK ++ { ++ .procname = "linking_restrictions", ++ .data = &grsec_enable_link, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN ++ { ++ .procname = "enforce_symlinksifowner", ++ .data = &grsec_enable_symlinkown, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "symlinkown_gid", ++ .data = &grsec_symlinkown_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ { ++ .procname = "deter_bruteforce", ++ .data = &grsec_enable_brute, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_FIFO ++ { ++ .procname = "fifo_restrictions", ++ .data = &grsec_enable_fifo, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC ++ { ++ .procname = "ptrace_readexec", ++ .data = &grsec_enable_ptrace_readexec, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ { ++ .procname = "consistent_setxid", ++ .data = &grsec_enable_setxid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ { ++ .procname = "ip_blackhole", ++ .data = &grsec_enable_blackhole, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "lastack_retries", ++ .data = &grsec_lastack_retries, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_EXECLOG ++ { ++ .procname = "exec_logging", ++ .data = &grsec_enable_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ { ++ .procname = "rwxmap_logging", ++ .data = &grsec_enable_log_rwxmaps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, 
++#endif ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ { ++ .procname = "signal_logging", ++ .data = &grsec_enable_signal, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_FORKFAIL ++ { ++ .procname = "forkfail_logging", ++ .data = &grsec_enable_forkfail, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TIME ++ { ++ .procname = "timechange_logging", ++ .data = &grsec_enable_time, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ { ++ .procname = "chroot_deny_shmat", ++ .data = &grsec_enable_chroot_shmat, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ { ++ .procname = "chroot_deny_unix", ++ .data = &grsec_enable_chroot_unix, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ { ++ .procname = "chroot_deny_mount", ++ .data = &grsec_enable_chroot_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ { ++ .procname = "chroot_deny_fchdir", ++ .data = &grsec_enable_chroot_fchdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ { ++ .procname = "chroot_deny_chroot", ++ .data = &grsec_enable_chroot_double, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ { ++ .procname = "chroot_deny_pivot", ++ .data = &grsec_enable_chroot_pivot, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ { ++ .procname = "chroot_enforce_chdir", ++ .data = &grsec_enable_chroot_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ { ++ .procname = "chroot_deny_chmod", ++ .data = &grsec_enable_chroot_chmod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ { ++ .procname = "chroot_deny_mknod", ++ .data = &grsec_enable_chroot_mknod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ { ++ .procname = "chroot_restrict_nice", ++ .data = &grsec_enable_chroot_nice, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ { ++ .procname = "chroot_execlog", ++ .data = &grsec_enable_chroot_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ { ++ .procname = "chroot_caps", ++ .data = &grsec_enable_chroot_caps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ { ++ .procname = "chroot_deny_sysctl", ++ .data = &grsec_enable_chroot_sysctl, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE ++ { ++ .procname = "tpe", ++ .data = &grsec_enable_tpe, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname 
= "tpe_gid", ++ .data = &grsec_tpe_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ { ++ .procname = "tpe_invert", ++ .data = &grsec_enable_tpe_invert, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ { ++ .procname = "tpe_restrict_all", ++ .data = &grsec_enable_tpe_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ { ++ .procname = "socket_all", ++ .data = &grsec_enable_socket_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_all_gid", ++ .data = &grsec_socket_all_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ { ++ .procname = "socket_client", ++ .data = &grsec_enable_socket_client, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_client_gid", ++ .data = &grsec_socket_client_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ { ++ .procname = "socket_server", ++ .data = &grsec_enable_socket_server, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_server_gid", ++ .data = &grsec_socket_server_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP ++ { ++ .procname = "audit_group", ++ .data = &grsec_enable_group, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "audit_gid", ++ .data = &grsec_audit_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ { ++ .procname = "audit_chdir", ++ .data = &grsec_enable_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ { ++ .procname = "audit_mount", ++ .data = &grsec_enable_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_DMESG ++ { ++ .procname = "dmesg", ++ .data = &grsec_enable_dmesg, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ { ++ .procname = "chroot_findtask", ++ .data = &grsec_enable_chroot_findtask, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ { ++ .procname = "resource_logging", ++ .data = &grsec_resource_logging, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ { ++ .procname = "audit_ptrace", ++ .data = &grsec_enable_audit_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ { ++ .procname = "harden_ptrace", ++ .data = &grsec_enable_harden_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_IPC ++ { ++ .procname = "harden_ipc", ++ .data = &grsec_enable_harden_ipc, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ 
.proc_handler = &proc_dointvec, ++ }, ++#endif ++ { ++ .procname = "grsec_lock", ++ .data = &grsec_lock, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_ROFS ++ { ++ .procname = "romount_protect", ++ .data = &grsec_enable_rofs, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one, ++ }, ++#endif ++#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE) ++ { ++ .procname = "deny_new_usb", ++ .data = &grsec_deny_new_usb, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++ { } ++}; ++#endif +diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c +new file mode 100644 +index 0000000..61b514e +--- /dev/null ++++ b/grsecurity/grsec_time.c +@@ -0,0 +1,16 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/module.h> ++ ++void ++gr_log_timechange(void) ++{ ++#ifdef CONFIG_GRKERNSEC_TIME ++ if (grsec_enable_time) ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); ++#endif ++ return; ++} ++ ++EXPORT_SYMBOL_GPL(gr_log_timechange); +diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c +new file mode 100644 +index 0000000..d1953de +--- /dev/null ++++ b/grsecurity/grsec_tpe.c +@@ -0,0 +1,78 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/grinternal.h> ++ ++extern int gr_acl_tpe_check(void); ++ ++int ++gr_tpe_allow(const struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct inode *inode = file->f_path.dentry->d_parent->d_inode; ++ struct inode *file_inode = file->f_path.dentry->d_inode; ++ const struct cred *cred = current_cred(); ++ char *msg = NULL; ++ char *msg2 = NULL; ++ ++ // never restrict root ++ if (gr_is_global_root(cred->uid)) ++ return 1; ++ ++ if (grsec_enable_tpe) { ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ++ msg = "not being in trusted group"; ++ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)) ++ msg = "being in untrusted group"; ++#else ++ if (in_group_p(grsec_tpe_gid)) ++ msg = "being in untrusted group"; ++#endif ++ } ++ if (!msg && gr_acl_tpe_check()) ++ msg = "being in untrusted role"; ++ ++ // not in any affected group/role ++ if (!msg) ++ goto next_check; ++ ++ if (gr_is_global_nonroot(inode->i_uid)) ++ msg2 = "file in non-root-owned directory"; ++ else if (inode->i_mode & S_IWOTH) ++ msg2 = "file in world-writable directory"; ++ else if (inode->i_mode & S_IWGRP) ++ msg2 = "file in group-writable directory"; ++ else if (file_inode->i_mode & S_IWOTH) ++ msg2 = "file is world-writable"; ++ ++ if (msg && msg2) { ++ char fullmsg[70] = {0}; ++ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2); ++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++ msg = NULL; ++next_check: ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ if (!grsec_enable_tpe || !grsec_enable_tpe_all) ++ return 1; ++ ++ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid)) ++ msg = "directory not owned by user"; ++ else if (inode->i_mode & S_IWOTH) ++ msg = "file in world-writable directory"; ++ else if (inode->i_mode & S_IWGRP) ++ msg = "file in group-writable directory"; ++ else if (file_inode->i_mode & S_IWOTH) ++ msg = "file is world-writable"; ++ ++ if (msg) { ++ gr_log_str_fs(GR_DONT_AUDIT, 
GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++#endif ++#endif ++ return 1; ++} +diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c +new file mode 100644 +index 0000000..ae02d8e +--- /dev/null ++++ b/grsecurity/grsec_usb.c +@@ -0,0 +1,15 @@ ++#include <linux/kernel.h> ++#include <linux/grinternal.h> ++#include <linux/module.h> ++ ++int gr_handle_new_usb(void) ++{ ++#ifdef CONFIG_GRKERNSEC_DENYUSB ++ if (grsec_deny_new_usb) { ++ printk(KERN_ALERT "grsec: denied insert of new USB device\n"); ++ return 1; ++ } ++#endif ++ return 0; ++} ++EXPORT_SYMBOL_GPL(gr_handle_new_usb); +diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c +new file mode 100644 +index 0000000..158b330 +--- /dev/null ++++ b/grsecurity/grsum.c +@@ -0,0 +1,64 @@ ++#include <linux/err.h> ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/scatterlist.h> ++#include <linux/crypto.h> ++#include <linux/gracl.h> ++ ++ ++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) ++#error "crypto and sha256 must be built into the kernel" ++#endif ++ ++int ++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) ++{ ++ struct crypto_hash *tfm; ++ struct hash_desc desc; ++ struct scatterlist sg[2]; ++ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long)))); ++ unsigned long *tmpsumptr = (unsigned long *)temp_sum; ++ unsigned long *sumptr = (unsigned long *)sum; ++ int cryptres; ++ int retval = 1; ++ volatile int mismatched = 0; ++ volatile int dummy = 0; ++ unsigned int i; ++ ++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); ++ if (IS_ERR(tfm)) { ++ /* should never happen, since sha256 should be built in */ ++ memset(entry->pw, 0, GR_PW_LEN); ++ return 1; ++ } ++ ++ sg_init_table(sg, 2); ++ sg_set_buf(&sg[0], salt, GR_SALT_LEN); ++ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw)); ++ ++ desc.tfm = tfm; ++ desc.flags = 0; ++ ++ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw), ++ temp_sum); ++ ++ memset(entry->pw, 0, GR_PW_LEN); ++ ++ if (cryptres) ++ goto out; ++ ++ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++) ++ if (sumptr[i] != tmpsumptr[i]) ++ mismatched = 1; ++ else ++ dummy = 1; // waste a cycle ++ ++ if (!mismatched) ++ retval = dummy - 1; ++ ++out: ++ crypto_free_hash(tfm); ++ ++ return retval; ++} +diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h +index 77ff547..181834f 100644 +--- a/include/asm-generic/4level-fixup.h ++++ b/include/asm-generic/4level-fixup.h +@@ -13,8 +13,10 @@ + #define pmd_alloc(mm, pud, address) \ + ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? 
\ + NULL: pmd_offset(pud, address)) ++#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address)) + + #define pud_alloc(mm, pgd, address) (pgd) ++#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address)) + #define pud_offset(pgd, start) (pgd) + #define pud_none(pud) 0 + #define pud_bad(pud) 0 +diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h +index b7babf0..97f4c4f 100644 +--- a/include/asm-generic/atomic-long.h ++++ b/include/asm-generic/atomic-long.h +@@ -22,6 +22,12 @@ + + typedef atomic64_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic64_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic64_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + + static inline long atomic_long_read(atomic_long_t *l) +@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l) + return (long)atomic64_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic64_t *v = (atomic64_t *)l; +@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) + atomic64_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l) + atomic64_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l) + atomic64_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) + atomic64_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l) + atomic64_sub(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_sub_unchecked(i, v); ++} ++#endif ++ + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l) + return atomic64_add_negative(i, v); + } + +-static 
inline long atomic_long_add_return(long i, atomic_long_t *l) ++static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_add_return(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_add_return_unchecked(i, v); ++} ++#endif ++ + static inline long atomic_long_sub_return(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) + return (long)atomic64_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) + + typedef atomic_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) + static inline long atomic_long_read(atomic_long_t *l) + { +@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l) + return (long)atomic_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic_t *v = (atomic_t *)l; +@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i) + atomic_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l) + atomic_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l) + atomic_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l) + atomic_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -183,6 +312,15 
@@ static inline void atomic_long_sub(long i, atomic_long_t *l) + atomic_sub(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_sub_unchecked(i, v); ++} ++#endif ++ + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l) + return (long)atomic_add_return(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_add_return_unchecked(i, v); ++} ++ ++#endif ++ + static inline long atomic_long_sub_return(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l) + return (long)atomic_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) + + #endif /* BITS_PER_LONG == 64 */ + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void pax_refcount_needs_these_functions(void) ++{ ++ atomic_read_unchecked((atomic_unchecked_t *)NULL); ++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); ++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_inc_unchecked((atomic_unchecked_t *)NULL); ++ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL); ++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); ++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_dec_unchecked((atomic_unchecked_t *)NULL); ++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0); ++ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0); ++#ifdef CONFIG_X86 ++ atomic_clear_mask_unchecked(0, NULL); ++ atomic_set_mask_unchecked(0, NULL); ++#endif ++ ++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); ++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); ++} ++#else ++#define atomic_read_unchecked(v) atomic_read(v) ++#define atomic_set_unchecked(v, i) atomic_set((v), (i)) ++#define atomic_add_unchecked(i, v) atomic_add((i), (v)) ++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) ++#define atomic_inc_unchecked(v) atomic_inc(v) ++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v) ++#define atomic_inc_return_unchecked(v) atomic_inc_return(v) ++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v)) ++#define atomic_dec_unchecked(v) atomic_dec(v) ++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n)) ++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i)) ++#define 
atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v)) ++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v)) ++ ++#define atomic_long_read_unchecked(v) atomic_long_read(v) ++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) ++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) ++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v)) ++#define atomic_long_inc_unchecked(v) atomic_long_inc(v) ++#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v)) ++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) ++#define atomic_long_dec_unchecked(v) atomic_long_dec(v) ++#endif ++ + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h +index 33bd2de..f31bff97 100644 +--- a/include/asm-generic/atomic.h ++++ b/include/asm-generic/atomic.h +@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) + * Atomically clears the bits set in @mask from @v + */ + #ifndef atomic_clear_mask +-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) + { + unsigned long flags; + +diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h +index b18ce4f..2ee2843 100644 +--- a/include/asm-generic/atomic64.h ++++ b/include/asm-generic/atomic64.h +@@ -16,6 +16,8 @@ typedef struct { + long long counter; + } atomic64_t; + ++typedef atomic64_t atomic64_unchecked_t; ++ + #define ATOMIC64_INIT(i) { (i) } + + extern long long atomic64_read(const atomic64_t *v); +@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u); + #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + ++#define atomic64_read_unchecked(v) atomic64_read(v) ++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) ++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) ++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) ++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) ++#define atomic64_inc_unchecked(v) atomic64_inc(v) ++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) ++#define atomic64_dec_unchecked(v) atomic64_dec(v) ++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) ++ + #endif /* _ASM_GENERIC_ATOMIC64_H */ +diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h +index 6f692f8..2ad9dd2 100644 +--- a/include/asm-generic/barrier.h ++++ b/include/asm-generic/barrier.h +@@ -66,7 +66,7 @@ + do { \ + compiletime_assert_atomic_type(*p); \ + smp_mb(); \ +- ACCESS_ONCE(*p) = (v); \ ++ ACCESS_ONCE_RW(*p) = (v); \ + } while (0) + + #define smp_load_acquire(p) \ +diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h +index a60a7cc..0fe12f2 100644 +--- a/include/asm-generic/bitops/__fls.h ++++ b/include/asm-generic/bitops/__fls.h +@@ -9,7 +9,7 @@ + * + * Undefined if no set bit exists, so code should check against 0 first. 
+ */ +-static __always_inline unsigned long __fls(unsigned long word) ++static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word) + { + int num = BITS_PER_LONG - 1; + +diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h +index 0576d1f..dad6c71 100644 +--- a/include/asm-generic/bitops/fls.h ++++ b/include/asm-generic/bitops/fls.h +@@ -9,7 +9,7 @@ + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ + +-static __always_inline int fls(int x) ++static __always_inline int __intentional_overflow(-1) fls(int x) + { + int r = 32; + +diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h +index b097cf8..3d40e14 100644 +--- a/include/asm-generic/bitops/fls64.h ++++ b/include/asm-generic/bitops/fls64.h +@@ -15,7 +15,7 @@ + * at position 64. + */ + #if BITS_PER_LONG == 32 +-static __always_inline int fls64(__u64 x) ++static __always_inline int __intentional_overflow(-1) fls64(__u64 x) + { + __u32 h = x >> 32; + if (h) +@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x) + return fls(x); + } + #elif BITS_PER_LONG == 64 +-static __always_inline int fls64(__u64 x) ++static __always_inline int __intentional_overflow(-1) fls64(__u64 x) + { + if (x == 0) + return 0; +diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h +index 1bfcfe5..e04c5c9 100644 +--- a/include/asm-generic/cache.h ++++ b/include/asm-generic/cache.h +@@ -6,7 +6,7 @@ + * cache lines need to provide their own cache.h. + */ + +-#define L1_CACHE_SHIFT 5 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) ++#define L1_CACHE_SHIFT 5UL ++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT) + + #endif /* __ASM_GENERIC_CACHE_H */ +diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h +index 0d68a1e..b74a761 100644 +--- a/include/asm-generic/emergency-restart.h ++++ b/include/asm-generic/emergency-restart.h +@@ -1,7 +1,7 @@ + #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H + #define _ASM_GENERIC_EMERGENCY_RESTART_H + +-static inline void machine_emergency_restart(void) ++static inline __noreturn void machine_emergency_restart(void) + { + machine_restart(NULL); + } +diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h +index 90f99c7..00ce236 100644 +--- a/include/asm-generic/kmap_types.h ++++ b/include/asm-generic/kmap_types.h +@@ -2,9 +2,9 @@ + #define _ASM_GENERIC_KMAP_TYPES_H + + #ifdef __WITH_KM_FENCE +-# define KM_TYPE_NR 41 ++# define KM_TYPE_NR 42 + #else +-# define KM_TYPE_NR 20 ++# define KM_TYPE_NR 21 + #endif + + #endif +diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h +index 9ceb03b..62b0b8f 100644 +--- a/include/asm-generic/local.h ++++ b/include/asm-generic/local.h +@@ -23,24 +23,37 @@ typedef struct + atomic_long_t a; + } local_t; + ++typedef struct { ++ atomic_long_unchecked_t a; ++} local_unchecked_t; ++ + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } + + #define local_read(l) atomic_long_read(&(l)->a) ++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) + #define local_set(l,i) atomic_long_set((&(l)->a),(i)) ++#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i)) + #define local_inc(l) atomic_long_inc(&(l)->a) ++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) + #define local_dec(l) atomic_long_dec(&(l)->a) ++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) + #define local_add(i,l) atomic_long_add((i),(&(l)->a)) ++#define local_add_unchecked(i,l) 
atomic_long_add_unchecked((i),(&(l)->a)) + #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) ++#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a)) + + #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) + #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) + #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) + #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) + #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) ++#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a)) + #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) + #define local_inc_return(l) atomic_long_inc_return(&(l)->a) ++#define local_dec_return(l) atomic_long_dec_return(&(l)->a) + + #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) ++#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) + #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) + #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) + #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) +diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h +index 725612b..9cc513a 100644 +--- a/include/asm-generic/pgtable-nopmd.h ++++ b/include/asm-generic/pgtable-nopmd.h +@@ -1,14 +1,19 @@ + #ifndef _PGTABLE_NOPMD_H + #define _PGTABLE_NOPMD_H + +-#ifndef __ASSEMBLY__ +- + #include <asm-generic/pgtable-nopud.h> + +-struct mm_struct; +- + #define __PAGETABLE_PMD_FOLDED + ++#define PMD_SHIFT PUD_SHIFT ++#define PTRS_PER_PMD 1 ++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ ++struct mm_struct; ++ + /* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into +@@ -16,11 +21,6 @@ struct mm_struct; + */ + typedef struct { pud_t pud; } pmd_t; + +-#define PMD_SHIFT PUD_SHIFT +-#define PTRS_PER_PMD 1 +-#define PMD_SIZE (1UL << PMD_SHIFT) +-#define PMD_MASK (~(PMD_SIZE-1)) +- + /* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded +diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h +index 810431d..0ec4804f 100644 +--- a/include/asm-generic/pgtable-nopud.h ++++ b/include/asm-generic/pgtable-nopud.h +@@ -1,10 +1,15 @@ + #ifndef _PGTABLE_NOPUD_H + #define _PGTABLE_NOPUD_H + +-#ifndef __ASSEMBLY__ +- + #define __PAGETABLE_PUD_FOLDED + ++#define PUD_SHIFT PGDIR_SHIFT ++#define PTRS_PER_PUD 1 ++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) ++#define PUD_MASK (~(PUD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ + /* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into +@@ -12,11 +17,6 @@ + */ + typedef struct { pgd_t pgd; } pud_t; + +-#define PUD_SHIFT PGDIR_SHIFT +-#define PTRS_PER_PUD 1 +-#define PUD_SIZE (1UL << PUD_SHIFT) +-#define PUD_MASK (~(PUD_SIZE-1)) +- + /* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded +@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { } + #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) + + #define pgd_populate(mm, pgd, pud) do { } while (0) ++#define pgd_populate_kernel(mm, pgd, pud) do { } while (0) + /* + * (puds are folded 
into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index 38a7437..47f62a4 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -802,6 +802,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, + } + #endif /* CONFIG_NUMA_BALANCING */ + ++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL ++#ifdef CONFIG_PAX_KERNEXEC ++#error KERNEXEC requires pax_open_kernel ++#else ++static inline unsigned long pax_open_kernel(void) { return 0; } ++#endif ++#endif ++ ++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL ++#ifdef CONFIG_PAX_KERNEXEC ++#error KERNEXEC requires pax_close_kernel ++#else ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++#endif ++ + #endif /* CONFIG_MMU */ + + #endif /* !__ASSEMBLY__ */ +diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h +index 72d8803..cb9749c 100644 +--- a/include/asm-generic/uaccess.h ++++ b/include/asm-generic/uaccess.h +@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n) + return __clear_user(to, n); + } + ++#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#error UDEREF requires pax_open_userland ++#else ++static inline unsigned long pax_open_userland(void) { return 0; } ++#endif ++#endif ++ ++#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#error UDEREF requires pax_close_userland ++#else ++static inline unsigned long pax_close_userland(void) { return 0; } ++#endif ++#endif ++ + #endif /* __ASM_GENERIC_UACCESS_H */ +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index bc2121f..2f41f9a 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -232,6 +232,7 @@ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_rodata) = .; \ + *(.rodata) *(.rodata.*) \ ++ *(.data..read_only) \ + *(__vermagic) /* Kernel version magic */ \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ +@@ -716,17 +717,18 @@ + * section in the linker script will go there too. @phdr should have + * a leading colon. + * +- * Note that this macros defines __per_cpu_load as an absolute symbol. ++ * Note that this macros defines per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU_SECTION. + */ + #define PERCPU_VADDR(cacheline, vaddr, phdr) \ +- VMLINUX_SYMBOL(__per_cpu_load) = .; \ +- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ ++ per_cpu_load = .; \ ++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \ + - LOAD_OFFSET) { \ ++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \ + PERCPU_INPUT(cacheline) \ + } phdr \ +- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); ++ . 
= VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu); + + /** + * PERCPU_SECTION - define output section for percpu area, simple version +diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h +index e73c19e..5b89e00 100644 +--- a/include/crypto/algapi.h ++++ b/include/crypto/algapi.h +@@ -34,7 +34,7 @@ struct crypto_type { + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +-}; ++} __do_const; + + struct crypto_instance { + struct crypto_alg alg; +diff --git a/include/drm/drmP.h b/include/drm/drmP.h +index 04a7f31..668d424 100644 +--- a/include/drm/drmP.h ++++ b/include/drm/drmP.h +@@ -67,6 +67,7 @@ + #include <linux/workqueue.h> + #include <linux/poll.h> + #include <asm/pgalloc.h> ++#include <asm/local.h> + #include <drm/drm.h> + #include <drm/drm_sarea.h> + #include <drm/drm_vma_manager.h> +@@ -297,10 +298,12 @@ do { \ + * \param cmd command. + * \param arg argument. + */ +-typedef int drm_ioctl_t(struct drm_device *dev, void *data, ++typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, ++typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd, + unsigned long arg); + + #define DRM_IOCTL_NR(n) _IOC_NR(n) +@@ -316,10 +319,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, + struct drm_ioctl_desc { + unsigned int cmd; + int flags; +- drm_ioctl_t *func; ++ drm_ioctl_t func; + unsigned int cmd_drv; + const char *name; +-}; ++} __do_const; + + /** + * Creates a driver or general drm_ioctl_desc array entry for the given +@@ -1022,7 +1025,8 @@ struct drm_info_list { + int (*show)(struct seq_file*, void*); /** show callback */ + u32 driver_features; /**< Required driver features for this entry */ + void *data; +-}; ++} __do_const; ++typedef struct drm_info_list __no_const drm_info_list_no_const; + + /** + * debugfs node structure. This structure represents a debugfs file. 
+@@ -1106,7 +1110,7 @@ struct drm_device { + + /** \name Usage Counters */ + /*@{ */ +- int open_count; /**< Outstanding files open */ ++ local_t open_count; /**< Outstanding files open */ + int buf_use; /**< Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ + /*@} */ +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index b1388b5..e1d1163 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs { + struct drm_connector *connector); + /* disable encoder when not in use - more explicit than dpms off */ + void (*disable)(struct drm_encoder *encoder); +-}; ++} __no_const; + + /** + * drm_connector_helper_funcs - helper operations for connectors +diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h +index 940ece4..8cb727f 100644 +--- a/include/drm/i915_pciids.h ++++ b/include/drm/i915_pciids.h +@@ -37,7 +37,7 @@ + */ + #define INTEL_VGA_DEVICE(id, info) { \ + 0x8086, id, \ +- ~0, ~0, \ ++ PCI_ANY_ID, PCI_ANY_ID, \ + 0x030000, 0xff0000, \ + (unsigned long) info } + +diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h +index 72dcbe8..8db58d7 100644 +--- a/include/drm/ttm/ttm_memory.h ++++ b/include/drm/ttm/ttm_memory.h +@@ -48,7 +48,7 @@ + + struct ttm_mem_shrink { + int (*do_shrink) (struct ttm_mem_shrink *); +-}; ++} __no_const; + + /** + * struct ttm_mem_global - Global memory accounting structure. +diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h +index 49a8284..9643967 100644 +--- a/include/drm/ttm/ttm_page_alloc.h ++++ b/include/drm/ttm/ttm_page_alloc.h +@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void); + */ + extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); + ++struct device; + extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); + extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); + +diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h +index 4b840e8..155d235 100644 +--- a/include/keys/asymmetric-subtype.h ++++ b/include/keys/asymmetric-subtype.h +@@ -37,7 +37,7 @@ struct asymmetric_key_subtype { + /* Verify the signature on a key of this subtype (optional) */ + int (*verify_signature)(const struct key *key, + const struct public_key_signature *sig); +-}; ++} __do_const; + + /** + * asymmetric_key_subtype - Get the subtype from an asymmetric key +diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h +index c1da539..1dcec55 100644 +--- a/include/linux/atmdev.h ++++ b/include/linux/atmdev.h +@@ -28,7 +28,7 @@ struct compat_atm_iobuf { + #endif + + struct k_atm_aal_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + }; +@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */ + int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags); + int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page); + struct module *owner; +-}; ++} __do_const ; + + struct atmphy_ops { + int (*start)(struct atm_dev *dev); +diff --git a/include/linux/audit.h b/include/linux/audit.h +index ec1464d..833274b 100644 +--- a/include/linux/audit.h ++++ b/include/linux/audit.h +@@ -196,7 +196,7 @@ static inline void audit_ptrace(struct task_struct *t) + extern unsigned int audit_serial(void); + extern int auditsc_get_stamp(struct audit_context *ctx, + struct timespec *t, unsigned int 
*serial); +-extern int audit_set_loginuid(kuid_t loginuid); ++extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid); + + static inline kuid_t audit_get_loginuid(struct task_struct *tsk) + { +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h +index b4a745d..e3c0942 100644 +--- a/include/linux/binfmts.h ++++ b/include/linux/binfmts.h +@@ -45,7 +45,7 @@ struct linux_binprm { + unsigned interp_data; + unsigned long loader, exec; + char tcomm[TASK_COMM_LEN]; +-}; ++} __randomize_layout; + + #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 + #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) +@@ -74,8 +74,10 @@ struct linux_binfmt { + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *cprm); ++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags); ++ void (*handle_mmap)(struct file *); + unsigned long min_coredump; /* minimal dump size */ +-}; ++} __do_const __randomize_layout; + + extern void __register_binfmt(struct linux_binfmt *fmt, int insert); + +diff --git a/include/linux/bitops.h b/include/linux/bitops.h +index be5fd38..d71192a 100644 +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift) + * @word: value to rotate + * @shift: bits to roll + */ +-static inline __u32 rol32(__u32 word, unsigned int shift) ++static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift) + { + return (word << shift) | (word >> (32 - shift)); + } +@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift) + * @word: value to rotate + * @shift: bits to roll + */ +-static inline __u32 ror32(__u32 word, unsigned int shift) ++static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift) + { + return (word >> shift) | (word << (32 - shift)); + } +@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index) + return (__s32)(value << shift) >> shift; + } + +-static inline unsigned fls_long(unsigned long l) ++static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l) + { + if (sizeof(l) == 4) + return fls(l); +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 4afa4f8..1ed7824 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -1572,7 +1572,7 @@ struct block_device_operations { + /* this callback is with swap_lock and sometimes page table lock held */ + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + struct module *owner; +-}; ++} __do_const; + + extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h +index afc1343..9735539 100644 +--- a/include/linux/blktrace_api.h ++++ b/include/linux/blktrace_api.h +@@ -25,7 +25,7 @@ struct blk_trace { + struct dentry *dropped_file; + struct dentry *msg_file; + struct list_head running_list; +- atomic_t dropped; ++ atomic_unchecked_t dropped; + }; + + extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); +diff --git a/include/linux/cache.h b/include/linux/cache.h +index 17e7e82..1d7da26 100644 +--- a/include/linux/cache.h ++++ b/include/linux/cache.h +@@ -16,6 +16,14 @@ + #define __read_mostly + #endif + ++#ifndef __read_only ++#ifdef CONFIG_PAX_KERNEXEC ++#error KERNEXEC requires __read_only ++#else ++#define __read_only __read_mostly ++#endif ++#endif ++ + 
#ifndef ____cacheline_aligned + #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) + #endif +diff --git a/include/linux/capability.h b/include/linux/capability.h +index 84b13ad..d7b6550 100644 +--- a/include/linux/capability.h ++++ b/include/linux/capability.h +@@ -212,8 +212,13 @@ extern bool capable(int cap); + extern bool ns_capable(struct user_namespace *ns, int cap); + extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); + extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); ++extern bool capable_nolog(int cap); ++extern bool ns_capable_nolog(struct user_namespace *ns, int cap); ++extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap); + + /* audit system wants to get cap info from files as well */ + extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); + ++extern int is_privileged_binary(const struct dentry *dentry); ++ + #endif /* !_LINUX_CAPABILITY_H */ +diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h +index 8609d57..86e4d79 100644 +--- a/include/linux/cdrom.h ++++ b/include/linux/cdrom.h +@@ -87,7 +87,6 @@ struct cdrom_device_ops { + + /* driver specifications */ + const int capability; /* capability flags */ +- int n_minors; /* number of active minor devices */ + /* handle uniform packets for scsi type devices (scsi,atapi) */ + int (*generic_packet) (struct cdrom_device_info *, + struct packet_command *); +diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h +index 4ce9056..86caac6 100644 +--- a/include/linux/cleancache.h ++++ b/include/linux/cleancache.h +@@ -31,7 +31,7 @@ struct cleancache_ops { + void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); + void (*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); +-}; ++} __no_const; + + extern struct cleancache_ops * + cleancache_register_ops(struct cleancache_ops *ops); +diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h +index 939533d..cf0a57c 100644 +--- a/include/linux/clk-provider.h ++++ b/include/linux/clk-provider.h +@@ -166,6 +166,7 @@ struct clk_ops { + unsigned long parent_accuracy); + void (*init)(struct clk_hw *hw); + }; ++typedef struct clk_ops __no_const clk_ops_no_const; + + /** + * struct clk_init_data - holds init data that's common to all clocks and is +diff --git a/include/linux/compat.h b/include/linux/compat.h +index 3f448c6..df3ce1d 100644 +--- a/include/linux/compat.h ++++ b/include/linux/compat.h +@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, + compat_size_t __user *len_ptr); + + asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32); +-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); ++asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0); + asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); + asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, + compat_ssize_t msgsz, int msgflg); +@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child, + extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, +- compat_long_t addr, compat_long_t data); ++ compat_ulong_t addr, compat_ulong_t data); + + asmlinkage long 
compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); + /* +diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h +index 2507fd2..55203f8 100644 +--- a/include/linux/compiler-gcc4.h ++++ b/include/linux/compiler-gcc4.h +@@ -39,9 +39,34 @@ + # define __compiletime_warning(message) __attribute__((warning(message))) + # define __compiletime_error(message) __attribute__((error(message))) + #endif /* __CHECKER__ */ ++ ++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__))) ++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg)) ++#define __bos0(ptr) __bos((ptr), 0) ++#define __bos1(ptr) __bos((ptr), 1) + #endif /* GCC_VERSION >= 40300 */ + + #if GCC_VERSION >= 40500 ++ ++#ifdef RANDSTRUCT_PLUGIN ++#define __randomize_layout __attribute__((randomize_layout)) ++#define __no_randomize_layout __attribute__((no_randomize_layout)) ++#endif ++ ++#ifdef CONSTIFY_PLUGIN ++#define __no_const __attribute__((no_const)) ++#define __do_const __attribute__((do_const)) ++#endif ++ ++#ifdef SIZE_OVERFLOW_PLUGIN ++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__))) ++#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__))) ++#endif ++ ++#ifdef LATENT_ENTROPY_PLUGIN ++#define __latent_entropy __attribute__((latent_entropy)) ++#endif ++ + /* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 2472740..4857634 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -5,11 +5,14 @@ + + #ifdef __CHECKER__ + # define __user __attribute__((noderef, address_space(1))) ++# define __force_user __force __user + # define __kernel __attribute__((address_space(0))) ++# define __force_kernel __force __kernel + # define __safe __attribute__((safe)) + # define __force __attribute__((force)) + # define __nocast __attribute__((nocast)) + # define __iomem __attribute__((noderef, address_space(2))) ++# define __force_iomem __force __iomem + # define __must_hold(x) __attribute__((context(x,1,1))) + # define __acquires(x) __attribute__((context(x,0,1))) + # define __releases(x) __attribute__((context(x,1,0))) +@@ -17,20 +20,37 @@ + # define __release(x) __context__(x,-1) + # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) + # define __percpu __attribute__((noderef, address_space(3))) ++# define __force_percpu __force __percpu + #ifdef CONFIG_SPARSE_RCU_POINTER + # define __rcu __attribute__((noderef, address_space(4))) ++# define __force_rcu __force __rcu + #else + # define __rcu ++# define __force_rcu + #endif + extern void __chk_user_ptr(const volatile void __user *); + extern void __chk_io_ptr(const volatile void __iomem *); + #else +-# define __user +-# define __kernel ++# ifdef CHECKER_PLUGIN ++//# define __user ++//# define __force_user ++//# define __kernel ++//# define __force_kernel ++# else ++# ifdef STRUCTLEAK_PLUGIN ++# define __user __attribute__((user)) ++# else ++# define __user ++# endif ++# define __force_user ++# define __kernel ++# define __force_kernel ++# endif + # define __safe + # define __force + # define __nocast + # define __iomem ++# define __force_iomem + # define __chk_user_ptr(x) (void)0 + # define __chk_io_ptr(x) (void)0 + # define __builtin_warning(x, y...) 
(1) +@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); + # define __release(x) (void)0 + # define __cond_lock(x,c) (c) + # define __percpu ++# define __force_percpu + # define __rcu ++# define __force_rcu + #endif + + /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +@@ -279,6 +301,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + # define __attribute_const__ /* unimplemented */ + #endif + ++#ifndef __randomize_layout ++# define __randomize_layout ++#endif ++ ++#ifndef __no_randomize_layout ++# define __no_randomize_layout ++#endif ++ ++#ifndef __no_const ++# define __no_const ++#endif ++ ++#ifndef __do_const ++# define __do_const ++#endif ++ ++#ifndef __size_overflow ++# define __size_overflow(...) ++#endif ++ ++#ifndef __intentional_overflow ++# define __intentional_overflow(...) ++#endif ++ ++#ifndef __latent_entropy ++# define __latent_entropy ++#endif ++ + /* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. +@@ -288,6 +338,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + #define __cold + #endif + ++#ifndef __alloc_size ++#define __alloc_size(...) ++#endif ++ ++#ifndef __bos ++#define __bos(ptr, arg) ++#endif ++ ++#ifndef __bos0 ++#define __bos0(ptr) ++#endif ++ ++#ifndef __bos1 ++#define __bos1(ptr) ++#endif ++ + /* Simple shorthand for a section definition */ + #ifndef __section + # define __section(S) __attribute__ ((__section__(#S))) +@@ -362,7 +428,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. + */ +-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x)) ++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x)) + + /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ + #ifdef CONFIG_KPROBES +diff --git a/include/linux/completion.h b/include/linux/completion.h +index 5d5aaae..0ea9b84 100644 +--- a/include/linux/completion.h ++++ b/include/linux/completion.h +@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x) + + extern void wait_for_completion(struct completion *); + extern void wait_for_completion_io(struct completion *); +-extern int wait_for_completion_interruptible(struct completion *x); +-extern int wait_for_completion_killable(struct completion *x); ++extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1); ++extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1); + extern unsigned long wait_for_completion_timeout(struct completion *x, +- unsigned long timeout); ++ unsigned long timeout) __intentional_overflow(-1); + extern unsigned long wait_for_completion_io_timeout(struct completion *x, +- unsigned long timeout); ++ unsigned long timeout) __intentional_overflow(-1); + extern long wait_for_completion_interruptible_timeout( +- struct completion *x, unsigned long timeout); ++ struct completion *x, unsigned long timeout) __intentional_overflow(-1); + extern long wait_for_completion_killable_timeout( +- struct completion *x, unsigned long timeout); ++ struct completion *x, unsigned long timeout) __intentional_overflow(-1); + extern bool try_wait_for_completion(struct completion *x); + extern bool completion_done(struct completion *x); + +diff 
--git a/include/linux/configfs.h b/include/linux/configfs.h +index 34025df..d94bbbc 100644 +--- a/include/linux/configfs.h ++++ b/include/linux/configfs.h +@@ -125,7 +125,7 @@ struct configfs_attribute { + const char *ca_name; + struct module *ca_owner; + umode_t ca_mode; +-}; ++} __do_const; + + /* + * Users often need to create attribute structures for their configurable +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h +index 4d89e0e..5281847 100644 +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -191,6 +191,7 @@ struct global_attr { + ssize_t (*store)(struct kobject *a, struct attribute *b, + const char *c, size_t count); + }; ++typedef struct global_attr __no_const global_attr_no_const; + + #define define_one_global_ro(_name) \ + static struct global_attr _name = \ +@@ -232,7 +233,7 @@ struct cpufreq_driver { + bool boost_supported; + bool boost_enabled; + int (*set_boost) (int state); +-}; ++} __do_const; + + /* flags */ + #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if +diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h +index 50fcbb0..9d2dbd9 100644 +--- a/include/linux/cpuidle.h ++++ b/include/linux/cpuidle.h +@@ -50,7 +50,8 @@ struct cpuidle_state { + int index); + + int (*enter_dead) (struct cpuidle_device *dev, int index); +-}; ++} __do_const; ++typedef struct cpuidle_state __no_const cpuidle_state_no_const; + + /* Idle State Flags */ + #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ +@@ -192,7 +193,7 @@ struct cpuidle_governor { + void (*reflect) (struct cpuidle_device *dev, int index); + + struct module *owner; +-}; ++} __do_const; + + #ifdef CONFIG_CPU_IDLE + extern int cpuidle_register_governor(struct cpuidle_governor *gov); +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h +index d08e4d2..95fad61 100644 +--- a/include/linux/cpumask.h ++++ b/include/linux/cpumask.h +@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) + } + + /* Valid inputs for n are -1 and 0. */ +-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) ++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp) + { + return n+1; + } + +-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) ++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp) + { + return n+1; + } + +-static inline unsigned int cpumask_next_and(int n, ++static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n, + const struct cpumask *srcp, + const struct cpumask *andp) + { +@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) + * + * Returns >= nr_cpu_ids if no further cpus set. + */ +-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) ++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp) + { + /* -1 is a legal arg here. */ + if (n != -1) +@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) + * + * Returns >= nr_cpu_ids if no further cpus unset. + */ +-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) ++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp) + { + /* -1 is a legal arg here. 
*/ + if (n != -1) +@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) + return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); + } + +-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); ++int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1); + int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); + + /** +diff --git a/include/linux/cred.h b/include/linux/cred.h +index 04421e8..a85afd4 100644 +--- a/include/linux/cred.h ++++ b/include/linux/cred.h +@@ -35,7 +35,7 @@ struct group_info { + int nblocks; + kgid_t small_block[NGROUPS_SMALL]; + kgid_t *blocks[0]; +-}; ++} __randomize_layout; + + /** + * get_group_info - Get a reference to a group info structure +@@ -136,7 +136,7 @@ struct cred { + struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ + struct group_info *group_info; /* supplementary groups for euid/fsgid */ + struct rcu_head rcu; /* RCU deletion hook */ +-}; ++} __randomize_layout; + + extern void __put_cred(struct cred *); + extern void exit_creds(struct task_struct *); +@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk) + static inline void validate_process_creds(void) + { + } ++static inline void validate_task_creds(struct task_struct *task) ++{ ++} + #endif + + /** +@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred) + + #define task_uid(task) (task_cred_xxx((task), uid)) + #define task_euid(task) (task_cred_xxx((task), euid)) ++#define task_securebits(task) (task_cred_xxx((task), securebits)) + + #define current_cred_xxx(xxx) \ + ({ \ +diff --git a/include/linux/crypto.h b/include/linux/crypto.h +index b92eadf..b4ecdc1 100644 +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -373,7 +373,7 @@ struct cipher_tfm { + const u8 *key, unsigned int keylen); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +-}; ++} __no_const; + + struct hash_tfm { + int (*init)(struct hash_desc *desc); +@@ -394,13 +394,13 @@ struct compress_tfm { + int (*cot_decompress)(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +-}; ++} __no_const; + + struct rng_tfm { + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); +-}; ++} __no_const; + + #define crt_ablkcipher crt_u.ablkcipher + #define crt_aead crt_u.aead +diff --git a/include/linux/ctype.h b/include/linux/ctype.h +index 653589e..4ef254a 100644 +--- a/include/linux/ctype.h ++++ b/include/linux/ctype.h +@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c) + * Fast implementation of tolower() for internal usage. Do not use in your + * code. 
+ */ +-static inline char _tolower(const char c) ++static inline unsigned char _tolower(const unsigned char c) + { + return c | 0x20; + } +diff --git a/include/linux/dcache.h b/include/linux/dcache.h +index bf72e9a..4ca7927 100644 +--- a/include/linux/dcache.h ++++ b/include/linux/dcache.h +@@ -133,7 +133,7 @@ struct dentry { + } d_u; + struct list_head d_subdirs; /* our children */ + struct hlist_node d_alias; /* inode alias list */ +-}; ++} __randomize_layout; + + /* + * dentry->d_lock spinlock nesting subclasses: +diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h +index 7925bf0..d5143d2 100644 +--- a/include/linux/decompress/mm.h ++++ b/include/linux/decompress/mm.h +@@ -77,7 +77,7 @@ static void free(void *where) + * warnings when not needed (indeed large_malloc / large_free are not + * needed by inflate */ + +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + + #define large_malloc(a) vmalloc(a) +diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h +index d48dc00..211ee54 100644 +--- a/include/linux/devfreq.h ++++ b/include/linux/devfreq.h +@@ -114,7 +114,7 @@ struct devfreq_governor { + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + int (*event_handler)(struct devfreq *devfreq, + unsigned int event, void *data); +-}; ++} __do_const; + + /** + * struct devfreq - Device devfreq structure +diff --git a/include/linux/device.h b/include/linux/device.h +index 952b010..d5b7691 100644 +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -310,7 +310,7 @@ struct subsys_interface { + struct list_head node; + int (*add_dev)(struct device *dev, struct subsys_interface *sif); + int (*remove_dev)(struct device *dev, struct subsys_interface *sif); +-}; ++} __do_const; + + int subsys_interface_register(struct subsys_interface *sif); + void subsys_interface_unregister(struct subsys_interface *sif); +@@ -506,7 +506,7 @@ struct device_type { + void (*release)(struct device *dev); + + const struct dev_pm_ops *pm; +-}; ++} __do_const; + + /* interface for exporting device attributes */ + struct device_attribute { +@@ -516,11 +516,12 @@ struct device_attribute { + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); + }; ++typedef struct device_attribute __no_const device_attribute_no_const; + + struct dev_ext_attribute { + struct device_attribute attr; + void *var; +-}; ++} __do_const; + + ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, + char *buf); +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h +index fd4aee2..1f28db9 100644 +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -54,7 +54,7 @@ struct dma_map_ops { + u64 (*get_required_mask)(struct device *dev); + #endif + int is_phys; +-}; ++} __do_const; + + #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) + +diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h +index 0a5f552..6661a5a 100644 +--- a/include/linux/dmaengine.h ++++ b/include/linux/dmaengine.h +@@ -1151,9 +1151,9 @@ struct dma_pinned_list { + struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); + void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); + +-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, ++dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, + struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); +-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, ++dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, + struct dma_pinned_list *pinned_list, struct page *page, + unsigned int offset, size_t len); + +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 0a819e7..8ed47f1 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -768,6 +768,7 @@ struct efivar_operations { + efi_set_variable_t *set_variable; + efi_query_variable_store_t *query_variable_store; + }; ++typedef struct efivar_operations __no_const efivar_operations_no_const; + + struct efivars { + /* +diff --git a/include/linux/elf.h b/include/linux/elf.h +index 67a5fa7..b817372 100644 +--- a/include/linux/elf.h ++++ b/include/linux/elf.h +@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC []; + #define elf_note elf32_note + #define elf_addr_t Elf32_Off + #define Elf_Half Elf32_Half ++#define elf_dyn Elf32_Dyn + + #else + +@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC []; + #define elf_note elf64_note + #define elf_addr_t Elf64_Off + #define Elf_Half Elf64_Half ++#define elf_dyn Elf64_Dyn + + #endif + +diff --git a/include/linux/err.h b/include/linux/err.h +index 15f92e0..e825a8e 100644 +--- a/include/linux/err.h ++++ b/include/linux/err.h +@@ -19,12 +19,12 @@ + + #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) + +-static inline void * __must_check ERR_PTR(long error) ++static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error) + { + return (void *) error; + } + +-static inline long __must_check PTR_ERR(__force const void *ptr) ++static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr) + { + return (long) ptr; + } +diff --git a/include/linux/extcon.h b/include/linux/extcon.h +index 21c59af..6057a03 100644 +--- a/include/linux/extcon.h ++++ b/include/linux/extcon.h +@@ -135,7 +135,7 @@ struct extcon_dev { + /* /sys/class/extcon/.../mutually_exclusive/... 
*/ + struct attribute_group attr_g_muex; + struct attribute **attrs_muex; +- struct device_attribute *d_attrs_muex; ++ device_attribute_no_const *d_attrs_muex; + }; + + /** +diff --git a/include/linux/fb.h b/include/linux/fb.h +index fe6ac95..898d41d 100644 +--- a/include/linux/fb.h ++++ b/include/linux/fb.h +@@ -304,7 +304,7 @@ struct fb_ops { + /* called at KDB enter and leave time to prepare the console */ + int (*fb_debug_enter)(struct fb_info *info); + int (*fb_debug_leave)(struct fb_info *info); +-}; ++} __do_const; + + #ifdef CONFIG_FB_TILEBLITTING + #define FB_TILE_CURSOR_NONE 0 +diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h +index 70e8e21..1939916 100644 +--- a/include/linux/fdtable.h ++++ b/include/linux/fdtable.h +@@ -102,7 +102,7 @@ struct files_struct *get_files_struct(struct task_struct *); + void put_files_struct(struct files_struct *fs); + void reset_files_struct(struct files_struct *); + int unshare_files(struct files_struct **); +-struct files_struct *dup_fd(struct files_struct *, int *); ++struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; + void do_close_on_exec(struct files_struct *); + int iterate_fd(struct files_struct *, unsigned, + int (*)(const void *, struct file *, unsigned), +diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h +index 8293262..2b3b8bd 100644 +--- a/include/linux/frontswap.h ++++ b/include/linux/frontswap.h +@@ -11,7 +11,7 @@ struct frontswap_ops { + int (*load)(unsigned, pgoff_t, struct page *); + void (*invalidate_page)(unsigned, pgoff_t); + void (*invalidate_area)(unsigned); +-}; ++} __no_const; + + extern bool frontswap_enabled; + extern struct frontswap_ops * +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 23b2a35..8764ab7 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -426,7 +426,7 @@ struct address_space { + spinlock_t private_lock; /* for use by the address_space */ + struct list_head private_list; /* ditto */ + void *private_data; /* ditto */ +-} __attribute__((aligned(sizeof(long)))); ++} __attribute__((aligned(sizeof(long)))) __randomize_layout; + /* + * On most architectures that alignment is already the case; but + * must be enforced here for CRIS, to let the least significant bit +@@ -469,7 +469,7 @@ struct block_device { + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +-}; ++} __randomize_layout; + + /* + * Radix-tree tags, for tagging dirty and writeback pages within the pagecache +@@ -613,7 +613,7 @@ struct inode { + atomic_t i_readcount; /* struct files open RO */ + #endif + void *i_private; /* fs or device private pointer */ +-}; ++} __randomize_layout; + + static inline int inode_unhashed(struct inode *inode) + { +@@ -812,7 +812,7 @@ struct file { + #ifdef CONFIG_DEBUG_WRITECOUNT + unsigned long f_mnt_write_state; + #endif +-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ ++} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */ + + struct file_handle { + __u32 handle_bytes; +@@ -982,7 +982,7 @@ struct file_lock { + int state; /* state of grant or error if -ve */ + } afs; + } fl_u; +-}; ++} __randomize_layout; + + /* The following constant reflects the upper bound of the file/locking space */ + #ifndef OFFSET_MAX +@@ -1329,7 +1329,7 @@ struct super_block { + struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; + struct list_lru s_inode_lru ____cacheline_aligned_in_smp; + struct rcu_head rcu; +-}; ++} __randomize_layout; + + 
extern struct timespec current_fs_time(struct super_block *sb); + +@@ -1551,7 +1551,8 @@ struct file_operations { + long (*fallocate)(struct file *file, int mode, loff_t offset, + loff_t len); + int (*show_fdinfo)(struct seq_file *m, struct file *f); +-}; ++} __do_const __randomize_layout; ++typedef struct file_operations __no_const file_operations_no_const; + + struct inode_operations { + struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); +@@ -2820,4 +2821,14 @@ static inline bool dir_relax(struct inode *inode) + return !IS_DEADDIR(inode); + } + ++static inline bool is_sidechannel_device(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL ++ umode_t mode = inode->i_mode; ++ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH))); ++#else ++ return false; ++#endif ++} ++ + #endif /* _LINUX_FS_H */ +diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h +index 0efc3e6..fd23610 100644 +--- a/include/linux/fs_struct.h ++++ b/include/linux/fs_struct.h +@@ -6,13 +6,13 @@ + #include <linux/seqlock.h> + + struct fs_struct { +- int users; ++ atomic_t users; + spinlock_t lock; + seqcount_t seq; + int umask; + int in_exec; + struct path root, pwd; +-}; ++} __randomize_layout; + + extern struct kmem_cache *fs_cachep; + +diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h +index 7714849..a4a5c7a 100644 +--- a/include/linux/fscache-cache.h ++++ b/include/linux/fscache-cache.h +@@ -113,7 +113,7 @@ struct fscache_operation { + fscache_operation_release_t release; + }; + +-extern atomic_t fscache_op_debug_id; ++extern atomic_unchecked_t fscache_op_debug_id; + extern void fscache_op_work_func(struct work_struct *work); + + extern void fscache_enqueue_operation(struct fscache_operation *); +@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, + INIT_WORK(&op->work, fscache_op_work_func); + atomic_set(&op->usage, 1); + op->state = FSCACHE_OP_ST_INITIALISED; +- op->debug_id = atomic_inc_return(&fscache_op_debug_id); ++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); + op->processor = processor; + op->release = release; + INIT_LIST_HEAD(&op->pend_link); +diff --git a/include/linux/fscache.h b/include/linux/fscache.h +index 115bb81..e7b812b 100644 +--- a/include/linux/fscache.h ++++ b/include/linux/fscache.h +@@ -152,7 +152,7 @@ struct fscache_cookie_def { + * - this is mandatory for any object that may have data + */ + void (*now_uncached)(void *cookie_netfs_data); +-}; ++} __do_const; + + /* + * fscache cached network filesystem type +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h +index 1c804b0..1432c2b 100644 +--- a/include/linux/fsnotify.h ++++ b/include/linux/fsnotify.h +@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file) + struct inode *inode = file_inode(file); + __u32 mask = FS_ACCESS; + ++ if (is_sidechannel_device(inode)) ++ return; ++ + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + +@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file) + struct inode *inode = file_inode(file); + __u32 mask = FS_MODIFY; + ++ if (is_sidechannel_device(inode)) ++ return; ++ + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + +@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) + */ + static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) + { +- return kstrdup(name, GFP_KERNEL); ++ return (const unsigned char *)kstrdup((const 
char *)name, GFP_KERNEL); + } + + /* +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 9f3c275..8bdff5d 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -194,7 +194,7 @@ struct gendisk { + struct kobject *slave_dir; + + struct timer_rand_state *random; +- atomic_t sync_io; /* RAID */ ++ atomic_unchecked_t sync_io; /* RAID */ + struct disk_events *ev; + #ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *integrity; +@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask); + extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); + + /* drivers/char/random.c */ +-extern void add_disk_randomness(struct gendisk *disk); ++extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; + extern void rand_initialize_disk(struct gendisk *disk); + + static inline sector_t get_start_sect(struct block_device *bdev) +diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h +index c0894dd..2fbf10c 100644 +--- a/include/linux/genl_magic_func.h ++++ b/include/linux/genl_magic_func.h +@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd) + }, + + #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops) +-static struct genl_ops ZZZ_genl_ops[] __read_mostly = { ++static struct genl_ops ZZZ_genl_ops[] = { + #include GENL_MAGIC_INCLUDE_FILE + }; + +diff --git a/include/linux/gfp.h b/include/linux/gfp.h +index 39b81dc..819dc51 100644 +--- a/include/linux/gfp.h ++++ b/include/linux/gfp.h +@@ -36,6 +36,13 @@ struct vm_area_struct; + #define ___GFP_NO_KSWAPD 0x400000u + #define ___GFP_OTHER_NODE 0x800000u + #define ___GFP_WRITE 0x1000000u ++ ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++#define ___GFP_USERCOPY 0x2000000u ++#else ++#define ___GFP_USERCOPY 0 ++#endif ++ + /* If the above are modified, __GFP_BITS_SHIFT may need updating */ + + /* +@@ -93,6 +100,7 @@ struct vm_area_struct; + #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ + #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */ + #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ ++#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */ + + /* + * This may seem redundant, but it's a way of annotating false positives vs. 
+@@ -100,7 +108,7 @@ struct vm_area_struct; + */ + #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) + +-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ ++#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */ + #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) + + /* This equals 0, but use constants in case they ever change */ +@@ -158,6 +166,8 @@ struct vm_area_struct; + /* 4GB DMA on some platforms */ + #define GFP_DMA32 __GFP_DMA32 + ++#define GFP_USERCOPY __GFP_USERCOPY ++ + /* Convert GFP flags to their corresponding migrate type */ + static inline int allocflags_to_migratetype(gfp_t gfp_flags) + { +diff --git a/include/linux/gracl.h b/include/linux/gracl.h +new file mode 100644 +index 0000000..edb2cb6 +--- /dev/null ++++ b/include/linux/gracl.h +@@ -0,0 +1,340 @@ ++#ifndef GR_ACL_H ++#define GR_ACL_H ++ ++#include <linux/grdefs.h> ++#include <linux/resource.h> ++#include <linux/capability.h> ++#include <linux/dcache.h> ++#include <asm/resource.h> ++ ++/* Major status information */ ++ ++#define GR_VERSION "grsecurity 3.0" ++#define GRSECURITY_VERSION 0x3000 ++ ++enum { ++ GR_SHUTDOWN = 0, ++ GR_ENABLE = 1, ++ GR_SPROLE = 2, ++ GR_OLDRELOAD = 3, ++ GR_SEGVMOD = 4, ++ GR_STATUS = 5, ++ GR_UNSPROLE = 6, ++ GR_PASSSET = 7, ++ GR_SPROLEPAM = 8, ++ GR_RELOAD = 9, ++}; ++ ++/* Password setup definitions ++ * kernel/grhash.c */ ++enum { ++ GR_PW_LEN = 128, ++ GR_SALT_LEN = 16, ++ GR_SHA_LEN = 32, ++}; ++ ++enum { ++ GR_SPROLE_LEN = 64, ++}; ++ ++enum { ++ GR_NO_GLOB = 0, ++ GR_REG_GLOB, ++ GR_CREATE_GLOB ++}; ++ ++#define GR_NLIMITS 32 ++ ++/* Begin Data Structures */ ++ ++struct sprole_pw { ++ unsigned char *rolename; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ ++}; ++ ++struct name_entry { ++ __u32 key; ++ ino_t inode; ++ dev_t device; ++ char *name; ++ __u16 len; ++ __u8 deleted; ++ struct name_entry *prev; ++ struct name_entry *next; ++}; ++ ++struct inodev_entry { ++ struct name_entry *nentry; ++ struct inodev_entry *prev; ++ struct inodev_entry *next; ++}; ++ ++struct acl_role_db { ++ struct acl_role_label **r_hash; ++ __u32 r_size; ++}; ++ ++struct inodev_db { ++ struct inodev_entry **i_hash; ++ __u32 i_size; ++}; ++ ++struct name_db { ++ struct name_entry **n_hash; ++ __u32 n_size; ++}; ++ ++struct crash_uid { ++ uid_t uid; ++ unsigned long expires; ++}; ++ ++struct gr_hash_struct { ++ void **table; ++ void **nametable; ++ void *first; ++ __u32 table_size; ++ __u32 used_size; ++ int type; ++}; ++ ++/* Userspace Grsecurity ACL data structures */ ++ ++struct acl_subject_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ kernel_cap_t cap_mask; ++ kernel_cap_t cap_lower; ++ kernel_cap_t cap_invert_audit; ++ ++ struct rlimit res[GR_NLIMITS]; ++ __u32 resmask; ++ ++ __u8 user_trans_type; ++ __u8 group_trans_type; ++ uid_t *user_transitions; ++ gid_t *group_transitions; ++ __u16 user_trans_num; ++ __u16 group_trans_num; ++ ++ __u32 sock_families[2]; ++ __u32 ip_proto[8]; ++ __u32 ip_type; ++ struct acl_ip_label **ips; ++ __u32 ip_num; ++ __u32 inaddr_any_override; ++ ++ __u32 crashes; ++ unsigned long expires; ++ ++ struct acl_subject_label *parent_subject; ++ struct gr_hash_struct *hash; ++ struct acl_subject_label *prev; ++ struct acl_subject_label *next; ++ ++ struct acl_object_label **obj_hash; ++ __u32 obj_hash_size; ++ __u16 pax_flags; ++}; ++ ++struct role_allowed_ip { ++ __u32 addr; ++ __u32 netmask; ++ ++ struct role_allowed_ip *prev; ++ struct 
role_allowed_ip *next; ++}; ++ ++struct role_transition { ++ char *rolename; ++ ++ struct role_transition *prev; ++ struct role_transition *next; ++}; ++ ++struct acl_role_label { ++ char *rolename; ++ uid_t uidgid; ++ __u16 roletype; ++ ++ __u16 auth_attempts; ++ unsigned long expires; ++ ++ struct acl_subject_label *root_label; ++ struct gr_hash_struct *hash; ++ ++ struct acl_role_label *prev; ++ struct acl_role_label *next; ++ ++ struct role_transition *transitions; ++ struct role_allowed_ip *allowed_ips; ++ uid_t *domain_children; ++ __u16 domain_child_num; ++ ++ umode_t umask; ++ ++ struct acl_subject_label **subj_hash; ++ __u32 subj_hash_size; ++}; ++ ++struct user_acl_role_db { ++ struct acl_role_label **r_table; ++ __u32 num_pointers; /* Number of allocations to track */ ++ __u32 num_roles; /* Number of roles */ ++ __u32 num_domain_children; /* Number of domain children */ ++ __u32 num_subjects; /* Number of subjects */ ++ __u32 num_objects; /* Number of objects */ ++}; ++ ++struct acl_object_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ ++ struct acl_subject_label *nested; ++ struct acl_object_label *globbed; ++ ++ /* next two structures not used */ ++ ++ struct acl_object_label *prev; ++ struct acl_object_label *next; ++}; ++ ++struct acl_ip_label { ++ char *iface; ++ __u32 addr; ++ __u32 netmask; ++ __u16 low, high; ++ __u8 mode; ++ __u32 type; ++ __u32 proto[8]; ++ ++ /* next two structures not used */ ++ ++ struct acl_ip_label *prev; ++ struct acl_ip_label *next; ++}; ++ ++struct gr_arg { ++ struct user_acl_role_db role_db; ++ unsigned char pw[GR_PW_LEN]; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; ++ unsigned char sp_role[GR_SPROLE_LEN]; ++ struct sprole_pw *sprole_pws; ++ dev_t segv_device; ++ ino_t segv_inode; ++ uid_t segv_uid; ++ __u16 num_sprole_pws; ++ __u16 mode; ++}; ++ ++struct gr_arg_wrapper { ++ struct gr_arg *arg; ++ __u32 version; ++ __u32 size; ++}; ++ ++struct subject_map { ++ struct acl_subject_label *user; ++ struct acl_subject_label *kernel; ++ struct subject_map *prev; ++ struct subject_map *next; ++}; ++ ++struct acl_subj_map_db { ++ struct subject_map **s_hash; ++ __u32 s_size; ++}; ++ ++struct gr_policy_state { ++ struct sprole_pw **acl_special_roles; ++ __u16 num_sprole_pws; ++ struct acl_role_label *kernel_role; ++ struct acl_role_label *role_list; ++ struct acl_role_label *default_role; ++ struct acl_role_db acl_role_set; ++ struct acl_subj_map_db subj_map_set; ++ struct name_db name_set; ++ struct inodev_db inodev_set; ++}; ++ ++struct gr_alloc_state { ++ unsigned long alloc_stack_next; ++ unsigned long alloc_stack_size; ++ void **alloc_stack; ++}; ++ ++struct gr_reload_state { ++ struct gr_policy_state oldpolicy; ++ struct gr_alloc_state oldalloc; ++ struct gr_policy_state newpolicy; ++ struct gr_alloc_state newalloc; ++ struct gr_policy_state *oldpolicy_ptr; ++ struct gr_alloc_state *oldalloc_ptr; ++ unsigned char oldmode; ++}; ++ ++/* End Data Structures Section */ ++ ++/* Hash functions generated by empirical testing by Brad Spengler ++ Makes good use of the low bits of the inode. Generally 0-1 times ++ in loop for successful match. 0-3 for unsuccessful match. 
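
As a plain-C restatement of the gr_fhash() routine defined just below (a userspace sketch, assuming the same truncation to unsigned int that the kernel version gets; the sample inode, device, and table size are hypothetical):

#include <stdio.h>

/* mirrors gr_fhash(): shift/add mix of inode and device numbers,
 * XOR-folded, reduced modulo the hash-table size */
static unsigned int fhash(unsigned long ino, unsigned long dev,
                          unsigned int sz)
{
    return (unsigned int)(((ino + dev) ^
            ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
    /* hypothetical: inode 4242 on device 0x801, 256-bucket table */
    printf("bucket %u\n", fhash(4242, 0x801, 256));
    return 0;
}
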
++ Shift/add algorithm with modulus of table size and an XOR*/ ++ ++static __inline__ unsigned int ++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz) ++{ ++ return ((((uid + type) << (16 + type)) ^ uid) % sz); ++} ++ ++ static __inline__ unsigned int ++gr_shash(const struct acl_subject_label *userp, const unsigned int sz) ++{ ++ return ((const unsigned long)userp % sz); ++} ++ ++static __inline__ unsigned int ++gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz) ++{ ++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); ++} ++ ++static __inline__ unsigned int ++gr_nhash(const char *name, const __u16 len, const unsigned int sz) ++{ ++ return full_name_hash((const unsigned char *)name, len) % sz; ++} ++ ++#define FOR_EACH_SUBJECT_START(role,subj,iter) \ ++ subj = NULL; \ ++ iter = 0; \ ++ while (iter < role->subj_hash_size) { \ ++ if (subj == NULL) \ ++ subj = role->subj_hash[iter]; \ ++ if (subj == NULL) { \ ++ iter++; \ ++ continue; \ ++ } ++ ++#define FOR_EACH_SUBJECT_END(subj,iter) \ ++ subj = subj->next; \ ++ if (subj == NULL) \ ++ iter++; \ ++ } ++ ++ ++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ ++ subj = role->hash->first; \ ++ while (subj != NULL) { ++ ++#define FOR_EACH_NESTED_SUBJECT_END(subj) \ ++ subj = subj->next; \ ++ } ++ ++#endif ++ +diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h +new file mode 100644 +index 0000000..33ebd1f +--- /dev/null ++++ b/include/linux/gracl_compat.h +@@ -0,0 +1,156 @@ ++#ifndef GR_ACL_COMPAT_H ++#define GR_ACL_COMPAT_H ++ ++#include <linux/resource.h> ++#include <asm/resource.h> ++ ++struct sprole_pw_compat { ++ compat_uptr_t rolename; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; ++}; ++ ++struct gr_hash_struct_compat { ++ compat_uptr_t table; ++ compat_uptr_t nametable; ++ compat_uptr_t first; ++ __u32 table_size; ++ __u32 used_size; ++ int type; ++}; ++ ++struct acl_subject_label_compat { ++ compat_uptr_t filename; ++ compat_ino_t inode; ++ __u32 device; ++ __u32 mode; ++ kernel_cap_t cap_mask; ++ kernel_cap_t cap_lower; ++ kernel_cap_t cap_invert_audit; ++ ++ struct compat_rlimit res[GR_NLIMITS]; ++ __u32 resmask; ++ ++ __u8 user_trans_type; ++ __u8 group_trans_type; ++ compat_uptr_t user_transitions; ++ compat_uptr_t group_transitions; ++ __u16 user_trans_num; ++ __u16 group_trans_num; ++ ++ __u32 sock_families[2]; ++ __u32 ip_proto[8]; ++ __u32 ip_type; ++ compat_uptr_t ips; ++ __u32 ip_num; ++ __u32 inaddr_any_override; ++ ++ __u32 crashes; ++ compat_ulong_t expires; ++ ++ compat_uptr_t parent_subject; ++ compat_uptr_t hash; ++ compat_uptr_t prev; ++ compat_uptr_t next; ++ ++ compat_uptr_t obj_hash; ++ __u32 obj_hash_size; ++ __u16 pax_flags; ++}; ++ ++struct role_allowed_ip_compat { ++ __u32 addr; ++ __u32 netmask; ++ ++ compat_uptr_t prev; ++ compat_uptr_t next; ++}; ++ ++struct role_transition_compat { ++ compat_uptr_t rolename; ++ ++ compat_uptr_t prev; ++ compat_uptr_t next; ++}; ++ ++struct acl_role_label_compat { ++ compat_uptr_t rolename; ++ uid_t uidgid; ++ __u16 roletype; ++ ++ __u16 auth_attempts; ++ compat_ulong_t expires; ++ ++ compat_uptr_t root_label; ++ compat_uptr_t hash; ++ ++ compat_uptr_t prev; ++ compat_uptr_t next; ++ ++ compat_uptr_t transitions; ++ compat_uptr_t allowed_ips; ++ compat_uptr_t domain_children; ++ __u16 domain_child_num; ++ ++ umode_t umask; ++ ++ compat_uptr_t subj_hash; ++ __u32 subj_hash_size; ++}; ++ ++struct user_acl_role_db_compat { ++ compat_uptr_t r_table; ++ __u32 num_pointers; ++ __u32 
num_roles; ++ __u32 num_domain_children; ++ __u32 num_subjects; ++ __u32 num_objects; ++}; ++ ++struct acl_object_label_compat { ++ compat_uptr_t filename; ++ compat_ino_t inode; ++ __u32 device; ++ __u32 mode; ++ ++ compat_uptr_t nested; ++ compat_uptr_t globbed; ++ ++ compat_uptr_t prev; ++ compat_uptr_t next; ++}; ++ ++struct acl_ip_label_compat { ++ compat_uptr_t iface; ++ __u32 addr; ++ __u32 netmask; ++ __u16 low, high; ++ __u8 mode; ++ __u32 type; ++ __u32 proto[8]; ++ ++ compat_uptr_t prev; ++ compat_uptr_t next; ++}; ++ ++struct gr_arg_compat { ++ struct user_acl_role_db_compat role_db; ++ unsigned char pw[GR_PW_LEN]; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; ++ unsigned char sp_role[GR_SPROLE_LEN]; ++ compat_uptr_t sprole_pws; ++ __u32 segv_device; ++ compat_ino_t segv_inode; ++ uid_t segv_uid; ++ __u16 num_sprole_pws; ++ __u16 mode; ++}; ++ ++struct gr_arg_wrapper_compat { ++ compat_uptr_t arg; ++ __u32 version; ++ __u32 size; ++}; ++ ++#endif +diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h +new file mode 100644 +index 0000000..323ecf2 +--- /dev/null ++++ b/include/linux/gralloc.h +@@ -0,0 +1,9 @@ ++#ifndef __GRALLOC_H ++#define __GRALLOC_H ++ ++void acl_free_all(void); ++int acl_alloc_stack_init(unsigned long size); ++void *acl_alloc(unsigned long len); ++void *acl_alloc_num(unsigned long num, unsigned long len); ++ ++#endif +diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h +new file mode 100644 +index 0000000..be66033 +--- /dev/null ++++ b/include/linux/grdefs.h +@@ -0,0 +1,140 @@ ++#ifndef GRDEFS_H ++#define GRDEFS_H ++ ++/* Begin grsecurity status declarations */ ++ ++enum { ++ GR_READY = 0x01, ++ GR_STATUS_INIT = 0x00 // disabled state ++}; ++ ++/* Begin ACL declarations */ ++ ++/* Role flags */ ++ ++enum { ++ GR_ROLE_USER = 0x0001, ++ GR_ROLE_GROUP = 0x0002, ++ GR_ROLE_DEFAULT = 0x0004, ++ GR_ROLE_SPECIAL = 0x0008, ++ GR_ROLE_AUTH = 0x0010, ++ GR_ROLE_NOPW = 0x0020, ++ GR_ROLE_GOD = 0x0040, ++ GR_ROLE_LEARN = 0x0080, ++ GR_ROLE_TPE = 0x0100, ++ GR_ROLE_DOMAIN = 0x0200, ++ GR_ROLE_PAM = 0x0400, ++ GR_ROLE_PERSIST = 0x0800 ++}; ++ ++/* ACL Subject and Object mode flags */ ++enum { ++ GR_DELETED = 0x80000000 ++}; ++ ++/* ACL Object-only mode flags */ ++enum { ++ GR_READ = 0x00000001, ++ GR_APPEND = 0x00000002, ++ GR_WRITE = 0x00000004, ++ GR_EXEC = 0x00000008, ++ GR_FIND = 0x00000010, ++ GR_INHERIT = 0x00000020, ++ GR_SETID = 0x00000040, ++ GR_CREATE = 0x00000080, ++ GR_DELETE = 0x00000100, ++ GR_LINK = 0x00000200, ++ GR_AUDIT_READ = 0x00000400, ++ GR_AUDIT_APPEND = 0x00000800, ++ GR_AUDIT_WRITE = 0x00001000, ++ GR_AUDIT_EXEC = 0x00002000, ++ GR_AUDIT_FIND = 0x00004000, ++ GR_AUDIT_INHERIT= 0x00008000, ++ GR_AUDIT_SETID = 0x00010000, ++ GR_AUDIT_CREATE = 0x00020000, ++ GR_AUDIT_DELETE = 0x00040000, ++ GR_AUDIT_LINK = 0x00080000, ++ GR_PTRACERD = 0x00100000, ++ GR_NOPTRACE = 0x00200000, ++ GR_SUPPRESS = 0x00400000, ++ GR_NOLEARN = 0x00800000, ++ GR_INIT_TRANSFER= 0x01000000 ++}; ++ ++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \ ++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) ++ ++/* ACL subject-only mode flags */ ++enum { ++ GR_KILL = 0x00000001, ++ GR_VIEW = 0x00000002, ++ GR_PROTECTED = 0x00000004, ++ GR_LEARN = 0x00000008, ++ GR_OVERRIDE = 0x00000010, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_DUMMY = 0x00000020, ++ GR_PROTSHM = 0x00000040, ++ GR_KILLPROC = 0x00000080, ++ GR_KILLIPPROC = 
0x00000100, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_NOTROJAN = 0x00000200, ++ GR_PROTPROCFD = 0x00000400, ++ GR_PROCACCT = 0x00000800, ++ GR_RELAXPTRACE = 0x00001000, ++ //GR_NESTED = 0x00002000, ++ GR_INHERITLEARN = 0x00004000, ++ GR_PROCFIND = 0x00008000, ++ GR_POVERRIDE = 0x00010000, ++ GR_KERNELAUTH = 0x00020000, ++ GR_ATSECURE = 0x00040000, ++ GR_SHMEXEC = 0x00080000 ++}; ++ ++enum { ++ GR_PAX_ENABLE_SEGMEXEC = 0x0001, ++ GR_PAX_ENABLE_PAGEEXEC = 0x0002, ++ GR_PAX_ENABLE_MPROTECT = 0x0004, ++ GR_PAX_ENABLE_RANDMMAP = 0x0008, ++ GR_PAX_ENABLE_EMUTRAMP = 0x0010, ++ GR_PAX_DISABLE_SEGMEXEC = 0x0100, ++ GR_PAX_DISABLE_PAGEEXEC = 0x0200, ++ GR_PAX_DISABLE_MPROTECT = 0x0400, ++ GR_PAX_DISABLE_RANDMMAP = 0x0800, ++ GR_PAX_DISABLE_EMUTRAMP = 0x1000, ++}; ++ ++enum { ++ GR_ID_USER = 0x01, ++ GR_ID_GROUP = 0x02, ++}; ++ ++enum { ++ GR_ID_ALLOW = 0x01, ++ GR_ID_DENY = 0x02, ++}; ++ ++#define GR_CRASH_RES 31 ++#define GR_UIDTABLE_MAX 500 ++ ++/* begin resource learning section */ ++enum { ++ GR_RLIM_CPU_BUMP = 60, ++ GR_RLIM_FSIZE_BUMP = 50000, ++ GR_RLIM_DATA_BUMP = 10000, ++ GR_RLIM_STACK_BUMP = 1000, ++ GR_RLIM_CORE_BUMP = 10000, ++ GR_RLIM_RSS_BUMP = 500000, ++ GR_RLIM_NPROC_BUMP = 1, ++ GR_RLIM_NOFILE_BUMP = 5, ++ GR_RLIM_MEMLOCK_BUMP = 50000, ++ GR_RLIM_AS_BUMP = 500000, ++ GR_RLIM_LOCKS_BUMP = 2, ++ GR_RLIM_SIGPENDING_BUMP = 5, ++ GR_RLIM_MSGQUEUE_BUMP = 10000, ++ GR_RLIM_NICE_BUMP = 1, ++ GR_RLIM_RTPRIO_BUMP = 1, ++ GR_RLIM_RTTIME_BUMP = 1000000 ++}; ++ ++#endif +diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h +new file mode 100644 +index 0000000..d25522e +--- /dev/null ++++ b/include/linux/grinternal.h +@@ -0,0 +1,229 @@ ++#ifndef __GRINTERNAL_H ++#define __GRINTERNAL_H ++ ++#ifdef CONFIG_GRKERNSEC ++ ++#include <linux/fs.h> ++#include <linux/mnt_namespace.h> ++#include <linux/nsproxy.h> ++#include <linux/gracl.h> ++#include <linux/grdefs.h> ++#include <linux/grmsg.h> ++ ++void gr_add_learn_entry(const char *fmt, ...) 
++ __attribute__ ((format (printf, 1, 2))); ++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, ++ const struct vfsmount *mnt); ++__u32 gr_check_create(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt, const __u32 mode); ++int gr_check_protected_task(const struct task_struct *task); ++__u32 to_gr_audit(const __u32 reqmode); ++int gr_set_acls(const int type); ++int gr_acl_is_enabled(void); ++char gr_roletype_to_char(void); ++ ++void gr_handle_alertkill(struct task_struct *task); ++char *gr_to_filename(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename1(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename2(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename3(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++ ++extern int grsec_enable_ptrace_readexec; ++extern int grsec_enable_harden_ptrace; ++extern int grsec_enable_link; ++extern int grsec_enable_fifo; ++extern int grsec_enable_execve; ++extern int grsec_enable_shm; ++extern int grsec_enable_execlog; ++extern int grsec_enable_signal; ++extern int grsec_enable_audit_ptrace; ++extern int grsec_enable_forkfail; ++extern int grsec_enable_time; ++extern int grsec_enable_rofs; ++extern int grsec_deny_new_usb; ++extern int grsec_enable_chroot_shmat; ++extern int grsec_enable_chroot_mount; ++extern int grsec_enable_chroot_double; ++extern int grsec_enable_chroot_pivot; ++extern int grsec_enable_chroot_chdir; ++extern int grsec_enable_chroot_chmod; ++extern int grsec_enable_chroot_mknod; ++extern int grsec_enable_chroot_fchdir; ++extern int grsec_enable_chroot_nice; ++extern int grsec_enable_chroot_execlog; ++extern int grsec_enable_chroot_caps; ++extern int grsec_enable_chroot_sysctl; ++extern int grsec_enable_chroot_unix; ++extern int grsec_enable_symlinkown; ++extern kgid_t grsec_symlinkown_gid; ++extern int grsec_enable_tpe; ++extern kgid_t grsec_tpe_gid; ++extern int grsec_enable_tpe_all; ++extern int grsec_enable_tpe_invert; ++extern int grsec_enable_socket_all; ++extern kgid_t grsec_socket_all_gid; ++extern int grsec_enable_socket_client; ++extern kgid_t grsec_socket_client_gid; ++extern int grsec_enable_socket_server; ++extern kgid_t grsec_socket_server_gid; ++extern kgid_t grsec_audit_gid; ++extern int grsec_enable_group; ++extern int grsec_enable_log_rwxmaps; ++extern int grsec_enable_mount; ++extern int grsec_enable_chdir; ++extern int grsec_resource_logging; ++extern int grsec_enable_blackhole; ++extern int grsec_lastack_retries; ++extern int grsec_enable_brute; ++extern int grsec_enable_harden_ipc; ++extern int grsec_lock; ++ ++extern spinlock_t grsec_alert_lock; ++extern unsigned long grsec_alert_wtime; ++extern unsigned long grsec_alert_fyet; ++ ++extern spinlock_t grsec_audit_lock; ++ ++extern rwlock_t grsec_exec_file_lock; ++ ++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \ ++ gr_to_filename2((tsk)->exec_file->f_path.dentry, \ ++ (tsk)->exec_file->f_path.mnt) : "/") ++ ++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \ ++ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \ ++ (tsk)->real_parent->exec_file->f_path.mnt) : "/") ++ ++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \ ++ gr_to_filename((tsk)->exec_file->f_path.dentry, \ ++ (tsk)->exec_file->f_path.mnt) : "/") ++ ++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? 
\ ++ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \ ++ (tsk)->real_parent->exec_file->f_path.mnt) : "/") ++ ++#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted) ++ ++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry) ++ ++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2) ++{ ++ if (file1 && file2) { ++ const struct inode *inode1 = file1->f_path.dentry->d_inode; ++ const struct inode *inode2 = file2->f_path.dentry->d_inode; ++ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev) ++ return true; ++ } ++ ++ return false; ++} ++ ++#define GR_CHROOT_CAPS {{ \ ++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ ++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ ++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ ++ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \ ++ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }} ++ ++#define security_learn(normal_msg,args...) \ ++({ \ ++ read_lock(&grsec_exec_file_lock); \ ++ gr_add_learn_entry(normal_msg "\n", ## args); \ ++ read_unlock(&grsec_exec_file_lock); \ ++}) ++ ++enum { ++ GR_DO_AUDIT, ++ GR_DONT_AUDIT, ++ /* used for non-audit messages that we shouldn't kill the task on */ ++ GR_DONT_AUDIT_GOOD ++}; ++ ++enum { ++ GR_TTYSNIFF, ++ GR_RBAC, ++ GR_RBAC_STR, ++ GR_STR_RBAC, ++ GR_RBAC_MODE2, ++ GR_RBAC_MODE3, ++ GR_FILENAME, ++ GR_SYSCTL_HIDDEN, ++ GR_NOARGS, ++ GR_ONE_INT, ++ GR_ONE_INT_TWO_STR, ++ GR_ONE_STR, ++ GR_STR_INT, ++ GR_TWO_STR_INT, ++ GR_TWO_INT, ++ GR_TWO_U64, ++ GR_THREE_INT, ++ GR_FIVE_INT_TWO_STR, ++ GR_TWO_STR, ++ GR_THREE_STR, ++ GR_FOUR_STR, ++ GR_STR_FILENAME, ++ GR_FILENAME_STR, ++ GR_FILENAME_TWO_INT, ++ GR_FILENAME_TWO_INT_STR, ++ GR_TEXTREL, ++ GR_PTRACE, ++ GR_RESOURCE, ++ GR_CAP, ++ GR_SIG, ++ GR_SIG2, ++ GR_CRASH1, ++ GR_CRASH2, ++ GR_PSACCT, ++ GR_RWXMAP, ++ GR_RWXMAPVMA ++}; ++ ++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) ++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) ++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) ++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) ++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) ++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) ++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) ++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) ++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) ++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) ++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) ++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) ++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) ++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, 
num2) ++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2) ++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) ++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) ++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) ++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num) ++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) ++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) ++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) ++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) ++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) ++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) ++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) ++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) ++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) ++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) ++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr) ++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num) ++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) ++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) ++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) ++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str) ++#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str) ++ ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...); ++ ++#endif ++ ++#endif +diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h +new file mode 100644 +index 0000000..b02ba9d +--- /dev/null ++++ b/include/linux/grmsg.h +@@ -0,0 +1,117 @@ ++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " ++#define GR_STOPMOD_MSG "denied modification of module state by " ++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by " ++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by " ++#define GR_IOPERM_MSG "denied use of ioperm() by " ++#define GR_IOPL_MSG "denied use of iopl() by " 
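
Nearly every template in grmsg.h bounds its string arguments with an explicit printf precision ("%.950s", "%.480s", and so on), so an attacker-controlled path can never inflate a log line. A small userspace sketch of that behavior, echoing GR_ROFS_BLOCKWRITE_MSG above (the 2000-byte path is invented for the demo):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char path[2000];

    memset(path, 'A', sizeof(path) - 1);
    path[sizeof(path) - 1] = '\0';

    /* "%.950s" caps the output at 950 bytes, no matter how long the
     * (potentially attacker-chosen) string is */
    printf("denied write to block device %.950s by \n", path);
    return 0;
}
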
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " ++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by " ++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " ++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by " ++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " ++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4" ++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4" ++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " ++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " ++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " ++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " ++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " ++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " ++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " ++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " ++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " ++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " ++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " ++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " ++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " ++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " ++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " ++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " ++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by " ++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " ++#define GR_EXEC_ACL_MSG "%s execution of %.950s by " ++#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by " ++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" ++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" ++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by " ++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " ++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " ++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " ++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " ++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " ++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " ++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " ++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " ++#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by " ++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " ++#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by " ++#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by " ++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " ++#define GR_INITF_ACL_MSG "init_variables() failed %s by " ++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use <kernel image name> gracl=off from your boot loader" ++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by " ++#define GR_SHUTS_ACL_MSG "shutdown auth success for " ++#define GR_SHUTF_ACL_MSG "shutdown auth failure for " ++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " ++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " ++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " ++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " ++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " ++#define GR_ENABLEF_ACL_MSG "unable to load %s for " ++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" ++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " ++#define GR_RELOADF_ACL_MSG "failed reload of %s for " ++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " ++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " ++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " ++#define GR_SPROLEF_ACL_MSG "special role %s failure for " ++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " ++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " ++#define GR_INVMODE_ACL_MSG "invalid mode %d by " ++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " ++#define GR_FAILFORK_MSG "failed fork with errno %s by " ++#define GR_NICE_CHROOT_MSG "denied priority change by " ++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in " ++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " ++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " ++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " ++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " ++#define GR_TIME_MSG "time set by " ++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by " ++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " ++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " ++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " ++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by " ++#define GR_BIND_MSG "denied bind() by " ++#define GR_CONNECT_MSG "denied connect() by " ++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4" ++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " ++#define GR_CAP_ACL_MSG "use of %s denied for " ++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for " ++#define GR_CAP_ACL_MSG2 "use of %s permitted for " ++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " ++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " ++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by " ++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by " ++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by " ++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " ++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " ++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " ++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by " ++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of 
%.950s by " ++#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by " ++#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by " ++#define GR_VM86_MSG "denied use of vm86 by " ++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by " ++#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by " ++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " ++#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " ++#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " ++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " ++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for " ++#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by " ++#define GR_MSRWRITE_MSG "denied write to CPU MSR by " +diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h +new file mode 100644 +index 0000000..acda855 +--- /dev/null ++++ b/include/linux/grsecurity.h +@@ -0,0 +1,254 @@ ++#ifndef GR_SECURITY_H ++#define GR_SECURITY_H ++#include <linux/fs.h> ++#include <linux/fs_struct.h> ++#include <linux/binfmts.h> ++#include <linux/gracl.h> ++ ++/* notify of brain-dead configs */ ++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled." ++#endif ++#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled" ++#endif ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) ++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled." ++#endif ++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP) ++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled." ++#endif ++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR) ++#error "CONFIG_PAX enabled, but no PaX options are enabled." 
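
The long list of gr_handle_*() declarations that follows shares a simple contract: the hook returns nonzero to deny, and the caller translates that into an errno. A hypothetical call site, modeled in self-contained userspace C with stub types and a stubbed policy check (none of this is taken from the patch):

#include <stdio.h>
#include <errno.h>

struct dentry { int dummy; };
struct vfsmount { int dummy; };

/* stub standing in for gr_handle_chroot_chmod(): for the demo,
 * deny any mode carrying setuid/setgid bits */
static int stub_handle_chroot_chmod(struct dentry *d, struct vfsmount *m,
                                    int mode)
{
    (void)d; (void)m;
    return (mode & 06000) != 0;
}

static int chmod_with_policy(struct dentry *d, struct vfsmount *m, int mode)
{
    if (stub_handle_chroot_chmod(d, m, mode))
        return -EPERM;          /* policy said no */
    return 0;                   /* proceed with the ordinary chmod */
}

int main(void)
{
    struct dentry d; struct vfsmount m;
    printf("0644 -> %d, 04755 -> %d\n",
           chmod_with_policy(&d, &m, 0644),
           chmod_with_policy(&d, &m, 04755));
    return 0;
}
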
++#endif ++ ++int gr_handle_new_usb(void); ++ ++void gr_handle_brute_attach(int dumpable); ++void gr_handle_brute_check(void); ++void gr_handle_kernel_exploit(void); ++ ++char gr_roletype_to_char(void); ++ ++int gr_proc_is_restricted(void); ++ ++int gr_acl_enable_at_secure(void); ++ ++int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs); ++int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs); ++ ++int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap); ++ ++void gr_del_task_from_ip_table(struct task_struct *p); ++ ++int gr_pid_is_chrooted(struct task_struct *p); ++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type); ++int gr_handle_chroot_nice(void); ++int gr_handle_chroot_sysctl(const int op); ++int gr_handle_chroot_setpriority(struct task_struct *p, ++ const int niceval); ++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); ++int gr_chroot_fhandle(void); ++int gr_handle_chroot_chroot(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_chroot_chdir(const struct path *path); ++int gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mount(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const char *dev_name); ++int gr_handle_chroot_pivot(void); ++int gr_handle_chroot_unix(const pid_t pid); ++ ++int gr_handle_rawio(const struct inode *inode); ++ ++void gr_handle_ioperm(void); ++void gr_handle_iopl(void); ++void gr_handle_msr_write(void); ++ ++umode_t gr_acl_umask(void); ++ ++int gr_tpe_allow(const struct file *file); ++ ++void gr_set_chroot_entries(struct task_struct *task, const struct path *path); ++void gr_clear_chroot_entries(struct task_struct *task); ++ ++void gr_log_forkfail(const int retval); ++void gr_log_timechange(void); ++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t); ++void gr_log_chdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_log_chroot_exec(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_log_remount(const char *devname, const int retval); ++void gr_log_unmount(const char *devname, const int retval); ++void gr_log_mount(const char *from, const char *to, const int retval); ++void gr_log_textrel(struct vm_area_struct *vma); ++void gr_log_ptgnustack(struct file *file); ++void gr_log_rwxmmap(struct file *file); ++void gr_log_rwxmprotect(struct vm_area_struct *vma); ++ ++int gr_handle_follow_link(const struct inode *parent, ++ const struct inode *inode, ++ const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_fifo(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const struct dentry *dir, const int flag, ++ const int acc_mode); ++int gr_handle_hardlink(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ struct inode *inode, ++ const int mode, const struct filename *to); ++ ++int gr_is_capable(const int cap); ++int gr_is_capable_nolog(const int cap); ++int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap); ++int gr_task_is_capable_nolog(const struct task_struct *task, const int cap); ++ ++void gr_copy_label(struct task_struct *tsk); ++void gr_handle_crash(struct task_struct *task, const int sig); ++int gr_handle_signal(const struct task_struct *p, const int sig); ++int gr_check_crash_uid(const kuid_t 
uid); ++int gr_check_protected_task(const struct task_struct *task); ++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type); ++int gr_acl_handle_mmap(const struct file *file, ++ const unsigned long prot); ++int gr_acl_handle_mprotect(const struct file *file, ++ const unsigned long prot); ++int gr_check_hidden_task(const struct task_struct *tsk); ++__u32 gr_acl_handle_truncate(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_utime(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_access(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int fmode); ++__u32 gr_acl_handle_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, umode_t *mode); ++__u32 gr_acl_handle_chown(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_setxattr(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_removexattr(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_ptrace(struct task_struct *task, const long request); ++int gr_handle_proc_ptrace(struct task_struct *task); ++__u32 gr_acl_handle_execve(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_check_crash_exec(const struct file *filp); ++int gr_acl_is_enabled(void); ++void gr_set_role_label(struct task_struct *task, const kuid_t uid, ++ const kgid_t gid); ++int gr_set_proc_label(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const int unsafe_flags); ++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_open(const struct dentry *dentry, ++ const struct vfsmount *mnt, int acc_mode); ++__u32 gr_acl_handle_creat(const struct dentry *dentry, ++ const struct dentry *p_dentry, ++ const struct vfsmount *p_mnt, ++ int open_flags, int acc_mode, const int imode); ++void gr_handle_create(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_proc_create(const struct dentry *dentry, ++ const struct inode *inode); ++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const int mode); ++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt); ++__u32 gr_acl_handle_rmdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_delete(const ino_t ino, const dev_t dev); ++__u32 gr_acl_handle_unlink(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct filename *from); ++__u32 gr_acl_handle_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt, const struct filename *to); ++int gr_handle_symlink_owner(const struct path *link, const struct inode *target); ++int gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const struct filename *newname); ++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace); ++__u32 
gr_check_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt); ++int gr_acl_handle_filldir(const struct file *file, const char *name, ++ const unsigned int namelen, const ino_t ino); ++ ++__u32 gr_acl_handle_unix(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_acl_handle_exit(void); ++void gr_acl_handle_psacct(struct task_struct *task, const long code); ++int gr_acl_handle_procpidmem(const struct task_struct *task); ++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags); ++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode); ++void gr_audit_ptrace(struct task_struct *task); ++dev_t gr_get_dev_from_dentry(struct dentry *dentry); ++void gr_put_exec_file(struct task_struct *task); ++ ++int gr_ptrace_readexec(struct file *file, int unsafe_flags); ++ ++#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)) ++extern void gr_learn_resource(const struct task_struct *task, const int res, ++ const unsigned long wanted, const int gt); ++#else ++static inline void gr_learn_resource(const struct task_struct *task, const int res, ++ const unsigned long wanted, const int gt) ++{ ++} ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++extern void gr_log_resource(const struct task_struct *task, const int res, ++ const unsigned long wanted, const int gt); ++#else ++static inline void gr_log_resource(const struct task_struct *task, const int res, ++ const unsigned long wanted, const int gt) ++{ ++} ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p); ++void gr_handle_vm86(void); ++void gr_handle_mem_readwrite(u64 from, u64 to); ++ ++void gr_log_badprocpid(const char *entry); ++ ++extern int grsec_enable_dmesg; ++extern int grsec_disable_privio; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++extern kgid_t grsec_proc_gid; ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++extern int grsec_enable_chroot_findtask; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern int grsec_enable_setxid; ++#endif ++#endif ++ ++#endif +diff --git a/include/linux/grsock.h b/include/linux/grsock.h +new file mode 100644 +index 0000000..e7ffaaf +--- /dev/null ++++ b/include/linux/grsock.h +@@ -0,0 +1,19 @@ ++#ifndef __GRSOCK_H ++#define __GRSOCK_H ++ ++extern void gr_attach_curr_ip(const struct sock *sk); ++extern int gr_handle_sock_all(const int family, const int type, ++ const int protocol); ++extern int gr_handle_sock_server(const struct sockaddr *sck); ++extern int gr_handle_sock_server_other(const struct sock *sck); ++extern int gr_handle_sock_client(const struct sockaddr *sck); ++extern int gr_search_connect(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_bind(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_listen(struct socket * sock); ++extern int gr_search_accept(struct socket * sock); ++extern int gr_search_socket(const int domain, const int type, ++ const int protocol); ++ ++#endif +diff --git a/include/linux/hash.h b/include/linux/hash.h +index bd1754c..8240892 100644 +--- a/include/linux/hash.h ++++ b/include/linux/hash.h +@@ -83,7 +83,7 @@ static inline u32 hash32_ptr(const void *ptr) + struct fast_hash_ops { + u32 (*hash)(const void *data, u32 len, u32 seed); + u32 (*hash2)(const u32 *data, u32 len, u32 seed); +-}; ++} __no_const; + + /** + * 
arch_fast_hash - Caclulates a hash over a given buffer that can have +diff --git a/include/linux/highmem.h b/include/linux/highmem.h +index 7fb31da..08b5114 100644 +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page) + kunmap_atomic(kaddr); + } + ++static inline void sanitize_highpage(struct page *page) ++{ ++ void *kaddr; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ kaddr = kmap_atomic(page); ++ clear_page(kaddr); ++ kunmap_atomic(kaddr); ++ local_irq_restore(flags); ++} ++ + static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h +index 1c7b89a..7dda400 100644 +--- a/include/linux/hwmon-sysfs.h ++++ b/include/linux/hwmon-sysfs.h +@@ -25,7 +25,8 @@ + struct sensor_device_attribute{ + struct device_attribute dev_attr; + int index; +-}; ++} __do_const; ++typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const; + #define to_sensor_dev_attr(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute, dev_attr) + +@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 { + struct device_attribute dev_attr; + u8 index; + u8 nr; +-}; ++} __do_const; ++typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const; + #define to_sensor_dev_attr_2(_dev_attr) \ + container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr) + +diff --git a/include/linux/i2c.h b/include/linux/i2c.h +index deddeb8..bcaf62d 100644 +--- a/include/linux/i2c.h ++++ b/include/linux/i2c.h +@@ -378,6 +378,7 @@ struct i2c_algorithm { + /* To determine what the adapter supports */ + u32 (*functionality) (struct i2c_adapter *); + }; ++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const; + + /** + * struct i2c_bus_recovery_info - I2C bus recovery information +diff --git a/include/linux/i2o.h b/include/linux/i2o.h +index d23c3c2..eb63c81 100644 +--- a/include/linux/i2o.h ++++ b/include/linux/i2o.h +@@ -565,7 +565,7 @@ struct i2o_controller { + struct i2o_device *exec; /* Executive */ + #if BITS_PER_LONG == 64 + spinlock_t context_list_lock; /* lock for context_list */ +- atomic_t context_list_counter; /* needed for unique contexts */ ++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */ + struct list_head context_list; /* list of context id's + and pointers */ + #endif +diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h +index aff7ad8..3942bbd 100644 +--- a/include/linux/if_pppox.h ++++ b/include/linux/if_pppox.h +@@ -76,7 +76,7 @@ struct pppox_proto { + int (*ioctl)(struct socket *sock, unsigned int cmd, + unsigned long arg); + struct module *owner; +-}; ++} __do_const; + + extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); + extern void unregister_pppox_proto(int proto_num); +diff --git a/include/linux/init.h b/include/linux/init.h +index e168880..d9b489d 100644 +--- a/include/linux/init.h ++++ b/include/linux/init.h +@@ -37,9 +37,17 @@ + * section. 
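
A rough model of what the __latent_entropy annotation attached to __init just below amounts to (userspace C, assuming 64-bit unsigned long; the real plugin operates on GCC's IR and injects a fresh random constant per function, so this is only a hand-written stand-in):

#include <stdio.h>

unsigned long latent_entropy;   /* the kernel-wide sink the plugin
                                 * mixes into the boot entropy pool */

static void instrumented_init_function(void)
{
    /* stand-in for the plugin-injected code: fold a per-build random
     * constant into the sink, then rotate */
    latent_entropy ^= 0x9e3779b97f4a7c15UL;
    latent_entropy = (latent_entropy << 7) | (latent_entropy >> 57);
    /* ... original body of the init function would run here ... */
}

int main(void)
{
    instrumented_init_function();
    printf("%#lx\n", latent_entropy);
    return 0;
}
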
+ */ + ++#define add_init_latent_entropy __latent_entropy ++ ++#ifdef CONFIG_MEMORY_HOTPLUG ++#define add_meminit_latent_entropy ++#else ++#define add_meminit_latent_entropy __latent_entropy ++#endif ++ + /* These are for everybody (although not all archs will actually + discard it in modules) */ +-#define __init __section(.init.text) __cold notrace ++#define __init __section(.init.text) __cold notrace add_init_latent_entropy + #define __initdata __section(.init.data) + #define __initconst __constsection(.init.rodata) + #define __exitdata __section(.exit.data) +@@ -100,7 +108,7 @@ + #define __cpuexitconst + + /* Used for MEMORY_HOTPLUG */ +-#define __meminit __section(.meminit.text) __cold notrace ++#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy + #define __meminitdata __section(.meminit.data) + #define __meminitconst __constsection(.meminit.rodata) + #define __memexit __section(.memexit.text) __exitused __cold notrace +diff --git a/include/linux/init_task.h b/include/linux/init_task.h +index 6df7f9f..d0bf699 100644 +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -156,6 +156,12 @@ extern struct task_group root_task_group; + + #define INIT_TASK_COMM "swapper" + ++#ifdef CONFIG_X86 ++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO, ++#else ++#define INIT_TASK_THREAD_INFO ++#endif ++ + #ifdef CONFIG_RT_MUTEXES + # define INIT_RT_MUTEXES(tsk) \ + .pi_waiters = RB_ROOT, \ +@@ -203,6 +209,7 @@ extern struct task_group root_task_group; + RCU_POINTER_INITIALIZER(cred, &init_cred), \ + .comm = INIT_TASK_COMM, \ + .thread = INIT_THREAD, \ ++ INIT_TASK_THREAD_INFO \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index 203c43d..605836b 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -411,8 +411,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; + + struct softirq_action + { +- void (*action)(struct softirq_action *); +-}; ++ void (*action)(void); ++} __no_const; + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +@@ -426,7 +426,7 @@ static inline void do_softirq_own_stack(void) + } + #endif + +-extern void open_softirq(int nr, void (*action)(struct softirq_action *)); ++extern void open_softirq(int nr, void (*action)(void)); + extern void softirq_init(void); + extern void __raise_softirq_irqoff(unsigned int nr); + +diff --git a/include/linux/iommu.h b/include/linux/iommu.h +index b96a5b2..2732d1c 100644 +--- a/include/linux/iommu.h ++++ b/include/linux/iommu.h +@@ -131,7 +131,7 @@ struct iommu_ops { + u32 (*domain_get_windows)(struct iommu_domain *domain); + + unsigned long pgsize_bitmap; +-}; ++} __do_const; + + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ + #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ +diff --git a/include/linux/ioport.h b/include/linux/ioport.h +index 89b7c24..382af74 100644 +--- a/include/linux/ioport.h ++++ b/include/linux/ioport.h +@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start); + int adjust_resource(struct resource *res, resource_size_t start, + resource_size_t size); + resource_size_t resource_alignment(struct resource *res); +-static inline resource_size_t resource_size(const struct resource *res) ++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res) + { + return res->end - res->start + 1; + } +diff --git 
a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h +index 35e7eca..6afb7ad 100644 +--- a/include/linux/ipc_namespace.h ++++ b/include/linux/ipc_namespace.h +@@ -69,7 +69,7 @@ struct ipc_namespace { + struct user_namespace *user_ns; + + unsigned int proc_inum; +-}; ++} __randomize_layout; + + extern struct ipc_namespace init_ipc_ns; + extern atomic_t nr_ipc_ns; +diff --git a/include/linux/irq.h b/include/linux/irq.h +index ef1ac9f..e1db06c 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -338,7 +338,8 @@ struct irq_chip { + void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); + + unsigned long flags; +-}; ++} __do_const; ++typedef struct irq_chip __no_const irq_chip_no_const; + + /* + * irq_chip specific flags +diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h +index 0ceb389..eed3fb8 100644 +--- a/include/linux/irqchip/arm-gic.h ++++ b/include/linux/irqchip/arm-gic.h +@@ -73,9 +73,11 @@ + + #ifndef __ASSEMBLY__ + ++#include <linux/irq.h> ++ + struct device_node; + +-extern struct irq_chip gic_arch_extn; ++extern irq_chip_no_const gic_arch_extn; + + void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, + u32 offset, struct device_node *); +diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h +index 1f44466..b481806 100644 +--- a/include/linux/jiffies.h ++++ b/include/linux/jiffies.h +@@ -292,20 +292,20 @@ extern unsigned long preset_lpj; + /* + * Convert various time units to each other: + */ +-extern unsigned int jiffies_to_msecs(const unsigned long j); +-extern unsigned int jiffies_to_usecs(const unsigned long j); ++extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1); ++extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1); + +-static inline u64 jiffies_to_nsecs(const unsigned long j) ++static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j) + { + return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; + } + +-extern unsigned long msecs_to_jiffies(const unsigned int m); +-extern unsigned long usecs_to_jiffies(const unsigned int u); ++extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1); ++extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1); + extern unsigned long timespec_to_jiffies(const struct timespec *value); + extern void jiffies_to_timespec(const unsigned long jiffies, +- struct timespec *value); +-extern unsigned long timeval_to_jiffies(const struct timeval *value); ++ struct timespec *value) __intentional_overflow(-1); ++extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1); + extern void jiffies_to_timeval(const unsigned long jiffies, + struct timeval *value); + +diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h +index 6883e19..e854fcb 100644 +--- a/include/linux/kallsyms.h ++++ b/include/linux/kallsyms.h +@@ -15,7 +15,8 @@ + + struct module; + +-#ifdef CONFIG_KALLSYMS ++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS) ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* Lookup the address for a symbol. Returns 0 if not found. */ + unsigned long kallsyms_lookup_name(const char *name); + +@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u + /* Stupid that this does nothing, but I didn't create this mess. 
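__intentional_overflow(-1) is an annotation for the size_overflow plugin; in that plugin's argument numbering, -1 refers to the function's return value. The jiffies converters above are expected to wrap or truncate for out-of-range inputs, so instrumenting them would generate false-positive overflow reports. A sketch with the marker stubbed out, assuming HZ=1000 purely for illustration:

#include <stdio.h>

#define __intentional_overflow(x)  /* stub: real marker is read by the plugin */
#define HZ 1000

/* Deliberately exempt from overflow instrumentation: the u64-to-uint
 * truncation is part of the contract, not a bug to report. */
static unsigned int __intentional_overflow(-1)
jiffies_to_msecs_demo(unsigned long j)
{
	return (unsigned int)(j * (1000 / HZ));
}

int main(void)
{
	printf("%u\n", jiffies_to_msecs_demo(5000));   /* 5000 ms */
	printf("%u\n", jiffies_to_msecs_demo(~0UL));   /* truncates; intentional */
	return 0;
}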
*/ + #define __print_symbol(fmt, addr) + #endif /*CONFIG_KALLSYMS*/ ++#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or ++ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */ ++extern unsigned long kallsyms_lookup_name(const char *name); ++extern void __print_symbol(const char *fmt, unsigned long address); ++extern int sprint_backtrace(char *buffer, unsigned long address); ++extern int sprint_symbol(char *buffer, unsigned long address); ++extern int sprint_symbol_no_offset(char *buffer, unsigned long address); ++const char *kallsyms_lookup(unsigned long addr, ++ unsigned long *symbolsize, ++ unsigned long *offset, ++ char **modname, char *namebuf); ++extern int kallsyms_lookup_size_offset(unsigned long addr, ++ unsigned long *symbolsize, ++ unsigned long *offset); ++#endif + + /* This macro allows us to keep printk typechecking */ + static __printf(1, 2) +diff --git a/include/linux/key-type.h b/include/linux/key-type.h +index a74c3a8..28d3f21 100644 +--- a/include/linux/key-type.h ++++ b/include/linux/key-type.h +@@ -131,7 +131,7 @@ struct key_type { + /* internal fields */ + struct list_head link; /* link in types list */ + struct lock_class_key lock_class; /* key->sem lock class */ +-}; ++} __do_const; + + extern struct key_type key_type_keyring; + +diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index 6b06d37..c134867 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -52,7 +52,7 @@ extern int kgdb_connected; + extern int kgdb_io_module_registered; + + extern atomic_t kgdb_setting_breakpoint; +-extern atomic_t kgdb_cpu_doing_single_step; ++extern atomic_unchecked_t kgdb_cpu_doing_single_step; + + extern struct task_struct *kgdb_usethread; + extern struct task_struct *kgdb_contthread; +@@ -254,7 +254,7 @@ struct kgdb_arch { + void (*correct_hw_break)(void); + + void (*enable_nmi)(bool on); +-}; ++} __do_const; + + /** + * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. +@@ -279,7 +279,7 @@ struct kgdb_io { + void (*pre_exception) (void); + void (*post_exception) (void); + int is_console; +-}; ++} __do_const; + + extern struct kgdb_arch arch_kgdb_ops; + +diff --git a/include/linux/kmod.h b/include/linux/kmod.h +index 0555cc6..40116ce 100644 +--- a/include/linux/kmod.h ++++ b/include/linux/kmod.h +@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */ + * usually useless though. */ + extern __printf(2, 3) + int __request_module(bool wait, const char *name, ...); ++extern __printf(3, 4) ++int ___request_module(bool wait, char *param_name, const char *name, ...); + #define request_module(mod...) __request_module(true, mod) + #define request_module_nowait(mod...) __request_module(false, mod) + #define try_then_request_module(x, mod...) 
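The kallsyms hunk is a compile-time visibility gate: under GRKERNSEC_HIDESYM the real lookup prototypes are only exposed to the handful of translation units (kallsyms.c, vsnprintf.c, kprobes.c, arch/x86/kernel/dumpstack.c) that define __INCLUDED_BY_HIDESYM, so ordinary code never even links against the symbol-address API. A toy of the same gating, with an invented PRIVILEGED_TU macro; build once with and once without -DPRIVILEGED_TU to see both sides:

#include <stdio.h>

#ifdef PRIVILEGED_TU
/* the "whitelisted" build sees a real lookup */
static unsigned long lookup_symbol(const char *name)
{
	return name[0] ? 0xffffffff81000000UL : 0;  /* pretend address */
}
#else
/* everyone else gets an inert stub, so addresses cannot leak */
static unsigned long lookup_symbol(const char *name)
{
	(void)name;
	return 0;
}
#endif

int main(void)
{
	printf("printk @ %#lx\n", lookup_symbol("printk"));
	return 0;
}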
\ +@@ -57,6 +59,9 @@ struct subprocess_info { + struct work_struct work; + struct completion *complete; + char *path; ++#ifdef CONFIG_GRKERNSEC ++ char *origpath; ++#endif + char **argv; + char **envp; + int wait; +diff --git a/include/linux/kobject.h b/include/linux/kobject.h +index 926afb6..58dd6e5 100644 +--- a/include/linux/kobject.h ++++ b/include/linux/kobject.h +@@ -116,7 +116,7 @@ struct kobj_type { + struct attribute **default_attrs; + const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); + const void *(*namespace)(struct kobject *kobj); +-}; ++} __do_const; + + struct kobj_uevent_env { + char *envp[UEVENT_NUM_ENVP]; +@@ -139,6 +139,7 @@ struct kobj_attribute { + ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count); + }; ++typedef struct kobj_attribute __no_const kobj_attribute_no_const; + + extern const struct sysfs_ops kobj_sysfs_ops; + +@@ -166,7 +167,7 @@ struct kset { + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +-}; ++} __randomize_layout; + + extern void kset_init(struct kset *kset); + extern int __must_check kset_register(struct kset *kset); +diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h +index df32d25..fb52e27 100644 +--- a/include/linux/kobject_ns.h ++++ b/include/linux/kobject_ns.h +@@ -44,7 +44,7 @@ struct kobj_ns_type_operations { + const void *(*netlink_ns)(struct sock *sk); + const void *(*initial_ns)(void); + void (*drop_ns)(void *); +-}; ++} __do_const; + + int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); + int kobj_ns_type_registered(enum kobj_ns_type type); +diff --git a/include/linux/kref.h b/include/linux/kref.h +index 484604d..0f6c5b6 100644 +--- a/include/linux/kref.h ++++ b/include/linux/kref.h +@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref) + static inline int kref_sub(struct kref *kref, unsigned int count, + void (*release)(struct kref *kref)) + { +- WARN_ON(release == NULL); ++ BUG_ON(release == NULL); + + if (atomic_sub_and_test((int) count, &kref->refcount)) { + release(kref); +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index b8e9a43..632678d 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void) + { + } + #endif +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module); + void kvm_exit(void); + +@@ -621,7 +621,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg); + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +-int kvm_arch_init(void *opaque); ++int kvm_arch_init(const void *opaque); + void kvm_arch_exit(void); + + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +diff --git a/include/linux/libata.h b/include/linux/libata.h +index e13b3ae..5f450e6 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -977,7 +977,7 @@ struct ata_port_operations { + * fields must be pointers. 
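The kref change a few hunks up turns a NULL release callback from a warning into an immediate BUG(): continuing past that point either leaks the object or frees it through an unknown path, and grsecurity's policy is to stop the offender. A minimal non-atomic refcount showing the contract, with assert() standing in for BUG_ON (the real kref_sub is an atomic operation):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct kref_demo { int refcount; };

static void kref_put_demo(struct kref_demo *k,
			  void (*release)(struct kref_demo *))
{
	assert(release != NULL);          /* BUG_ON(): no NULL release */
	if (--k->refcount == 0)
		release(k);
}

static void obj_release(struct kref_demo *k)
{
	printf("releasing object\n");
	free(k);
}

int main(void)
{
	struct kref_demo *obj = malloc(sizeof(*obj));
	obj->refcount = 2;
	kref_put_demo(obj, obj_release);  /* 2 -> 1 */
	kref_put_demo(obj, obj_release);  /* 1 -> 0, released */
	return 0;
}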
+ */ + const struct ata_port_operations *inherits; +-}; ++} __do_const; + + struct ata_port_info { + unsigned long flags; +diff --git a/include/linux/linkage.h b/include/linux/linkage.h +index a6a42dd..6c5ebce 100644 +--- a/include/linux/linkage.h ++++ b/include/linux/linkage.h +@@ -36,6 +36,7 @@ + #endif + + #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) ++#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE) + #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) + + /* +diff --git a/include/linux/list.h b/include/linux/list.h +index ef95941..82db65a 100644 +--- a/include/linux/list.h ++++ b/include/linux/list.h +@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry); + extern void list_del(struct list_head *entry); + #endif + ++extern void __pax_list_add(struct list_head *new, ++ struct list_head *prev, ++ struct list_head *next); ++static inline void pax_list_add(struct list_head *new, struct list_head *head) ++{ ++ __pax_list_add(new, head, head->next); ++} ++static inline void pax_list_add_tail(struct list_head *new, struct list_head *head) ++{ ++ __pax_list_add(new, head->prev, head); ++} ++extern void pax_list_del(struct list_head *entry); ++ + /** + * list_replace - replace old entry by new one + * @old : the element to be replaced +@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry) + INIT_LIST_HEAD(entry); + } + ++extern void pax_list_del_init(struct list_head *entry); ++ + /** + * list_move - delete from one list and add as another's head + * @list: the entry to move +diff --git a/include/linux/math64.h b/include/linux/math64.h +index c45c089..298841c 100644 +--- a/include/linux/math64.h ++++ b/include/linux/math64.h +@@ -15,7 +15,7 @@ + * This is commonly provided by 32bit archs to provide an optimized 64bit + * divide. + */ +-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) ++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) + { + *remainder = dividend % divisor; + return dividend / divisor; +@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) + /** + * div64_u64 - unsigned 64bit divide with 64bit divisor + */ +-static inline u64 div64_u64(u64 dividend, u64 divisor) ++static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor) + { + return dividend / divisor; + } +@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor) + #define div64_ul(x, y) div_u64((x), (y)) + + #ifndef div_u64_rem +-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) ++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) + { + *remainder = do_div(dividend, divisor); + return dividend; +@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); + #endif + + #ifndef div64_u64 +-extern u64 div64_u64(u64 dividend, u64 divisor); ++extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor); + #endif + + #ifndef div64_s64 +@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor); + * divide. 
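pax_list_add/pax_list_del, declared in the list.h hunk above, exist for lists whose nodes have been moved into read-only memory; as I understand the implementation, they validate link consistency in the style of CONFIG_DEBUG_LIST and then perform the stores through a temporary kernel write window (pax_open_kernel/pax_close_kernel). The sketch below shows only the corruption checks, the part that has a user-space analogue:

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void pax_list_add_demo(struct list_head *new,
			      struct list_head *prev, struct list_head *next)
{
	/* refuse to operate on a corrupted or self-referential link */
	if (next->prev != prev || prev->next != next ||
	    new == prev || new == next) {
		fprintf(stderr, "list corruption detected\n");
		abort();  /* the kernel would BUG() here */
	}
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct list_head a, b;
	pax_list_add_demo(&a, &head, head.next);  /* add at head */
	pax_list_add_demo(&b, &head, head.next);  /* add at head */
	printf("ok: %d nodes linked\n", (head.next == &b) + (b.next == &a));
	return 0;
}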
+ */ + #ifndef div_u64 +-static inline u64 div_u64(u64 dividend, u32 divisor) ++static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor) + { + u32 remainder; + return div_u64_rem(dividend, divisor, &remainder); +diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h +index 5bba088..7ad4ae7 100644 +--- a/include/linux/mempolicy.h ++++ b/include/linux/mempolicy.h +@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol) + } + + #define vma_policy(vma) ((vma)->vm_policy) ++static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol) ++{ ++ vma->vm_policy = pol; ++} + + static inline void mpol_get(struct mempolicy *pol) + { +@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p) + } + + #define vma_policy(vma) NULL ++static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol) ++{ ++} + + static inline int + vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) +diff --git a/include/linux/mm.h b/include/linux/mm.h +index c1b7414..5ea2ad8 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp); + #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ + #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ + #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */ ++#endif ++ + #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ + + #ifdef CONFIG_MEM_SOFT_DIRTY +@@ -229,8 +234,8 @@ struct vm_operations_struct { + /* called by access_process_vm when get_user_pages() fails, typically + * for use by special VMAs that can switch between memory and hardware + */ +- int (*access)(struct vm_area_struct *vma, unsigned long addr, +- void *buf, int len, int write); ++ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr, ++ void *buf, size_t len, int write); + #ifdef CONFIG_NUMA + /* + * set_policy() op must add a reference to any non-NULL @new mempolicy +@@ -260,6 +265,7 @@ struct vm_operations_struct { + int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, + unsigned long size, pgoff_t pgoff); + }; ++typedef struct vm_operations_struct __no_const vm_operations_struct_no_const; + + struct mmu_gather; + struct inode; +@@ -1112,8 +1118,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address, + unsigned long *pfn); + int follow_phys(struct vm_area_struct *vma, unsigned long address, + unsigned int flags, unsigned long *prot, resource_size_t *phys); +-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, +- void *buf, int len, int write); ++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr, ++ void *buf, size_t len, int write); + + static inline void unmap_shared_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen) +@@ -1152,9 +1158,9 @@ static inline int fixup_user_fault(struct task_struct *tsk, + } + #endif + +-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); +-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, +- void *buf, int len, int write); ++extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write); ++extern ssize_t access_remote_vm(struct 
mm_struct *mm, unsigned long addr, ++ void *buf, size_t len, int write); + + long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, +@@ -1186,34 +1192,6 @@ int set_page_dirty(struct page *page); + int set_page_dirty_lock(struct page *page); + int clear_page_dirty_for_io(struct page *page); + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- +-static inline int stack_guard_page_start(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_growsdown(vma->vm_prev, addr); +-} +- +-/* Is the vma a continuation of the stack vma below it? */ +-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +-} +- +-static inline int stack_guard_page_end(struct vm_area_struct *vma, +- unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSUP) && +- (vma->vm_end == addr) && +- !vma_growsup(vma->vm_next, addr); +-} +- + extern pid_t + vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); + +@@ -1313,6 +1291,15 @@ static inline void sync_mm_rss(struct mm_struct *mm) + } + #endif + ++#ifdef CONFIG_MMU ++pgprot_t vm_get_page_prot(vm_flags_t vm_flags); ++#else ++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) ++{ ++ return __pgprot(0); ++} ++#endif ++ + int vma_wants_writenotify(struct vm_area_struct *vma); + + extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, +@@ -1331,8 +1318,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, + { + return 0; + } ++ ++static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, ++ unsigned long address) ++{ ++ return 0; ++} + #else + int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); ++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address); + #endif + + #ifdef __PAGETABLE_PMD_FOLDED +@@ -1341,8 +1335,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, + { + return 0; + } ++ ++static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, ++ unsigned long address) ++{ ++ return 0; ++} + #else + int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); ++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address); + #endif + + int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -1360,11 +1361,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a + NULL: pud_offset(pgd, address); + } + ++static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address) ++{ ++ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))? ++ NULL: pud_offset(pgd, address); ++} ++ + static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) + { + return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? + NULL: pmd_offset(pud, address); + } ++ ++static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address) ++{ ++ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))? 
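These mm.h hunks widen every buffer length and return type in the access_process_vm/access_remote_vm family from int to ssize_t/size_t. On a 64-bit kernel an int length silently truncates anything at or above 2^31, turning a huge request into a tiny or negative one and confusing the callers' bounds logic. The truncation itself, in two lines:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t len = 0x100000000UL;        /* 4 GiB request (64-bit build assumed) */
	int as_int = (int)len;             /* the old int-typed parameters */
	ssize_t as_ssize = (ssize_t)len;   /* the patched ssize_t/size_t ones */

	printf("int: %d  ssize_t: %zd\n", as_int, as_ssize);
	/* prints "int: 0" -- the 4 GiB length silently became zero */
	return 0;
}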
++ NULL: pmd_offset(pud, address); ++} + #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ + + #if USE_SPLIT_PTE_PTLOCKS +@@ -1754,7 +1767,7 @@ extern int install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, struct page **pages); + +-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); ++extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1); + + extern unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); +@@ -1762,6 +1775,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, unsigned long flags, + unsigned long pgoff, unsigned long *populate); + extern int do_munmap(struct mm_struct *, unsigned long, size_t); ++extern int __do_munmap(struct mm_struct *, unsigned long, size_t); + + #ifdef CONFIG_MMU + extern int __mm_populate(unsigned long addr, unsigned long len, +@@ -1790,10 +1804,11 @@ struct vm_unmapped_area_info { + unsigned long high_limit; + unsigned long align_mask; + unsigned long align_offset; ++ unsigned long threadstack_offset; + }; + +-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); +-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); ++extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info); ++extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info); + + /* + * Search for an unmapped address range. +@@ -1805,7 +1820,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); + * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) + */ + static inline unsigned long +-vm_unmapped_area(struct vm_unmapped_area_info *info) ++vm_unmapped_area(const struct vm_unmapped_area_info *info) + { + if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN)) + return unmapped_area(info); +@@ -1868,6 +1883,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add + extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + ++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); ++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); ++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl); ++ + /* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. 
*/ + static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +@@ -1896,15 +1915,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, + return vma; + } + +-#ifdef CONFIG_MMU +-pgprot_t vm_get_page_prot(unsigned long vm_flags); +-#else +-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) +-{ +- return __pgprot(0); +-} +-#endif +- + #ifdef CONFIG_NUMA_BALANCING + unsigned long change_prot_numa(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +@@ -1956,6 +1966,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); + static inline void vm_stat_account(struct mm_struct *mm, + unsigned long flags, struct file *file, long pages) + { ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC))) ++#endif ++ + mm->total_vm += pages; + } + #endif /* CONFIG_PROC_FS */ +@@ -2037,7 +2052,7 @@ extern int unpoison_memory(unsigned long pfn); + extern int sysctl_memory_failure_early_kill; + extern int sysctl_memory_failure_recovery; + extern void shake_page(struct page *p, int access); +-extern atomic_long_t num_poisoned_pages; ++extern atomic_long_unchecked_t num_poisoned_pages; + extern int soft_offline_page(struct page *page, int flags); + + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) +@@ -2072,5 +2087,11 @@ void __init setup_nr_node_ids(void); + static inline void setup_nr_node_ids(void) {} + #endif + ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); ++#else ++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} ++#endif ++ + #endif /* __KERNEL__ */ + #endif /* _LINUX_MM_H */ +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 290901a..e99b01c 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -307,7 +307,9 @@ struct vm_area_struct { + #ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + #endif +-}; ++ ++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ ++} __randomize_layout; + + struct core_thread { + struct task_struct *task; +@@ -453,7 +455,25 @@ struct mm_struct { + bool tlb_flush_pending; + #endif + struct uprobes_state uprobes_state; +-}; ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ unsigned long pax_flags; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ unsigned long call_dl_resolve; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) ++ unsigned long call_syscall; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ unsigned long delta_mmap; /* randomized offset */ ++ unsigned long delta_stack; /* randomized offset */ ++#endif ++ ++} __randomize_layout; + + static inline void mm_init_cpumask(struct mm_struct *mm) + { +diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h +index c5d5278..f0b68c8 100644 +--- a/include/linux/mmiotrace.h ++++ b/include/linux/mmiotrace.h +@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); + /* Called from ioremap.c */ + extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, + void __iomem *addr); +-extern void mmiotrace_iounmap(volatile void __iomem *addr); ++extern void mmiotrace_iounmap(const volatile void __iomem *addr); + + /* For anyone to insert markers. 
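The new mm_struct tail above carries PaX's per-process state: pax_flags records which features (MF_PAX_RANDMMAP and friends) are active, and under CONFIG_PAX_ASLR delta_mmap and delta_stack hold the random displacements later applied to the mmap and stack bases. The sketch below shows the usual shape of such a derivation, masking a random value to N bits and scaling to page granularity; the bit widths are illustrative, not the per-architecture PAX_DELTA_* values:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT       12
#define DELTA_MMAP_BITS  28   /* illustrative; PaX sizes these per-arch */
#define DELTA_STACK_BITS 24

/* stand-in for pax_get_random_long() masked to `bits` bits */
static unsigned long rand_bits(unsigned int bits)
{
	unsigned long r = ((unsigned long)rand() << 16) ^ (unsigned long)rand();
	return r & ((1UL << bits) - 1);
}

int main(void)
{
	unsigned long delta_mmap, delta_stack;

	srand((unsigned)time(NULL));
	delta_mmap  = rand_bits(DELTA_MMAP_BITS)  << PAGE_SHIFT;
	delta_stack = rand_bits(DELTA_STACK_BITS) << PAGE_SHIFT;

	printf("mmap base displaced by  %#lx\n", delta_mmap);
	printf("stack base displaced by %#lx\n", delta_stack);
	return 0;
}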
Remember trailing newline. */ + extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...); +@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset, + { + } + +-static inline void mmiotrace_iounmap(volatile void __iomem *addr) ++static inline void mmiotrace_iounmap(const volatile void __iomem *addr) + { + } + +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index e6800f0..d59674e 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -400,7 +400,7 @@ struct zone { + unsigned long flags; /* zone flags, see below */ + + /* Zone statistics */ +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + /* + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on +diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h +index 45e9214..4a547ac 100644 +--- a/include/linux/mod_devicetable.h ++++ b/include/linux/mod_devicetable.h +@@ -139,7 +139,7 @@ struct usb_device_id { + #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 + #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400 + +-#define HID_ANY_ID (~0) ++#define HID_ANY_ID (~0U) + #define HID_BUS_ANY 0xffff + #define HID_GROUP_ANY 0x0000 + +@@ -467,7 +467,7 @@ struct dmi_system_id { + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +-}; ++} __do_const; + /* + * struct dmi_device_id appears during expansion of + * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it +diff --git a/include/linux/module.h b/include/linux/module.h +index eaf60ff..641979a 100644 +--- a/include/linux/module.h ++++ b/include/linux/module.h +@@ -17,9 +17,11 @@ + #include <linux/moduleparam.h> + #include <linux/tracepoint.h> + #include <linux/export.h> ++#include <linux/fs.h> + + #include <linux/percpu.h> + #include <asm/module.h> ++#include <asm/pgtable.h> + + /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */ + #define MODULE_SIG_STRING "~Module signature appended~\n" +@@ -42,7 +44,7 @@ struct module_kobject { + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +-}; ++} __randomize_layout; + + struct module_attribute { + struct attribute attr; +@@ -54,12 +56,13 @@ struct module_attribute { + int (*test)(struct module *); + void (*free)(struct module *); + }; ++typedef struct module_attribute __no_const module_attribute_no_const; + + struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +-} __attribute__ ((__aligned__(sizeof(void *)))); ++} __do_const __attribute__ ((__aligned__(sizeof(void *)))); + + extern ssize_t __modver_version_show(struct module_attribute *, + struct module_kobject *, char *); +@@ -238,7 +241,7 @@ struct module { + + /* Sysfs stuff. */ + struct module_kobject mkobj; +- struct module_attribute *modinfo_attrs; ++ module_attribute_no_const *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; +@@ -287,19 +290,16 @@ struct module { + int (*init)(void); + + /* If this is non-NULL, vfree after init() returns */ +- void *module_init; ++ void *module_init_rx, *module_init_rw; + + /* Here is the actual code + data, vfree'd on unload. */ +- void *module_core; ++ void *module_core_rx, *module_core_rw; + + /* Here are the sizes of the init and core sections */ +- unsigned int init_size, core_size; ++ unsigned int init_size_rw, core_size_rw; + + /* The size of the executable code in each section. 
*/ +- unsigned int init_text_size, core_text_size; +- +- /* Size of RO sections of the module (text+rodata) */ +- unsigned int init_ro_size, core_ro_size; ++ unsigned int init_size_rx, core_size_rx; + + /* Arch-specific module values */ + struct mod_arch_specific arch; +@@ -355,6 +355,10 @@ struct module { + #ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call **trace_events; + unsigned int num_trace_events; ++ struct file_operations trace_id; ++ struct file_operations trace_enable; ++ struct file_operations trace_format; ++ struct file_operations trace_filter; + #endif + #ifdef CONFIG_FTRACE_MCOUNT_RECORD + unsigned int num_ftrace_callsites; +@@ -378,7 +382,7 @@ struct module { + ctor_fn_t *ctors; + unsigned int num_ctors; + #endif +-}; ++} __randomize_layout; + #ifndef MODULE_ARCH_INIT + #define MODULE_ARCH_INIT {} + #endif +@@ -399,16 +403,46 @@ bool is_module_address(unsigned long addr); + bool is_module_percpu_address(unsigned long addr); + bool is_module_text_address(unsigned long addr); + ++static inline int within_module_range(unsigned long addr, void *start, unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (ktla_ktva(addr) >= (unsigned long)start && ++ ktla_ktva(addr) < (unsigned long)start + size) ++ return 1; ++#endif ++ ++ return ((void *)addr >= start && (void *)addr < start + size); ++} ++ ++static inline int within_module_core_rx(unsigned long addr, const struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx); ++} ++ ++static inline int within_module_core_rw(unsigned long addr, const struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw); ++} ++ ++static inline int within_module_init_rx(unsigned long addr, const struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx); ++} ++ ++static inline int within_module_init_rw(unsigned long addr, const struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw); ++} ++ + static inline int within_module_core(unsigned long addr, const struct module *mod) + { +- return (unsigned long)mod->module_core <= addr && +- addr < (unsigned long)mod->module_core + mod->core_size; ++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod); + } + + static inline int within_module_init(unsigned long addr, const struct module *mod) + { +- return (unsigned long)mod->module_init <= addr && +- addr < (unsigned long)mod->module_init + mod->init_size; ++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod); + } + + /* Search for module by name: must hold module_mutex. */ +diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h +index 560ca53..ef621ef 100644 +--- a/include/linux/moduleloader.h ++++ b/include/linux/moduleloader.h +@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); + sections. Returns NULL on failure. */ + void *module_alloc(unsigned long size); + ++#ifdef CONFIG_PAX_KERNEXEC ++void *module_alloc_exec(unsigned long size); ++#else ++#define module_alloc_exec(x) module_alloc(x) ++#endif ++ + /* Free memory returned from module_alloc. */ + void module_free(struct module *mod, void *module_region); + ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region); ++#else ++#define module_free_exec(x, y) module_free((x), (y)) ++#endif ++ + /* + * Apply the given relocation to the (simplified) ELF. Return -error + * or 0. 
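Splitting module_core/module_init into _rx and _rw halves (with module_alloc_exec for the code side, in the moduleloader.h hunk) is KERNEXEC's W^X policy applied to modules: code plus rodata live in memory that is never writable, data in memory that is never executable, and within_module_*() must now check both regions. The closest user-space analogue is the mprotect dance sketched below, which writes the code while the page is RW and drops write before ever executing; amusingly, PaX's own user-space MPROTECT feature refuses exactly this RW-to-RX transition. Error handling is omitted and x86-64 machine code is assumed:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t sz = (size_t)sysconf(_SC_PAGESIZE);

	/* "rw" half: data, never executable */
	unsigned char *rw = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* "rx" half: briefly writable to load the code, then write is
	 * dropped for good before the first execution */
	unsigned char *rx = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };
	memcpy(rx, code, sizeof(code));              /* mov eax, 42; ret */
	mprotect(rx, sz, PROT_READ | PROT_EXEC);

	int (*fn)(void) = (int (*)(void))rx;
	printf("code returns %d, data lives at %p\n", fn(), (void *)rw);
	return 0;
}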
+@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs, + unsigned int relsec, + struct module *me) + { ++#ifdef CONFIG_MODULES + printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name); ++#endif + return -ENOEXEC; + } + #endif +@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs, + unsigned int relsec, + struct module *me) + { ++#ifdef CONFIG_MODULES + printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name); ++#endif + return -ENOEXEC; + } + #endif +diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h +index c3eb102..073c4a6 100644 +--- a/include/linux/moduleparam.h ++++ b/include/linux/moduleparam.h +@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void) + * @len is usually just sizeof(string). + */ + #define module_param_string(name, string, len, perm) \ +- static const struct kparam_string __param_string_##name \ ++ static const struct kparam_string __param_string_##name __used \ + = { len, string }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_ops_string, \ +@@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); + */ + #define module_param_array_named(name, array, type, nump, perm) \ + param_check_##type(name, &(array)[0]); \ +- static const struct kparam_array __param_arr_##name \ ++ static const struct kparam_array __param_arr_##name __used \ + = { .max = ARRAY_SIZE(array), .num = nump, \ + .ops = ¶m_ops_##type, \ + .elemsize = sizeof(array[0]), .elem = array }; \ +diff --git a/include/linux/mount.h b/include/linux/mount.h +index 839bac2..a96b37c 100644 +--- a/include/linux/mount.h ++++ b/include/linux/mount.h +@@ -59,7 +59,7 @@ struct vfsmount { + struct dentry *mnt_root; /* root of the mounted tree */ + struct super_block *mnt_sb; /* pointer to superblock */ + int mnt_flags; +-}; ++} __randomize_layout; + + struct file; /* forward dec */ + +diff --git a/include/linux/namei.h b/include/linux/namei.h +index 492de72..1bddcd4 100644 +--- a/include/linux/namei.h ++++ b/include/linux/namei.h +@@ -19,7 +19,7 @@ struct nameidata { + unsigned seq, m_seq; + int last_type; + unsigned depth; +- char *saved_names[MAX_NESTED_LINKS + 1]; ++ const char *saved_names[MAX_NESTED_LINKS + 1]; + }; + + /* +@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *); + + extern void nd_jump_link(struct nameidata *nd, struct path *path); + +-static inline void nd_set_link(struct nameidata *nd, char *path) ++static inline void nd_set_link(struct nameidata *nd, const char *path) + { + nd->saved_names[nd->depth] = path; + } + +-static inline char *nd_get_link(struct nameidata *nd) ++static inline const char *nd_get_link(const struct nameidata *nd) + { + return nd->saved_names[nd->depth]; + } +diff --git a/include/linux/net.h b/include/linux/net.h +index 17d8339..81656c0 100644 +--- a/include/linux/net.h ++++ b/include/linux/net.h +@@ -192,7 +192,7 @@ struct net_proto_family { + int (*create)(struct net *net, struct socket *sock, + int protocol, int kern); + struct module *owner; +-}; ++} __do_const; + + struct iovec; + struct kvec; +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 911718f..f673407 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1147,6 +1147,7 @@ struct net_device_ops { + void *priv); + int (*ndo_get_lock_subclass)(struct net_device *dev); + }; ++typedef struct net_device_ops __no_const net_device_ops_no_const; + + /* + * The DEVICE structure. 
+@@ -1229,7 +1230,7 @@ struct net_device { + int iflink; + + struct net_device_stats stats; +- atomic_long_t rx_dropped; /* dropped packets by core network ++ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network + * Do not use this in drivers. + */ + +diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h +index 2077489..a15e561 100644 +--- a/include/linux/netfilter.h ++++ b/include/linux/netfilter.h +@@ -84,7 +84,7 @@ struct nf_sockopt_ops { + #endif + /* Use the module struct to lock set/get code in place */ + struct module *owner; +-}; ++} __do_const; + + /* Function to register/unregister hook points. */ + int nf_register_hook(struct nf_hook_ops *reg); +diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h +index 28c7436..2d6156a 100644 +--- a/include/linux/netfilter/nfnetlink.h ++++ b/include/linux/netfilter/nfnetlink.h +@@ -19,7 +19,7 @@ struct nfnl_callback { + const struct nlattr * const cda[]); + const struct nla_policy *policy; /* netlink attribute policy */ + const u_int16_t attr_count; /* number of nlattr's */ +-}; ++} __do_const; + + struct nfnetlink_subsystem { + const char *name; +diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h +new file mode 100644 +index 0000000..33f4af8 +--- /dev/null ++++ b/include/linux/netfilter/xt_gradm.h +@@ -0,0 +1,9 @@ ++#ifndef _LINUX_NETFILTER_XT_GRADM_H ++#define _LINUX_NETFILTER_XT_GRADM_H 1 ++ ++struct xt_gradm_mtinfo { ++ __u16 flags; ++ __u16 invflags; ++}; ++ ++#endif +diff --git a/include/linux/nls.h b/include/linux/nls.h +index 520681b..2b7fabb 100644 +--- a/include/linux/nls.h ++++ b/include/linux/nls.h +@@ -31,7 +31,7 @@ struct nls_table { + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +-}; ++} __do_const; + + /* this value hold the maximum octet of charset */ + #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */ +@@ -46,7 +46,7 @@ enum utf16_endian { + /* nls_base.c */ + extern int __register_nls(struct nls_table *, struct module *); + extern int unregister_nls(struct nls_table *); +-extern struct nls_table *load_nls(char *); ++extern struct nls_table *load_nls(const char *); + extern void unload_nls(struct nls_table *); + extern struct nls_table *load_nls_default(void); + #define register_nls(nls) __register_nls((nls), THIS_MODULE) +diff --git a/include/linux/notifier.h b/include/linux/notifier.h +index d14a4c3..a078786 100644 +--- a/include/linux/notifier.h ++++ b/include/linux/notifier.h +@@ -54,7 +54,8 @@ struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block __rcu *next; + int priority; +-}; ++} __do_const; ++typedef struct notifier_block __no_const notifier_block_no_const; + + struct atomic_notifier_head { + spinlock_t lock; +diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h +index b2a0f15..4d7da32 100644 +--- a/include/linux/oprofile.h ++++ b/include/linux/oprofile.h +@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root, + int oprofilefs_create_ro_ulong(struct dentry * root, + char const * name, ulong * val); + +-/** Create a file for read-only access to an atomic_t. */ ++/** Create a file for read-only access to an atomic_unchecked_t. 
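rx_dropped above, and the oprofile counter here, move to atomic_unchecked_t because PAX_REFCOUNT makes plain atomic_t detect overflow — a reference count that wraps is a use-after-free primitive — while pure statistics are harmless to wrap and must not trip the detector. A sketch of the two behaviours using GCC's __atomic builtins; the kernel implements the checked variant in per-arch assembly and recovers rather than aborting, so the abort() below is a simplification:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_demo_t;
typedef struct { volatile int counter; } atomic_unchecked_demo_t;

/* checked: overflowing a refcount means an exploitable bug */
static void atomic_inc_checked(atomic_demo_t *v)
{
	int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	if (old == INT_MAX)
		abort();  /* kernel raises a refcount-overflow report */
}

/* unchecked: statistics may wrap without consequence */
static void atomic_inc_unchecked(atomic_unchecked_demo_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_demo_t rx_dropped = { INT_MAX };
	atomic_inc_unchecked(&rx_dropped);   /* wraps, by design */
	printf("stats counter wrapped to %d\n", rx_dropped.counter);

	atomic_demo_t refs = { 1 };
	atomic_inc_checked(&refs);           /* fine: 1 -> 2 */
	printf("refcount now %d\n", refs.counter);
	return 0;
}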
*/ + int oprofilefs_create_ro_atomic(struct dentry * root, +- char const * name, atomic_t * val); ++ char const * name, atomic_unchecked_t * val); + + /** create a directory */ + struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name); +diff --git a/include/linux/padata.h b/include/linux/padata.h +index 4386946..f50c615 100644 +--- a/include/linux/padata.h ++++ b/include/linux/padata.h +@@ -129,7 +129,7 @@ struct parallel_data { + struct padata_serial_queue __percpu *squeue; + atomic_t reorder_objects; + atomic_t refcnt; +- atomic_t seq_nr; ++ atomic_unchecked_t seq_nr; + struct padata_cpumask cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; +diff --git a/include/linux/path.h b/include/linux/path.h +index d137218..be0c176 100644 +--- a/include/linux/path.h ++++ b/include/linux/path.h +@@ -1,13 +1,15 @@ + #ifndef _LINUX_PATH_H + #define _LINUX_PATH_H + ++#include <linux/compiler.h> ++ + struct dentry; + struct vfsmount; + + struct path { + struct vfsmount *mnt; + struct dentry *dentry; +-}; ++} __randomize_layout; + + extern void path_get(const struct path *); + extern void path_put(const struct path *); +diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h +index 5f2e559..7d59314 100644 +--- a/include/linux/pci_hotplug.h ++++ b/include/linux/pci_hotplug.h +@@ -71,7 +71,8 @@ struct hotplug_slot_ops { + int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); + int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); + int (*reset_slot) (struct hotplug_slot *slot, int probe); +-}; ++} __do_const; ++typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const; + + /** + * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index e56b07f..aef789b 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -328,8 +328,8 @@ struct perf_event { + + enum perf_event_active_state state; + unsigned int attach_state; +- local64_t count; +- atomic64_t child_count; ++ local64_t count; /* PaX: fix it one day */ ++ atomic64_unchecked_t child_count; + + /* + * These are the total time in nanoseconds that the event +@@ -380,8 +380,8 @@ struct perf_event { + * These accumulate total time (in nanoseconds) that children + * events have been enabled and running, respectively. 
+ */ +- atomic64_t child_total_time_enabled; +- atomic64_t child_total_time_running; ++ atomic64_unchecked_t child_total_time_enabled; ++ atomic64_unchecked_t child_total_time_running; + + /* + * Protect attach/detach and child_list: +@@ -708,7 +708,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 + entry->ip[entry->nr++] = ip; + } + +-extern int sysctl_perf_event_paranoid; ++extern int sysctl_perf_event_legitimately_concerned; + extern int sysctl_perf_event_mlock; + extern int sysctl_perf_event_sample_rate; + extern int sysctl_perf_cpu_time_max_percent; +@@ -723,19 +723,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + loff_t *ppos); + + ++static inline bool perf_paranoid_any(void) ++{ ++ return sysctl_perf_event_legitimately_concerned > 2; ++} ++ + static inline bool perf_paranoid_tracepoint_raw(void) + { +- return sysctl_perf_event_paranoid > -1; ++ return sysctl_perf_event_legitimately_concerned > -1; + } + + static inline bool perf_paranoid_cpu(void) + { +- return sysctl_perf_event_paranoid > 0; ++ return sysctl_perf_event_legitimately_concerned > 0; + } + + static inline bool perf_paranoid_kernel(void) + { +- return sysctl_perf_event_paranoid > 1; ++ return sysctl_perf_event_legitimately_concerned > 1; + } + + extern void perf_event_init(void); +@@ -851,7 +856,7 @@ struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +-}; ++} __do_const; + + #define PMU_EVENT_ATTR(_name, _var, _id, _show) \ + static struct perf_pmu_events_attr _var = { \ +diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h +index 7246ef3..1539ea4 100644 +--- a/include/linux/pid_namespace.h ++++ b/include/linux/pid_namespace.h +@@ -43,7 +43,7 @@ struct pid_namespace { + int hide_pid; + int reboot; /* group exit code if this pidns was rebooted */ + unsigned int proc_inum; +-}; ++} __randomize_layout; + + extern struct pid_namespace init_pid_ns; + +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h +index ab57526..94598804 100644 +--- a/include/linux/pipe_fs_i.h ++++ b/include/linux/pipe_fs_i.h +@@ -47,10 +47,10 @@ struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t wait; + unsigned int nrbufs, curbuf, buffers; +- unsigned int readers; +- unsigned int writers; +- unsigned int files; +- unsigned int waiting_writers; ++ atomic_t readers; ++ atomic_t writers; ++ atomic_t files; ++ atomic_t waiting_writers; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; +diff --git a/include/linux/pm.h b/include/linux/pm.h +index 8c6583a..febb84c 100644 +--- a/include/linux/pm.h ++++ b/include/linux/pm.h +@@ -597,6 +597,7 @@ extern int dev_pm_put_subsys_data(struct device *dev); + struct dev_pm_domain { + struct dev_pm_ops ops; + }; ++typedef struct dev_pm_domain __no_const dev_pm_domain_no_const; + + /* + * The PM_EVENT_ messages are also used by drivers implementing the legacy +diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h +index 7c1d252..0e7061d 100644 +--- a/include/linux/pm_domain.h ++++ b/include/linux/pm_domain.h +@@ -44,11 +44,11 @@ struct gpd_dev_ops { + int (*thaw_early)(struct device *dev); + int (*thaw)(struct device *dev); + bool (*active_wakeup)(struct device *dev); +-}; ++} __no_const; + + struct gpd_cpu_data { + unsigned int saved_exit_latency; +- struct cpuidle_state *idle_state; ++ cpuidle_state_no_const *idle_state; + }; + + struct generic_pm_domain { +diff --git a/include/linux/pm_runtime.h 
b/include/linux/pm_runtime.h +index 16c9a62..f9f0838 100644 +--- a/include/linux/pm_runtime.h ++++ b/include/linux/pm_runtime.h +@@ -109,7 +109,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) + + static inline void pm_runtime_mark_last_busy(struct device *dev) + { +- ACCESS_ONCE(dev->power.last_busy) = jiffies; ++ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies; + } + + #else /* !CONFIG_PM_RUNTIME */ +diff --git a/include/linux/pnp.h b/include/linux/pnp.h +index 195aafc..49a7bc2 100644 +--- a/include/linux/pnp.h ++++ b/include/linux/pnp.h +@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) + struct pnp_fixup { + char id[7]; + void (*quirk_function) (struct pnp_dev * dev); /* fixup function */ +-}; ++} __do_const; + + /* config parameters */ + #define PNP_CONFIG_NORMAL 0x0001 +diff --git a/include/linux/poison.h b/include/linux/poison.h +index 2110a81..13a11bb 100644 +--- a/include/linux/poison.h ++++ b/include/linux/poison.h +@@ -19,8 +19,8 @@ + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) +-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) ++#define LIST_POISON1 ((void *) (long)0xFFFFFF01) ++#define LIST_POISON2 ((void *) (long)0xFFFFFF02) + + /********** include/linux/timer.h **********/ + /* +diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h +index d8b187c3..9a9257a 100644 +--- a/include/linux/power/smartreflex.h ++++ b/include/linux/power/smartreflex.h +@@ -238,7 +238,7 @@ struct omap_sr_class_data { + int (*notify)(struct omap_sr *sr, u32 status); + u8 notify_flags; + u8 class_type; +-}; ++} __do_const; + + /** + * struct omap_sr_nvalue_table - Smartreflex n-target value info +diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h +index 4ea1d37..80f4b33 100644 +--- a/include/linux/ppp-comp.h ++++ b/include/linux/ppp-comp.h +@@ -84,7 +84,7 @@ struct compressor { + struct module *owner; + /* Extra skb space needed by the compressor algorithm */ + unsigned int comp_extra; +-}; ++} __do_const; + + /* + * The return value from decompress routine is the length of the +diff --git a/include/linux/preempt.h b/include/linux/preempt.h +index 1841b58..fbeebf8 100644 +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h +@@ -29,11 +29,16 @@ extern void preempt_count_sub(int val); + #define preempt_count_dec_and_test() __preempt_count_dec_and_test() + #endif + ++#define raw_preempt_count_add(val) __preempt_count_add(val) ++#define raw_preempt_count_sub(val) __preempt_count_sub(val) ++ + #define __preempt_count_inc() __preempt_count_add(1) + #define __preempt_count_dec() __preempt_count_sub(1) + + #define preempt_count_inc() preempt_count_add(1) ++#define raw_preempt_count_inc() raw_preempt_count_add(1) + #define preempt_count_dec() preempt_count_sub(1) ++#define raw_preempt_count_dec() raw_preempt_count_sub(1) + + #ifdef CONFIG_PREEMPT_COUNT + +@@ -43,6 +48,12 @@ do { \ + barrier(); \ + } while (0) + ++#define raw_preempt_disable() \ ++do { \ ++ raw_preempt_count_inc(); \ ++ barrier(); \ ++} while (0) ++ + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ +@@ -51,6 +62,12 @@ do { \ + + #define preempt_enable_no_resched() sched_preempt_enable_no_resched() + ++#define raw_preempt_enable_no_resched() \ ++do { \ ++ barrier(); \ ++ raw_preempt_count_dec(); \ ++} while (0) ++ + #ifdef CONFIG_PREEMPT + #define preempt_enable() \ + do { \ 
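The LIST_POISON change a few hunks up is an exploit-hardening detail: 0x00100100 and 0x00200200 are ordinary low addresses that user space can map, so a kernel bug that dereferences a poisoned list pointer could be steered into attacker-controlled data; 0xFFFFFF01/0xFFFFFF02 land in the kernel half of a 32-bit address space, where no user mapping can exist. The demo below claims the page behind the old poison value (Linux-specific; it may fail where vm.mmap_min_addr is set unusually high):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* the page containing the old LIST_POISON1 value 0x00100100 */
	void *want = (void *)0x00100000UL;
	void *got = mmap(want, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (got == want)
		printf("claimed %p: a poisoned-pointer dereference would now "
		       "read attacker-controlled memory\n", got);
	else
		perror("mmap");
	return 0;
}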
+@@ -115,8 +132,10 @@ do { \ + * region. + */ + #define preempt_disable() barrier() ++#define raw_preempt_disable() barrier() + #define sched_preempt_enable_no_resched() barrier() + #define preempt_enable_no_resched() barrier() ++#define raw_preempt_enable_no_resched() barrier() + #define preempt_enable() barrier() + #define preempt_check_resched() do { } while (0) + +@@ -130,11 +149,13 @@ do { \ + /* + * Modules have no business playing preemption tricks. + */ ++#ifndef CONFIG_PAX_KERNEXEC + #undef sched_preempt_enable_no_resched + #undef preempt_enable_no_resched + #undef preempt_enable_no_resched_notrace + #undef preempt_check_resched + #endif ++#endif + + #define preempt_set_need_resched() \ + do { \ +diff --git a/include/linux/printk.h b/include/linux/printk.h +index cbf094f..86007b7 100644 +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -114,6 +114,8 @@ static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) { } + #endif + ++extern int kptr_restrict; ++ + #ifdef CONFIG_PRINTK + asmlinkage __printf(5, 0) + int vprintk_emit(int facility, int level, +@@ -148,7 +150,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + + extern int printk_delay_msec; + extern int dmesg_restrict; +-extern int kptr_restrict; + + extern void wake_up_klogd(void); + +diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h +index 608e60a..bbcb1a0 100644 +--- a/include/linux/proc_fs.h ++++ b/include/linux/proc_fs.h +@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *); + extern struct proc_dir_entry *proc_symlink(const char *, + struct proc_dir_entry *, const char *); + extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *); ++extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *); + extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t, + struct proc_dir_entry *, void *); ++extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t, ++ struct proc_dir_entry *, void *); + extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, + struct proc_dir_entry *); + +@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create( + return proc_create_data(name, mode, parent, proc_fops, NULL); + } + ++static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode, ++ struct proc_dir_entry *parent, const struct file_operations *proc_fops) ++{ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL); ++#else ++ return proc_create_data(name, mode, parent, proc_fops, NULL); ++#endif ++} ++ ++ + extern void proc_set_size(struct proc_dir_entry *, loff_t); + extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t); + extern void *PDE_DATA(const struct inode *); +@@ -73,7 +89,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p + static inline struct proc_dir_entry *proc_net_mkdir( + struct net *net, const char *name, struct proc_dir_entry *parent) + { +- return proc_mkdir_data(name, 0, parent, net); ++ return proc_mkdir_data_restrict(name, 0, parent, net); + } + + #endif /* _LINUX_PROC_FS_H */ +diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h +index 34a1e10..70f6bde 100644 +--- a/include/linux/proc_ns.h ++++ b/include/linux/proc_ns.h +@@ -14,7 +14,7 @@ struct proc_ns_operations { + void 
(*put)(void *ns); + int (*install)(struct nsproxy *nsproxy, void *ns); + unsigned int (*inum)(void *ns); +-}; ++} __do_const __randomize_layout; + + struct proc_ns { + void *ns; +diff --git a/include/linux/quota.h b/include/linux/quota.h +index cc7494a..1e27036 100644 +--- a/include/linux/quota.h ++++ b/include/linux/quota.h +@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */ + + extern bool qid_eq(struct kqid left, struct kqid right); + extern bool qid_lt(struct kqid left, struct kqid right); +-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); ++extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1); + extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); + extern bool qid_valid(struct kqid qid); + +diff --git a/include/linux/random.h b/include/linux/random.h +index 1cfce0e..bf99e0b 100644 +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -9,9 +9,19 @@ + #include <uapi/linux/random.h> + + extern void add_device_randomness(const void *, unsigned int); ++ ++static inline void add_latent_entropy(void) ++{ ++ ++#ifdef LATENT_ENTROPY_PLUGIN ++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); ++#endif ++ ++} ++ + extern void add_input_randomness(unsigned int type, unsigned int code, +- unsigned int value); +-extern void add_interrupt_randomness(int irq, int irq_flags); ++ unsigned int value) __latent_entropy; ++extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; + + extern void get_random_bytes(void *buf, int nbytes); + extern void get_random_bytes_arch(void *buf, int nbytes); +@@ -22,10 +32,10 @@ extern int random_int_secret_init(void); + extern const struct file_operations random_fops, urandom_fops; + #endif + +-unsigned int get_random_int(void); ++unsigned int __intentional_overflow(-1) get_random_int(void); + unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); + +-u32 prandom_u32(void); ++u32 prandom_u32(void) __intentional_overflow(-1); + void prandom_bytes(void *buf, int nbytes); + void prandom_seed(u32 seed); + void prandom_reseed_late(void); +@@ -37,6 +47,11 @@ struct rnd_state { + u32 prandom_u32_state(struct rnd_state *state); + void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); + ++static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void) ++{ ++ return prandom_u32() + (sizeof(long) > 4 ? 
(unsigned long)prandom_u32() << 32 : 0); ++} ++ + /** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint +@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +-static inline u32 prandom_u32_max(u32 ep_ro) ++static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro) + { + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); + } +diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h +index fea49b5..2ac22bb 100644 +--- a/include/linux/rbtree_augmented.h ++++ b/include/linux/rbtree_augmented.h +@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ + old->rbaugmented = rbcompute(old); \ + } \ + rbstatic const struct rb_augment_callbacks rbname = { \ +- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ ++ .propagate = rbname ## _propagate, \ ++ .copy = rbname ## _copy, \ ++ .rotate = rbname ## _rotate \ + }; + + +diff --git a/include/linux/rculist.h b/include/linux/rculist.h +index dbaf990..52e07b8 100644 +--- a/include/linux/rculist.h ++++ b/include/linux/rculist.h +@@ -29,8 +29,8 @@ + */ + static inline void INIT_LIST_HEAD_RCU(struct list_head *list) + { +- ACCESS_ONCE(list->next) = list; +- ACCESS_ONCE(list->prev) = list; ++ ACCESS_ONCE_RW(list->next) = list; ++ ACCESS_ONCE_RW(list->prev) = list; + } + + /* +@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next); + #endif + ++void __pax_list_add_rcu(struct list_head *new, ++ struct list_head *prev, struct list_head *next); ++ + /** + * list_add_rcu - add a new entry to rcu-protected list + * @new: new entry to be added +@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head) + __list_add_rcu(new, head, head->next); + } + ++static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head) ++{ ++ __pax_list_add_rcu(new, head, head->next); ++} ++ + /** + * list_add_tail_rcu - add a new entry to rcu-protected list + * @new: new entry to be added +@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new, + __list_add_rcu(new, head->prev, head); + } + ++static inline void pax_list_add_tail_rcu(struct list_head *new, ++ struct list_head *head) ++{ ++ __pax_list_add_rcu(new, head->prev, head); ++} ++ + /** + * list_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. +@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry) + entry->prev = LIST_POISON2; + } + ++extern void pax_list_del_rcu(struct list_head *entry); ++ + /** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. 
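The rb_augment_callbacks hunk converts positional initializers to designated ones. That is more than style: once plugins may reorder or randomize structure fields, positional initialization can silently bind values to the wrong members, while designated initializers stay correct under any layout. Compare:

#include <stdio.h>

struct callbacks {
	void (*propagate)(int);
	void (*copy)(int);
	void (*rotate)(int);
};

static void f(int x) { printf("f(%d)\n", x); }

/* positional: breaks silently if the field order is ever shuffled */
static const struct callbacks fragile = { f, f, f };

/* designated: immune to reordering */
static const struct callbacks robust = {
	.propagate = f,
	.copy      = f,
	.rotate    = f,
};

int main(void)
{
	fragile.propagate(1);
	robust.rotate(2);
	return 0;
}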
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h +index 72bf3a0..853347f 100644 +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -588,7 +588,7 @@ static inline void rcu_preempt_sleep_check(void) + #define rcu_assign_pointer(p, v) \ + do { \ + smp_wmb(); \ +- ACCESS_ONCE(p) = RCU_INITIALIZER(v); \ ++ ACCESS_ONCE_RW(p) = RCU_INITIALIZER(v); \ + } while (0) + + +diff --git a/include/linux/reboot.h b/include/linux/reboot.h +index 9e7db9e..7d4fd72 100644 +--- a/include/linux/reboot.h ++++ b/include/linux/reboot.h +@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *); + */ + + extern void migrate_to_reboot_cpu(void); +-extern void machine_restart(char *cmd); +-extern void machine_halt(void); +-extern void machine_power_off(void); ++extern void machine_restart(char *cmd) __noreturn; ++extern void machine_halt(void) __noreturn; ++extern void machine_power_off(void) __noreturn; + + extern void machine_shutdown(void); + struct pt_regs; +@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *); + */ + + extern void kernel_restart_prepare(char *cmd); +-extern void kernel_restart(char *cmd); +-extern void kernel_halt(void); +-extern void kernel_power_off(void); ++extern void kernel_restart(char *cmd) __noreturn; ++extern void kernel_halt(void) __noreturn; ++extern void kernel_power_off(void) __noreturn; + + extern int C_A_D; /* for sysctl */ + void ctrl_alt_del(void); +@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force); + * Emergency restart, callable from an interrupt handler. + */ + +-extern void emergency_restart(void); ++extern void emergency_restart(void) __noreturn; + #include <asm/emergency-restart.h> + + #endif /* _LINUX_REBOOT_H */ +diff --git a/include/linux/regset.h b/include/linux/regset.h +index 8e0c9fe..ac4d221 100644 +--- a/include/linux/regset.h ++++ b/include/linux/regset.h +@@ -161,7 +161,8 @@ struct user_regset { + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +-}; ++} __do_const; ++typedef struct user_regset __no_const user_regset_no_const; + + /** + * struct user_regset_view - available regsets +diff --git a/include/linux/relay.h b/include/linux/relay.h +index d7c8359..818daf5 100644 +--- a/include/linux/relay.h ++++ b/include/linux/relay.h +@@ -157,7 +157,7 @@ struct rchan_callbacks + * The callback should return 0 if successful, negative if not. 
+ */ + int (*remove_buf_file)(struct dentry *dentry); +-}; ++} __no_const; + + /* + * CONFIG_RELAY kernel API, kernel/relay.c +diff --git a/include/linux/rio.h b/include/linux/rio.h +index b71d573..2f940bd 100644 +--- a/include/linux/rio.h ++++ b/include/linux/rio.h +@@ -355,7 +355,7 @@ struct rio_ops { + int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, + u64 rstart, u32 size, u32 flags); + void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); +-}; ++} __no_const; + + #define RIO_RESOURCE_MEM 0x00000100 + #define RIO_RESOURCE_DOORBELL 0x00000200 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h +index b66c211..13d2915 100644 +--- a/include/linux/rmap.h ++++ b/include/linux/rmap.h +@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) + void anon_vma_init(void); /* create anon_vma_cachep */ + int anon_vma_prepare(struct vm_area_struct *); + void unlink_anon_vmas(struct vm_area_struct *); +-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); +-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); ++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *); ++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *); + + static inline void anon_vma_merge(struct vm_area_struct *vma, + struct vm_area_struct *next) +diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h +index a964f72..b475afb 100644 +--- a/include/linux/scatterlist.h ++++ b/include/linux/scatterlist.h +@@ -1,6 +1,7 @@ + #ifndef _LINUX_SCATTERLIST_H + #define _LINUX_SCATTERLIST_H + ++#include <linux/sched.h> + #include <linux/string.h> + #include <linux/bug.h> + #include <linux/mm.h> +@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, + #ifdef CONFIG_DEBUG_SG + BUG_ON(!virt_addr_valid(buf)); + #endif ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ if (object_starts_on_stack(buf)) { ++ void *adjbuf = buf - current->stack + current->lowmem_stack; ++ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf)); ++ } else ++#endif + sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); + } + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index ccd0c6f..84d9030 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -129,6 +129,7 @@ struct fs_struct; + struct perf_event_context; + struct blk_plug; + struct filename; ++struct linux_binprm; + + /* + * List of flags we want to share for kernel threads, +@@ -369,7 +370,7 @@ extern char __sched_text_start[], __sched_text_end[]; + extern int in_sched_functions(unsigned long addr); + + #define MAX_SCHEDULE_TIMEOUT LONG_MAX +-extern signed long schedule_timeout(signed long timeout); ++extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1); + extern signed long schedule_timeout_interruptible(signed long timeout); + extern signed long schedule_timeout_killable(signed long timeout); + extern signed long schedule_timeout_uninterruptible(signed long timeout); +@@ -380,6 +381,19 @@ struct nsproxy; + struct user_namespace; + + #ifdef CONFIG_MMU ++ ++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK ++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags); ++#else ++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags) ++{ ++ return 0; ++} ++#endif ++ ++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, 
unsigned long addr, unsigned long len, unsigned long offset); ++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset); ++ + extern void arch_pick_mmap_layout(struct mm_struct *mm); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, +@@ -677,6 +691,17 @@ struct signal_struct { + #ifdef CONFIG_TASKSTATS + struct taskstats *stats; + #endif ++ ++#ifdef CONFIG_GRKERNSEC ++ u32 curr_ip; ++ u32 saved_ip; ++ u32 gr_saddr; ++ u32 gr_daddr; ++ u16 gr_sport; ++ u16 gr_dport; ++ u8 used_accept:1; ++#endif ++ + #ifdef CONFIG_AUDIT + unsigned audit_tty; + unsigned audit_tty_log_passwd; +@@ -703,7 +728,7 @@ struct signal_struct { + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. ptrace) */ +-}; ++} __randomize_layout; + + /* + * Bits in flags field of signal_struct. +@@ -757,6 +782,14 @@ struct user_struct { + struct key *session_keyring; /* UID's default session keyring */ + #endif + ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT ++ unsigned char kernel_banned; ++#endif ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ unsigned char suid_banned; ++ unsigned long suid_ban_expires; ++#endif ++ + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; +@@ -764,7 +797,7 @@ struct user_struct { + #ifdef CONFIG_PERF_EVENTS + atomic_long_t locked_vm; + #endif +-}; ++} __randomize_layout; + + extern int uids_sysfs_init(void); + +@@ -1164,6 +1197,9 @@ enum perf_event_task_context { + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + void *stack; ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ void *lowmem_stack; ++#endif + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ + unsigned int ptrace; +@@ -1286,8 +1322,8 @@ struct task_struct { + struct list_head thread_node; + + struct completion *vfork_done; /* for vfork() */ +- int __user *set_child_tid; /* CLONE_CHILD_SETTID */ +- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ ++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ ++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; +@@ -1312,11 +1348,6 @@ struct task_struct { + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; + +-/* process credentials */ +- const struct cred __rcu *real_cred; /* objective and real subjective task +- * credentials (COW) */ +- const struct cred __rcu *cred; /* effective (overridable) subjective task +- * credentials (COW) */ + char comm[TASK_COMM_LEN]; /* executable name excluding path + - access with [gs]et_task_comm (which lock + it with task_lock()) +@@ -1333,6 +1364,10 @@ struct task_struct { + #endif + /* CPU-specific state of this task */ + struct thread_struct thread; ++/* thread_info moved to task_struct */ ++#ifdef CONFIG_X86 ++ struct thread_info tinfo; ++#endif + /* filesystem information */ + struct fs_struct *fs; + /* open file information */ +@@ -1409,6 +1444,10 @@ struct task_struct { + gfp_t lockdep_reclaim_gfp; + #endif + ++/* process credentials */ ++ const struct cred __rcu *real_cred; /* objective and real subjective task ++ * credentials (COW) */ ++ + /* journalling filesystem info */ + void *journal_info; + +@@ -1447,6 +1486,10 @@ struct task_struct { + /* cg_list protected by css_set_lock and tsk->alloc_lock */ + struct list_head cg_list; + #endif ++ ++ const struct cred __rcu *cred; /* effective (overridable) 
subjective task ++ * credentials (COW) */ ++ + #ifdef CONFIG_FUTEX + struct robust_list_head __user *robust_list; + #ifdef CONFIG_COMPAT +@@ -1581,7 +1624,78 @@ struct task_struct { + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif +-}; ++ ++#ifdef CONFIG_GRKERNSEC ++ /* grsecurity */ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ u64 exec_id; ++#endif ++#ifdef CONFIG_GRKERNSEC_SETXID ++ const struct cred *delayed_cred; ++#endif ++ struct dentry *gr_chroot_dentry; ++ struct acl_subject_label *acl; ++ struct acl_subject_label *tmpacl; ++ struct acl_role_label *role; ++ struct file *exec_file; ++ unsigned long brute_expires; ++ u16 acl_role_id; ++ u8 inherited; ++ /* is this the task that authenticated to the special role */ ++ u8 acl_sp_role; ++ u8 is_writable; ++ u8 brute; ++ u8 gr_is_chrooted; ++#endif ++ ++} __randomize_layout; ++ ++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ ++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ ++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ ++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ ++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ ++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ ++ ++#ifdef CONFIG_PAX_SOFTMODE ++extern int pax_softmode; ++#endif ++ ++extern int pax_check_flags(unsigned long *); ++#define PAX_PARSE_FLAGS_FALLBACK (~0UL) ++ ++/* if tsk != current then task_lock must be held on it */ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline unsigned long pax_get_flags(struct task_struct *tsk) ++{ ++ if (likely(tsk->mm)) ++ return tsk->mm->pax_flags; ++ else ++ return 0UL; ++} ++ ++/* if tsk != current then task_lock must be held on it */ ++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags) ++{ ++ if (likely(tsk->mm)) { ++ tsk->mm->pax_flags = flags; ++ return 0; ++ } ++ return -EINVAL; ++} ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++extern void pax_set_initial_flags(struct linux_binprm *bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++#endif ++ ++struct path; ++extern char *pax_get_path(const struct path *path, char *buf, int buflen); ++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); ++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp); ++extern void pax_report_refcount_overflow(struct pt_regs *regs); + + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ + #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +@@ -1658,7 +1772,7 @@ struct pid_namespace; + pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, + struct pid_namespace *ns); + +-static inline pid_t task_pid_nr(struct task_struct *tsk) ++static inline pid_t task_pid_nr(const struct task_struct *tsk) + { + return tsk->pid; + } +@@ -2006,6 +2120,25 @@ extern u64 sched_clock_cpu(int cpu); + + extern void sched_clock_init(void); + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++static inline void populate_stack(void) ++{ ++ struct task_struct *curtask = current; ++ int c; ++ int *ptr = curtask->stack; ++ int *end = curtask->stack + THREAD_SIZE; ++ ++ while (ptr < end) { ++ c = *(volatile int *)ptr; ++ ptr += PAGE_SIZE/sizeof(int); ++ } ++} ++#else ++static inline void populate_stack(void) ++{ ++} ++#endif ++ + #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK + static inline void sched_clock_tick(void) + { +@@ -2130,7 +2263,9 @@ void yield(void); + extern struct exec_domain default_exec_domain; + + union thread_union { ++#ifndef CONFIG_X86 + struct thread_info thread_info; ++#endif + unsigned long stack[THREAD_SIZE/sizeof(long)]; + }; + +@@ -2163,6 +2298,7 @@ extern struct pid_namespace init_pid_ns; + */ + + extern struct task_struct *find_task_by_vpid(pid_t nr); ++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr); + extern struct task_struct *find_task_by_pid_ns(pid_t nr, + struct pid_namespace *ns); + +@@ -2325,7 +2461,7 @@ extern void __cleanup_sighand(struct sighand_struct *); + extern void exit_itimers(struct signal_struct *); + extern void flush_itimer_signals(void); + +-extern void do_group_exit(int); ++extern __noreturn void do_group_exit(int); + + extern int allow_signal(int); + extern int disallow_signal(int); +@@ -2526,9 +2662,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) + + #endif + +-static inline int object_is_on_stack(void *obj) ++static inline int object_starts_on_stack(const void *obj) + { +- void *stack = task_stack_page(current); ++ const void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); + } +diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h +index 8045a55..c959cd5 100644 +--- a/include/linux/sched/sysctl.h ++++ b/include/linux/sched/sysctl.h +@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 }; + #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + + extern int sysctl_max_map_count; ++extern unsigned long sysctl_heap_stack_gap; + + extern unsigned int sysctl_sched_latency; + extern unsigned int sysctl_sched_min_granularity; +diff --git a/include/linux/security.h b/include/linux/security.h +index 2fc42d1..4d802f2 100644 +--- a/include/linux/security.h ++++ b/include/linux/security.h +@@ -27,6 +27,7 @@ + #include <linux/slab.h> + #include <linux/err.h> + #include <linux/string.h> ++#include <linux/grsecurity.h> + + struct linux_binprm; + struct cred; +@@ -116,8 +117,6 @@ struct seq_file; + + extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); + +-void reset_security_ops(void); +- + #ifdef CONFIG_MMU + extern unsigned long mmap_min_addr; + extern unsigned long dac_mmap_min_addr; +@@ -1719,7 +1718,7 @@ struct security_operations { + struct audit_context *actx); + void (*audit_rule_free) (void *lsmrule); + #endif /* CONFIG_AUDIT */ +-}; ++} __randomize_layout; + + /* prototypes */ + extern int security_init(void); +diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h +index dc368b8..e895209 
100644 +--- a/include/linux/semaphore.h ++++ b/include/linux/semaphore.h +@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val) + } + + extern void down(struct semaphore *sem); +-extern int __must_check down_interruptible(struct semaphore *sem); ++extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1); + extern int __must_check down_killable(struct semaphore *sem); + extern int __must_check down_trylock(struct semaphore *sem); + extern int __must_check down_timeout(struct semaphore *sem, long jiffies); +diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h +index 52e0097..383f21d 100644 +--- a/include/linux/seq_file.h ++++ b/include/linux/seq_file.h +@@ -27,6 +27,9 @@ struct seq_file { + struct mutex lock; + const struct seq_operations *op; + int poll_event; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ u64 exec_id; ++#endif + #ifdef CONFIG_USER_NS + struct user_namespace *user_ns; + #endif +@@ -39,6 +42,7 @@ struct seq_operations { + void * (*next) (struct seq_file *m, void *v, loff_t *pos); + int (*show) (struct seq_file *m, void *v); + }; ++typedef struct seq_operations __no_const seq_operations_no_const; + + #define SEQ_SKIP 1 + +@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c); + + char *mangle_path(char *s, const char *p, const char *esc); + int seq_open(struct file *, const struct seq_operations *); ++int seq_open_restrict(struct file *, const struct seq_operations *); + ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); + loff_t seq_lseek(struct file *, loff_t, int); + int seq_release(struct inode *, struct file *); +@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) + } + + int single_open(struct file *, int (*)(struct seq_file *, void *), void *); ++int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *); + int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); + int single_release(struct inode *, struct file *); + void *__seq_open_private(struct file *, const struct seq_operations *, int); +diff --git a/include/linux/shm.h b/include/linux/shm.h +index 1e2cd2e..0288750 100644 +--- a/include/linux/shm.h ++++ b/include/linux/shm.h +@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */ + + /* The task created the shm object. NULL if the task is dead. 
*/ + struct task_struct *shm_creator; ++#ifdef CONFIG_GRKERNSEC ++ time_t shm_createtime; ++ pid_t shm_lapid; ++#endif + }; + + /* shm_mode upper byte flags */ +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 15ede6a..80161c3 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -662,7 +662,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, + struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, + int node); + struct sk_buff *build_skb(void *data, unsigned int frag_size); +-static inline struct sk_buff *alloc_skb(unsigned int size, ++static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size, + gfp_t priority) + { + return __alloc_skb(size, priority, 0, NUMA_NO_NODE); +@@ -1768,7 +1768,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) + return skb->inner_transport_header - skb->inner_network_header; + } + +-static inline int skb_network_offset(const struct sk_buff *skb) ++static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb) + { + return skb_network_header(skb) - skb->data; + } +@@ -1828,7 +1828,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) + */ + #ifndef NET_SKB_PAD +-#define NET_SKB_PAD max(32, L1_CACHE_BYTES) ++#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES) + #endif + + int ___pskb_trim(struct sk_buff *skb, unsigned int len); +@@ -2427,7 +2427,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, + int *err); + unsigned int datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); +-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, ++int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset, + struct iovec *to, int size); + int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, + struct iovec *iov); +@@ -2721,6 +2721,9 @@ static inline void nf_reset(struct sk_buff *skb) + nf_bridge_put(skb->nf_bridge); + skb->nf_bridge = NULL; + #endif ++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) ++ skb->nf_trace = 0; ++#endif + } + + static inline void nf_reset_trace(struct sk_buff *skb) +diff --git a/include/linux/slab.h b/include/linux/slab.h +index b5b2df6..69f5734 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -14,15 +14,29 @@ + #include <linux/gfp.h> + #include <linux/types.h> + #include <linux/workqueue.h> +- ++#include <linux/err.h> + + /* + * Flags to pass to kmem_cache_create(). + * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. 
+ */ + #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ ++ ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */ ++#else ++#define SLAB_USERCOPY 0x00000000UL ++#endif ++ + #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ + #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */ ++#else ++#define SLAB_NO_SANITIZE 0x00000000UL ++#endif ++ + #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ + #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ + #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ +@@ -98,10 +112,13 @@ + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. + * Both make kfree a no-op. + */ +-#define ZERO_SIZE_PTR ((void *)16) ++#define ZERO_SIZE_PTR \ ++({ \ ++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\ ++ (void *)(-MAX_ERRNO-1L); \ ++}) + +-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ +- (unsigned long)ZERO_SIZE_PTR) ++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1) + + #include <linux/kmemleak.h> + +@@ -142,6 +159,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t); + void kfree(const void *); + void kzfree(const void *); + size_t ksize(const void *); ++const char *check_heap_object(const void *ptr, unsigned long n); ++bool is_usercopy_object(const void *ptr); + + /* + * Some archs want to perform DMA into kmalloc caches and need a guaranteed +@@ -174,7 +193,7 @@ struct kmem_cache { + unsigned int align; /* Alignment as calculated */ + unsigned long flags; /* Active flags on the slab */ + const char *name; /* Slab name for sysfs */ +- int refcount; /* Use counter */ ++ atomic_t refcount; /* Use counter */ + void (*ctor)(void *); /* Called on object slot creation */ + struct list_head list; /* List of all slab caches on the system */ + }; +@@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; + extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; + #endif + ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1]; ++#endif ++ + /* + * Figure out which kmalloc slab an allocation of a certain size + * belongs to. +@@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; + * 2 = 120 .. 192 bytes + * n = 2^(n-1) .. 
2^n -1 + */ +-static __always_inline int kmalloc_index(size_t size) ++static __always_inline __size_overflow(1) int kmalloc_index(size_t size) + { + if (!size) + return 0; +@@ -299,11 +322,11 @@ static __always_inline int kmalloc_index(size_t size) + } + #endif /* !CONFIG_SLOB */ + +-void *__kmalloc(size_t size, gfp_t flags); ++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1); + void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); + + #ifdef CONFIG_NUMA +-void *__kmalloc_node(size_t size, gfp_t flags, int node); ++void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1); + void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); + #else + static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) +diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h +index 8235dfb..47ce586 100644 +--- a/include/linux/slab_def.h ++++ b/include/linux/slab_def.h +@@ -38,7 +38,7 @@ struct kmem_cache { + /* 4) cache creation/removal */ + const char *name; + struct list_head list; +- int refcount; ++ atomic_t refcount; + int object_size; + int align; + +@@ -54,10 +54,14 @@ struct kmem_cache { + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; +- atomic_t allochit; +- atomic_t allocmiss; +- atomic_t freehit; +- atomic_t freemiss; ++ atomic_unchecked_t allochit; ++ atomic_unchecked_t allocmiss; ++ atomic_unchecked_t freehit; ++ atomic_unchecked_t freemiss; ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ atomic_unchecked_t sanitized; ++ atomic_unchecked_t not_sanitized; ++#endif + + /* + * If debugging is enabled, then the allocator can add additional +diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h +index f56bfa9..8378a26 100644 +--- a/include/linux/slub_def.h ++++ b/include/linux/slub_def.h +@@ -74,7 +74,7 @@ struct kmem_cache { + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; /* gfp flags to use on each alloc */ +- int refcount; /* Refcount for slab cache destroy */ ++ atomic_t refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *); + int inuse; /* Offset to metadata */ + int align; /* Alignment */ +diff --git a/include/linux/smp.h b/include/linux/smp.h +index 6ae004e..2743532 100644 +--- a/include/linux/smp.h ++++ b/include/linux/smp.h +@@ -180,7 +180,9 @@ static inline void kick_all_cpus_sync(void) { } + #endif + + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) ++#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); }) + #define put_cpu() preempt_enable() ++#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched() + + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the +diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h +index 46cca4c..3323536 100644 +--- a/include/linux/sock_diag.h ++++ b/include/linux/sock_diag.h +@@ -11,7 +11,7 @@ struct sock; + struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); +-}; ++} __do_const; + + int sock_diag_register(const struct sock_diag_handler *h); + void sock_diag_unregister(const struct sock_diag_handler *h); +diff --git a/include/linux/sonet.h b/include/linux/sonet.h +index 680f9a3..f13aeb0 100644 +--- a/include/linux/sonet.h ++++ b/include/linux/sonet.h +@@ -7,7 +7,7 @@ + #include <uapi/linux/sonet.h> + + struct k_sonet_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __SONET_ITEMS + #undef __HANDLE_ITEM + }; +diff --git 
a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h +index 07d8e53..dc934c9 100644 +--- a/include/linux/sunrpc/addr.h ++++ b/include/linux/sunrpc/addr.h +@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap) + { + switch (sap->sa_family) { + case AF_INET: +- return ntohs(((struct sockaddr_in *)sap)->sin_port); ++ return ntohs(((const struct sockaddr_in *)sap)->sin_port); + case AF_INET6: +- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); ++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port); + } + return 0; + } +@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, + static inline bool __rpc_copy_addr4(struct sockaddr *dst, + const struct sockaddr *src) + { +- const struct sockaddr_in *ssin = (struct sockaddr_in *) src; ++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src; + struct sockaddr_in *dsin = (struct sockaddr_in *) dst; + + dsin->sin_family = ssin->sin_family; +@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa) + if (sa->sa_family != AF_INET6) + return 0; + +- return ((struct sockaddr_in6 *) sa)->sin6_scope_id; ++ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id; + } + + #endif /* _LINUX_SUNRPC_ADDR_H */ +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h +index 8af2804..c7414ef 100644 +--- a/include/linux/sunrpc/clnt.h ++++ b/include/linux/sunrpc/clnt.h +@@ -97,7 +97,7 @@ struct rpc_procinfo { + unsigned int p_timer; /* Which RTT timer to use */ + u32 p_statidx; /* Which procedure to account */ + const char * p_name; /* name of procedure */ +-}; ++} __do_const; + + #ifdef __KERNEL__ + +diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h +index 04e7632..2e2a8a3 100644 +--- a/include/linux/sunrpc/svc.h ++++ b/include/linux/sunrpc/svc.h +@@ -412,7 +412,7 @@ struct svc_procedure { + unsigned int pc_count; /* call count */ + unsigned int pc_cachetype; /* cache info (NFS) */ + unsigned int pc_xdrressize; /* maximum size of XDR reply */ +-}; ++} __do_const; + + /* + * Function prototypes. 
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h +index 0b8e3e6..33e0a01 100644 +--- a/include/linux/sunrpc/svc_rdma.h ++++ b/include/linux/sunrpc/svc_rdma.h +@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord; + extern unsigned int svcrdma_max_requests; + extern unsigned int svcrdma_max_req_size; + +-extern atomic_t rdma_stat_recv; +-extern atomic_t rdma_stat_read; +-extern atomic_t rdma_stat_write; +-extern atomic_t rdma_stat_sq_starve; +-extern atomic_t rdma_stat_rq_starve; +-extern atomic_t rdma_stat_rq_poll; +-extern atomic_t rdma_stat_rq_prod; +-extern atomic_t rdma_stat_sq_poll; +-extern atomic_t rdma_stat_sq_prod; ++extern atomic_unchecked_t rdma_stat_recv; ++extern atomic_unchecked_t rdma_stat_read; ++extern atomic_unchecked_t rdma_stat_write; ++extern atomic_unchecked_t rdma_stat_sq_starve; ++extern atomic_unchecked_t rdma_stat_rq_starve; ++extern atomic_unchecked_t rdma_stat_rq_poll; ++extern atomic_unchecked_t rdma_stat_rq_prod; ++extern atomic_unchecked_t rdma_stat_sq_poll; ++extern atomic_unchecked_t rdma_stat_sq_prod; + + #define RPCRDMA_VERSION 1 + +diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h +index 8d71d65..f79586e 100644 +--- a/include/linux/sunrpc/svcauth.h ++++ b/include/linux/sunrpc/svcauth.h +@@ -120,7 +120,7 @@ struct auth_ops { + int (*release)(struct svc_rqst *rq); + void (*domain_release)(struct auth_domain *); + int (*set_client)(struct svc_rqst *rq); +-}; ++} __do_const; + + #define SVC_GARBAGE 1 + #define SVC_SYSERR 2 +diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h +index a5ffd32..0935dea 100644 +--- a/include/linux/swiotlb.h ++++ b/include/linux/swiotlb.h +@@ -60,7 +60,8 @@ extern void + + extern void + swiotlb_free_coherent(struct device *hwdev, size_t size, +- void *vaddr, dma_addr_t dma_handle); ++ void *vaddr, dma_addr_t dma_handle, ++ struct dma_attrs *attrs); + + extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, +diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h +index a747a77..9e14df7 100644 +--- a/include/linux/syscalls.h ++++ b/include/linux/syscalls.h +@@ -98,8 +98,14 @@ struct sigaltstack; + #define __MAP(n,...) 
__MAP##n(__VA_ARGS__) + + #define __SC_DECL(t, a) t a ++#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0)) + #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL)) +-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a ++#define __SC_LONG(t, a) __typeof( \ ++ __builtin_choose_expr( \ ++ sizeof(t) > sizeof(int), \ ++ (t) 0, \ ++ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \ ++ )) a + #define __SC_CAST(t, a) (t) a + #define __SC_ARGS(t, a) a + #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long)) +@@ -371,11 +377,11 @@ asmlinkage long sys_sync(void); + asmlinkage long sys_fsync(unsigned int fd); + asmlinkage long sys_fdatasync(unsigned int fd); + asmlinkage long sys_bdflush(int func, long data); +-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name, +- char __user *type, unsigned long flags, ++asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name, ++ const char __user *type, unsigned long flags, + void __user *data); +-asmlinkage long sys_umount(char __user *name, int flags); +-asmlinkage long sys_oldumount(char __user *name); ++asmlinkage long sys_umount(const char __user *name, int flags); ++asmlinkage long sys_oldumount(const char __user *name); + asmlinkage long sys_truncate(const char __user *path, long length); + asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length); + asmlinkage long sys_stat(const char __user *filename, +@@ -587,7 +593,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); + asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); + asmlinkage long sys_send(int, void __user *, size_t, unsigned); + asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, +- struct sockaddr __user *, int); ++ struct sockaddr __user *, int) __intentional_overflow(0); + asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); + asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags); +diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h +index 27b3b0b..e093dd9 100644 +--- a/include/linux/syscore_ops.h ++++ b/include/linux/syscore_ops.h +@@ -16,7 +16,7 @@ struct syscore_ops { + int (*suspend)(void); + void (*resume)(void); + void (*shutdown)(void); +-}; ++} __do_const; + + extern void register_syscore_ops(struct syscore_ops *ops); + extern void unregister_syscore_ops(struct syscore_ops *ops); +diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h +index 14a8ff2..fa95f3a 100644 +--- a/include/linux/sysctl.h ++++ b/include/linux/sysctl.h +@@ -34,13 +34,13 @@ struct ctl_table_root; + struct ctl_table_header; + struct ctl_dir; + +-typedef struct ctl_table ctl_table; +- + typedef int proc_handler (struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + + extern int proc_dostring(struct ctl_table *, int, + void __user *, size_t *, loff_t *); ++extern int proc_dostring_modpriv(struct ctl_table *, int, ++ void __user *, size_t *, loff_t *); + extern int proc_dointvec(struct ctl_table *, int, + void __user *, size_t *, loff_t *); + extern int proc_dointvec_minmax(struct ctl_table *, int, +@@ -115,7 +115,9 @@ struct ctl_table + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +-}; ++} __do_const __randomize_layout; ++typedef struct ctl_table __no_const 
ctl_table_no_const; ++typedef struct ctl_table ctl_table; + + struct ctl_node { + struct rb_node node; +diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h +index 30b2ebe..37412ef 100644 +--- a/include/linux/sysfs.h ++++ b/include/linux/sysfs.h +@@ -34,7 +34,8 @@ struct attribute { + struct lock_class_key *key; + struct lock_class_key skey; + #endif +-}; ++} __do_const; ++typedef struct attribute __no_const attribute_no_const; + + /** + * sysfs_attr_init - initialize a dynamically allocated sysfs attribute +@@ -63,7 +64,8 @@ struct attribute_group { + struct attribute *, int); + struct attribute **attrs; + struct bin_attribute **bin_attrs; +-}; ++} __do_const; ++typedef struct attribute_group __no_const attribute_group_no_const; + + /** + * Use these macros to make defining attributes easier. See include/linux/device.h +@@ -127,7 +129,8 @@ struct bin_attribute { + char *, loff_t, size_t); + int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, + struct vm_area_struct *vma); +-}; ++} __do_const; ++typedef struct bin_attribute __no_const bin_attribute_no_const; + + /** + * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute +diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h +index 387fa7d..3fcde6b 100644 +--- a/include/linux/sysrq.h ++++ b/include/linux/sysrq.h +@@ -16,6 +16,7 @@ + + #include <linux/errno.h> + #include <linux/types.h> ++#include <linux/compiler.h> + + /* Possible values of bitmask for enabling sysrq functions */ + /* 0x0001 is reserved for enable everything */ +@@ -33,7 +34,7 @@ struct sysrq_key_op { + char *help_msg; + char *action_msg; + int enable_mask; +-}; ++} __do_const; + + #ifdef CONFIG_MAGIC_SYSRQ + +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index a629e4b..3fea3d9 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -159,6 +159,13 @@ static inline bool test_and_clear_restore_sigmask(void) + #error "no set_restore_sigmask() provided and default one won't work" + #endif + ++extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size); ++ ++static inline void check_object_size(const void *ptr, unsigned long n, bool to_user) ++{ ++ __check_object_size(ptr, n, to_user, __builtin_constant_p(n)); ++} ++ + #endif /* __KERNEL__ */ + + #endif /* _LINUX_THREAD_INFO_H */ +diff --git a/include/linux/tty.h b/include/linux/tty.h +index b90b5c2..e23a512 100644 +--- a/include/linux/tty.h ++++ b/include/linux/tty.h +@@ -202,7 +202,7 @@ struct tty_port { + const struct tty_port_operations *ops; /* Port operations */ + spinlock_t lock; /* Lock protecting tty field */ + int blocked_open; /* Waiting to open */ +- int count; /* Usage count */ ++ atomic_t count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t close_wait; /* Close waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ +@@ -284,7 +284,7 @@ struct tty_struct { + /* If the tty has a pending do_SAK, queue it here - akpm */ + struct work_struct SAK_work; + struct tty_port *port; +-}; ++} __randomize_layout; + + /* Each of a tty's open files has private_data pointing to tty_file_private */ + struct tty_file_private { +@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port, + struct tty_struct *tty, struct file *filp); + static inline int tty_port_users(struct tty_port *port) + { +- return port->count + port->blocked_open; ++ return atomic_read(&port->count) + port->blocked_open; + } + + extern int 
tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); +diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h +index 756a609..89db85e 100644 +--- a/include/linux/tty_driver.h ++++ b/include/linux/tty_driver.h +@@ -285,7 +285,7 @@ struct tty_operations { + void (*poll_put_char)(struct tty_driver *driver, int line, char ch); + #endif + const struct file_operations *proc_fops; +-}; ++} __do_const __randomize_layout; + + struct tty_driver { + int magic; /* magic number for this structure */ +@@ -319,7 +319,7 @@ struct tty_driver { + + const struct tty_operations *ops; + struct list_head tty_drivers; +-}; ++} __randomize_layout; + + extern struct list_head tty_drivers; + +diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h +index b8347c2..85d8b0f 100644 +--- a/include/linux/tty_ldisc.h ++++ b/include/linux/tty_ldisc.h +@@ -213,7 +213,7 @@ struct tty_ldisc_ops { + + struct module *owner; + +- int refcount; ++ atomic_t refcount; + }; + + struct tty_ldisc { +diff --git a/include/linux/types.h b/include/linux/types.h +index 4d118ba..c3ee9bf 100644 +--- a/include/linux/types.h ++++ b/include/linux/types.h +@@ -176,10 +176,26 @@ typedef struct { + int counter; + } atomic_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ int counter; ++} atomic_unchecked_t; ++#else ++typedef atomic_t atomic_unchecked_t; ++#endif ++ + #ifdef CONFIG_64BIT + typedef struct { + long counter; + } atomic64_t; ++ ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ long counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif + #endif + + struct list_head { +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h +index ecd3319..8a36ded 100644 +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to, + long ret; \ + mm_segment_t old_fs = get_fs(); \ + \ +- set_fs(KERNEL_DS); \ + pagefault_disable(); \ +- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ set_fs(KERNEL_DS); \ ++ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \ + set_fs(old_fs); \ ++ pagefault_enable(); \ + ret; \ + }) + +diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h +index 2d1f9b6..d7a9fce 100644 +--- a/include/linux/uidgid.h ++++ b/include/linux/uidgid.h +@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) + + #endif /* CONFIG_USER_NS */ + ++#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x)) ++#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x)) ++#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID) ++#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID)) ++ + #endif /* _LINUX_UIDGID_H */ +diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h +index 99c1b4d..562e6f3 100644 +--- a/include/linux/unaligned/access_ok.h ++++ b/include/linux/unaligned/access_ok.h +@@ -4,34 +4,34 @@ + #include <linux/kernel.h> + #include <asm/byteorder.h> + +-static inline u16 get_unaligned_le16(const void *p) ++static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p) + { +- return le16_to_cpup((__le16 *)p); ++ return le16_to_cpup((const __le16 *)p); + } + +-static inline u32 get_unaligned_le32(const void *p) ++static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p) + { +- return le32_to_cpup((__le32 *)p); ++ 
return le32_to_cpup((const __le32 *)p); + } + +-static inline u64 get_unaligned_le64(const void *p) ++static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p) + { +- return le64_to_cpup((__le64 *)p); ++ return le64_to_cpup((const __le64 *)p); + } + +-static inline u16 get_unaligned_be16(const void *p) ++static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p) + { +- return be16_to_cpup((__be16 *)p); ++ return be16_to_cpup((const __be16 *)p); + } + +-static inline u32 get_unaligned_be32(const void *p) ++static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p) + { +- return be32_to_cpup((__be32 *)p); ++ return be32_to_cpup((const __be32 *)p); + } + +-static inline u64 get_unaligned_be64(const void *p) ++static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p) + { +- return be64_to_cpup((__be64 *)p); ++ return be64_to_cpup((const __be64 *)p); + } + + static inline void put_unaligned_le16(u16 val, void *p) +diff --git a/include/linux/usb.h b/include/linux/usb.h +index 7f6eb85..656e806 100644 +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -563,7 +563,7 @@ struct usb_device { + int maxchild; + + u32 quirks; +- atomic_t urbnum; ++ atomic_unchecked_t urbnum; + + unsigned long active_duration; + +@@ -1642,7 +1642,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, + + extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, + __u8 request, __u8 requesttype, __u16 value, __u16 index, +- void *data, __u16 size, int timeout); ++ void *data, __u16 size, int timeout) __intentional_overflow(-1); + extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, int timeout); + extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, +diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h +index e452ba6..78f8e80 100644 +--- a/include/linux/usb/renesas_usbhs.h ++++ b/include/linux/usb/renesas_usbhs.h +@@ -39,7 +39,7 @@ enum { + */ + struct renesas_usbhs_driver_callback { + int (*notify_hotplug)(struct platform_device *pdev); +-}; ++} __no_const; + + /* + * callback functions for platform +diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h +index 4836ba3..603f6ee 100644 +--- a/include/linux/user_namespace.h ++++ b/include/linux/user_namespace.h +@@ -33,7 +33,7 @@ struct user_namespace { + struct key *persistent_keyring_register; + struct rw_semaphore persistent_keyring_register_sem; + #endif +-}; ++} __randomize_layout; + + extern struct user_namespace init_user_ns; + +diff --git a/include/linux/utsname.h b/include/linux/utsname.h +index 239e277..22a5cf5 100644 +--- a/include/linux/utsname.h ++++ b/include/linux/utsname.h +@@ -24,7 +24,7 @@ struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + unsigned int proc_inum; +-}; ++} __randomize_layout; + extern struct uts_namespace init_uts_ns; + + #ifdef CONFIG_UTS_NS +diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h +index 6f8fbcf..4efc177 100644 +--- a/include/linux/vermagic.h ++++ b/include/linux/vermagic.h +@@ -25,9 +25,42 @@ + #define MODULE_ARCH_VERMAGIC "" + #endif + ++#ifdef CONFIG_PAX_REFCOUNT ++#define MODULE_PAX_REFCOUNT "REFCOUNT " ++#else ++#define MODULE_PAX_REFCOUNT "" ++#endif ++ ++#ifdef CONSTIFY_PLUGIN ++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN " ++#else ++#define MODULE_CONSTIFY_PLUGIN "" ++#endif ++ ++#ifdef STACKLEAK_PLUGIN ++#define 
MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN " ++#else ++#define MODULE_STACKLEAK_PLUGIN "" ++#endif ++ ++#ifdef RANDSTRUCT_PLUGIN ++#include <generated/randomize_layout_hash.h> ++#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED ++#else ++#define MODULE_RANDSTRUCT_PLUGIN ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++#define MODULE_GRSEC "GRSEC " ++#else ++#define MODULE_GRSEC "" ++#endif ++ + #define VERMAGIC_STRING \ + UTS_RELEASE " " \ + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ +- MODULE_ARCH_VERMAGIC ++ MODULE_ARCH_VERMAGIC \ ++ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \ ++ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN + +diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h +index 502073a..a7de024 100644 +--- a/include/linux/vga_switcheroo.h ++++ b/include/linux/vga_switcheroo.h +@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev); + + void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); + +-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); +-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); ++int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain); ++int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain); + #else + + static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} +@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return + + static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} + +-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } +-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } ++static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; } ++static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; } + + #endif + #endif /* _LINUX_VGA_SWITCHEROO_H_ */ +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h +index 4b8a891..e9a2863 100644 +--- a/include/linux/vmalloc.h ++++ b/include/linux/vmalloc.h +@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ + #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ + #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ + #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */ ++#endif ++ + /* bits [20..32] reserved for arch specific ioremap internals */ + + /* +@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count, + unsigned long flags, pgprot_t prot); + extern void vunmap(const void *addr); + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++extern void unmap_process_stacks(struct task_struct *task); ++#endif ++ + extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, + unsigned long uaddr, void *kaddr, + unsigned long size); +@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct 
*area); + + /* for /dev/kmem */ + extern long vread(char *buf, char *addr, unsigned long count); +-extern long vwrite(char *buf, char *addr, unsigned long count); ++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3); + + /* + * Internals. Dont't use.. +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h +index 67ce70c..d540954 100644 +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h +@@ -98,18 +98,18 @@ static inline void vm_events_fold_cpu(int cpu) + /* + * Zone based page accounting with per cpu differentials. + */ +-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) + { +- atomic_long_add(x, &zone->vm_stat[item]); +- atomic_long_add(x, &vm_stat[item]); ++ atomic_long_add_unchecked(x, &zone->vm_stat[item]); ++ atomic_long_add_unchecked(x, &vm_stat[item]); + } + +-static inline unsigned long global_page_state(enum zone_stat_item item) ++static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item) + { +- long x = atomic_long_read(&vm_stat[item]); ++ long x = atomic_long_read_unchecked(&vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -117,10 +117,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item) + return x; + } + +-static inline unsigned long zone_page_state(struct zone *zone, ++static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -137,7 +137,7 @@ static inline unsigned long zone_page_state(struct zone *zone, + static inline unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + + #ifdef CONFIG_SMP + int cpu; +@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone, + + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_inc(&zone->vm_stat[item]); +- atomic_long_inc(&vm_stat[item]); ++ atomic_long_inc_unchecked(&zone->vm_stat[item]); ++ atomic_long_inc_unchecked(&vm_stat[item]); + } + + static inline void __inc_zone_page_state(struct page *page, +@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page, + + static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_dec(&zone->vm_stat[item]); +- atomic_long_dec(&vm_stat[item]); ++ atomic_long_dec_unchecked(&zone->vm_stat[item]); ++ atomic_long_dec_unchecked(&vm_stat[item]); + } + + static inline void __dec_zone_page_state(struct page *page, +diff --git a/include/linux/xattr.h b/include/linux/xattr.h +index 91b0a68..0e9adf6 100644 +--- a/include/linux/xattr.h ++++ b/include/linux/xattr.h +@@ -28,7 +28,7 @@ struct xattr_handler { + size_t size, int handler_flags); + int (*set)(struct dentry *dentry, const char *name, const void *buffer, + size_t size, int flags, int handler_flags); +-}; ++} __do_const; + + struct xattr { + const char *name; +@@ -37,6 +37,9 @@ struct xattr { + }; + + ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++ssize_t pax_getxattr(struct dentry *, void *, size_t); 
++#endif + ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); + ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); + int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +diff --git a/include/linux/zlib.h b/include/linux/zlib.h +index 9c5a6b4..09c9438 100644 +--- a/include/linux/zlib.h ++++ b/include/linux/zlib.h +@@ -31,6 +31,7 @@ + #define _ZLIB_H + + #include <linux/zconf.h> ++#include <linux/compiler.h> + + /* zlib deflate based on ZLIB_VERSION "1.1.3" */ + /* zlib inflate based on ZLIB_VERSION "1.2.3" */ +@@ -179,7 +180,7 @@ typedef z_stream *z_streamp; + + /* basic functions */ + +-extern int zlib_deflate_workspacesize (int windowBits, int memLevel); ++extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0); + /* + Returns the number of bytes that needs to be allocated for a per- + stream workspace with the specified parameters. A pointer to this +diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h +index c768c9f..bdcaa5a 100644 +--- a/include/media/v4l2-dev.h ++++ b/include/media/v4l2-dev.h +@@ -76,7 +76,7 @@ struct v4l2_file_operations { + int (*mmap) (struct file *, struct vm_area_struct *); + int (*open) (struct file *); + int (*release) (struct file *); +-}; ++} __do_const; + + /* + * Newer version of video_device, handled by videodev2.c +diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h +index c9b1593..a572459 100644 +--- a/include/media/v4l2-device.h ++++ b/include/media/v4l2-device.h +@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4 + this function returns 0. If the name ends with a digit (e.g. cx18), + then the name will be set to cx18-0 since cx180 looks really odd. */ + int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, +- atomic_t *instance); ++ atomic_unchecked_t *instance); + + /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects. 
+ Since the parent disappears this ensures that v4l2_dev doesn't have an +diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h +index 9a36d92..0aafe2a 100644 +--- a/include/net/9p/transport.h ++++ b/include/net/9p/transport.h +@@ -60,7 +60,7 @@ struct p9_trans_module { + int (*cancel) (struct p9_client *, struct p9_req_t *req); + int (*zc_request)(struct p9_client *, struct p9_req_t *, + char *, char *, int , int, int, int); +-}; ++} __do_const; + + void v9fs_register_trans(struct p9_trans_module *m); + void v9fs_unregister_trans(struct p9_trans_module *m); +diff --git a/include/net/af_unix.h b/include/net/af_unix.h +index a175ba4..196eb82 100644 +--- a/include/net/af_unix.h ++++ b/include/net/af_unix.h +@@ -36,7 +36,7 @@ struct unix_skb_parms { + u32 secid; /* Security ID */ + #endif + u32 consumed; +-}; ++} __randomize_layout; + + #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) + #define UNIXSID(skb) (&UNIXCB((skb)).secid) +diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h +index dbc4a89..4a59b5d 100644 +--- a/include/net/bluetooth/l2cap.h ++++ b/include/net/bluetooth/l2cap.h +@@ -600,7 +600,7 @@ struct l2cap_ops { + long (*get_sndtimeo) (struct l2cap_chan *chan); + struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, + unsigned long len, int nb); +-}; ++} __do_const; + + struct l2cap_conn { + struct hci_conn *hcon; +diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h +index f2ae33d..c457cf0 100644 +--- a/include/net/caif/cfctrl.h ++++ b/include/net/caif/cfctrl.h +@@ -52,7 +52,7 @@ struct cfctrl_rsp { + void (*radioset_rsp)(void); + void (*reject_rsp)(struct cflayer *layer, u8 linkid, + struct cflayer *client_layer); +-}; ++} __no_const; + + /* Link Setup Parameters for CAIF-Links. 
*/ + struct cfctrl_link_param { +@@ -101,8 +101,8 @@ struct cfctrl_request_info { + struct cfctrl { + struct cfsrvl serv; + struct cfctrl_rsp res; +- atomic_t req_seq_no; +- atomic_t rsp_seq_no; ++ atomic_unchecked_t req_seq_no; ++ atomic_unchecked_t rsp_seq_no; + struct list_head list; + /* Protects from simultaneous access to first_req list */ + spinlock_t info_list_lock; +diff --git a/include/net/flow.h b/include/net/flow.h +index d23e7fa..e188307 100644 +--- a/include/net/flow.h ++++ b/include/net/flow.h +@@ -221,6 +221,6 @@ struct flow_cache_object *flow_cache_lookup(struct net *net, + + void flow_cache_flush(void); + void flow_cache_flush_deferred(void); +-extern atomic_t flow_cache_genid; ++extern atomic_unchecked_t flow_cache_genid; + + #endif +diff --git a/include/net/genetlink.h b/include/net/genetlink.h +index 93695f0..766d71c 100644 +--- a/include/net/genetlink.h ++++ b/include/net/genetlink.h +@@ -120,7 +120,7 @@ struct genl_ops { + u8 cmd; + u8 internal_flags; + u8 flags; +-}; ++} __do_const; + + int __genl_register_family(struct genl_family *family); + +diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h +index 734d9b5..48a9a4b 100644 +--- a/include/net/gro_cells.h ++++ b/include/net/gro_cells.h +@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s + cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask; + + if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { +- atomic_long_inc(&dev->rx_dropped); ++ atomic_long_inc_unchecked(&dev->rx_dropped); + kfree_skb(skb); + return; + } +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h +index c55aeed..b3393f4 100644 +--- a/include/net/inet_connection_sock.h ++++ b/include/net/inet_connection_sock.h +@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops { + void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); + int (*bind_conflict)(const struct sock *sk, + const struct inet_bind_bucket *tb, bool relax); +-}; ++} __do_const; + + /** inet_connection_sock - INET connection oriented sock + * +diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h +index 823ec7b..44c938c 100644 +--- a/include/net/inetpeer.h ++++ b/include/net/inetpeer.h +@@ -47,7 +47,7 @@ struct inet_peer { + */ + union { + struct { +- atomic_t rid; /* Frag reception counter */ ++ atomic_unchecked_t rid; /* Frag reception counter */ + }; + struct rcu_head rcu; + struct inet_peer *gc_next; +diff --git a/include/net/ip.h b/include/net/ip.h +index 937f196..7251808 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -214,7 +214,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ]) + + void inet_get_local_port_range(struct net *net, int *low, int *high); + +-extern unsigned long *sysctl_local_reserved_ports; ++extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)]; + static inline int inet_is_reserved_local_port(int port) + { + return test_bit(port, sysctl_local_reserved_ports); +@@ -297,7 +297,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) + } + } + +-u32 ip_idents_reserve(u32 hash, int segs); ++u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1); + void __ip_select_ident(struct iphdr *iph, int segs); + + static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h +index 9922093..a1755d6 100644 +--- a/include/net/ip_fib.h ++++ b/include/net/ip_fib.h +@@ -169,7 +169,7 @@ __be32 
fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); + + #define FIB_RES_SADDR(net, res) \ + ((FIB_RES_NH(res).nh_saddr_genid == \ +- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ ++ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \ + FIB_RES_NH(res).nh_saddr : \ + fib_info_update_nh_saddr((net), &FIB_RES_NH(res))) + #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h +index 5679d92..2e7a690 100644 +--- a/include/net/ip_vs.h ++++ b/include/net/ip_vs.h +@@ -558,7 +558,7 @@ struct ip_vs_conn { + struct ip_vs_conn *control; /* Master control connection */ + atomic_t n_control; /* Number of controlled ones */ + struct ip_vs_dest *dest; /* real server */ +- atomic_t in_pkts; /* incoming packet counter */ ++ atomic_unchecked_t in_pkts; /* incoming packet counter */ + + /* packet transmitter for different forwarding methods. If it + mangles the packet, it must return NF_DROP or better NF_STOLEN, +@@ -705,7 +705,7 @@ struct ip_vs_dest { + __be16 port; /* port number of the server */ + union nf_inet_addr addr; /* IP address of the server */ + volatile unsigned int flags; /* dest status flags */ +- atomic_t conn_flags; /* flags to copy to conn */ ++ atomic_unchecked_t conn_flags; /* flags to copy to conn */ + atomic_t weight; /* server weight */ + + atomic_t refcnt; /* reference counter */ +@@ -960,11 +960,11 @@ struct netns_ipvs { + /* ip_vs_lblc */ + int sysctl_lblc_expiration; + struct ctl_table_header *lblc_ctl_header; +- struct ctl_table *lblc_ctl_table; ++ ctl_table_no_const *lblc_ctl_table; + /* ip_vs_lblcr */ + int sysctl_lblcr_expiration; + struct ctl_table_header *lblcr_ctl_header; +- struct ctl_table *lblcr_ctl_table; ++ ctl_table_no_const *lblcr_ctl_table; + /* ip_vs_est */ + struct list_head est_list; /* estimator list */ + spinlock_t est_lock; +diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h +index 8d4f588..2e37ad2 100644 +--- a/include/net/irda/ircomm_tty.h ++++ b/include/net/irda/ircomm_tty.h +@@ -33,6 +33,7 @@ + #include <linux/termios.h> + #include <linux/timer.h> + #include <linux/tty.h> /* struct tty_struct */ ++#include <asm/local.h> + + #include <net/irda/irias_object.h> + #include <net/irda/ircomm_core.h> +diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h +index 714cc9a..ea05f3e 100644 +--- a/include/net/iucv/af_iucv.h ++++ b/include/net/iucv/af_iucv.h +@@ -149,7 +149,7 @@ struct iucv_skb_cb { + struct iucv_sock_list { + struct hlist_head head; + rwlock_t lock; +- atomic_t autobind_name; ++ atomic_unchecked_t autobind_name; + }; + + unsigned int iucv_sock_poll(struct file *file, struct socket *sock, +diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h +index f3be818..bf46196 100644 +--- a/include/net/llc_c_ac.h ++++ b/include/net/llc_c_ac.h +@@ -87,7 +87,7 @@ + #define LLC_CONN_AC_STOP_SENDACK_TMR 70 + #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71 + +-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb); ++typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb); + + int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb); + int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb); +diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h +index 3948cf1..83b28c4 100644 +--- a/include/net/llc_c_ev.h ++++ b/include/net/llc_c_ev.h +@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb) + return (struct llc_conn_state_ev *)skb->cb; + } + 
+-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb); +-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb); ++typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb); ++typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb); + + int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb); + int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb); +diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h +index 0e79cfb..f46db31 100644 +--- a/include/net/llc_c_st.h ++++ b/include/net/llc_c_st.h +@@ -37,7 +37,7 @@ struct llc_conn_state_trans { + u8 next_state; + llc_conn_ev_qfyr_t *ev_qualifiers; + llc_conn_action_t *ev_actions; +-}; ++} __do_const; + + struct llc_conn_state { + u8 current_state; +diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h +index a61b98c..aade1eb 100644 +--- a/include/net/llc_s_ac.h ++++ b/include/net/llc_s_ac.h +@@ -23,7 +23,7 @@ + #define SAP_ACT_TEST_IND 9 + + /* All action functions must look like this */ +-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb); ++typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb); + + int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb); + int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb); +diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h +index 567c681..cd73ac02 100644 +--- a/include/net/llc_s_st.h ++++ b/include/net/llc_s_st.h +@@ -20,7 +20,7 @@ struct llc_sap_state_trans { + llc_sap_ev_t ev; + u8 next_state; + llc_sap_action_t *ev_actions; +-}; ++} __do_const; + + struct llc_sap_state { + u8 curr_state; +diff --git a/include/net/mac80211.h b/include/net/mac80211.h +index f4ab2fb..71a85ba 100644 +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -4476,7 +4476,7 @@ struct rate_control_ops { + void (*add_sta_debugfs)(void *priv, void *priv_sta, + struct dentry *dir); + void (*remove_sta_debugfs)(void *priv, void *priv_sta); +-}; ++} __do_const; + + static inline int rate_supported(struct ieee80211_sta *sta, + enum ieee80211_band band, +diff --git a/include/net/neighbour.h b/include/net/neighbour.h +index 7277caf..fd095bc 100644 +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -163,7 +163,7 @@ struct neigh_ops { + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +-}; ++} __do_const; + + struct pneigh_entry { + struct pneigh_entry *next; +@@ -203,7 +203,6 @@ struct neigh_table { + void (*proxy_redo)(struct sk_buff *skb); + char *id; + struct neigh_parms parms; +- /* HACK. gc_* should follow parms without a gap! 
*/ + int gc_interval; + int gc_thresh1; + int gc_thresh2; +@@ -218,7 +217,7 @@ struct neigh_table { + struct neigh_statistics __percpu *stats; + struct neigh_hash_table __rcu *nht; + struct pneigh_entry **phash_buckets; +-}; ++} __randomize_layout; + + static inline int neigh_parms_family(struct neigh_parms *p) + { +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index 991dcd9..ab58d00 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -124,8 +124,8 @@ struct net { + struct netns_ipvs *ipvs; + #endif + struct sock *diag_nlsk; +- atomic_t fnhe_genid; +-}; ++ atomic_unchecked_t fnhe_genid; ++} __randomize_layout; + + /* + * ifindex generation is per-net namespace, and loopback is +@@ -289,7 +289,11 @@ static inline struct net *read_pnet(struct net * const *pnet) + #define __net_init __init + #define __net_exit __exit_refok + #define __net_initdata __initdata ++#ifdef CONSTIFY_PLUGIN + #define __net_initconst __initconst ++#else ++#define __net_initconst __initdata ++#endif + #endif + + struct pernet_operations { +@@ -299,7 +303,7 @@ struct pernet_operations { + void (*exit_batch)(struct list_head *net_exit_list); + int *id; + size_t size; +-}; ++} __do_const; + + /* + * Use these carefully. If you implement a network device and it +@@ -347,23 +351,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header) + + static inline int rt_genid_ipv4(struct net *net) + { +- return atomic_read(&net->ipv4.rt_genid); ++ return atomic_read_unchecked(&net->ipv4.rt_genid); + } + + static inline void rt_genid_bump_ipv4(struct net *net) + { +- atomic_inc(&net->ipv4.rt_genid); ++ atomic_inc_unchecked(&net->ipv4.rt_genid); + } + + #if IS_ENABLED(CONFIG_IPV6) + static inline int rt_genid_ipv6(struct net *net) + { +- return atomic_read(&net->ipv6.rt_genid); ++ return atomic_read_unchecked(&net->ipv6.rt_genid); + } + + static inline void rt_genid_bump_ipv6(struct net *net) + { +- atomic_inc(&net->ipv6.rt_genid); ++ atomic_inc_unchecked(&net->ipv6.rt_genid); + } + #else + static inline int rt_genid_ipv6(struct net *net) +@@ -385,12 +389,12 @@ static inline void rt_genid_bump_all(struct net *net) + + static inline int fnhe_genid(struct net *net) + { +- return atomic_read(&net->fnhe_genid); ++ return atomic_read_unchecked(&net->fnhe_genid); + } + + static inline void fnhe_genid_bump(struct net *net) + { +- atomic_inc(&net->fnhe_genid); ++ atomic_inc_unchecked(&net->fnhe_genid); + } + + #endif /* __NET_NET_NAMESPACE_H */ +diff --git a/include/net/netdma.h b/include/net/netdma.h +index 8ba8ce2..99b7fff 100644 +--- a/include/net/netdma.h ++++ b/include/net/netdma.h +@@ -24,7 +24,7 @@ + #include <linux/dmaengine.h> + #include <linux/skbuff.h> + +-int dma_skb_copy_datagram_iovec(struct dma_chan* chan, ++int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan, + struct sk_buff *skb, int offset, struct iovec *to, + size_t len, struct dma_pinned_list *pinned_list); + +diff --git a/include/net/netlink.h b/include/net/netlink.h +index 2b47eaa..6d5bcc2 100644 +--- a/include/net/netlink.h ++++ b/include/net/netlink.h +@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb) + static inline void nlmsg_trim(struct sk_buff *skb, const void *mark) + { + if (mark) +- skb_trim(skb, (unsigned char *) mark - skb->data); ++ skb_trim(skb, (const unsigned char *) mark - skb->data); + } + + /** +diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h +index fbcc7fa..03c7e51 100644 +--- 
a/include/net/netns/conntrack.h ++++ b/include/net/netns/conntrack.h +@@ -12,10 +12,10 @@ struct nf_conntrack_ecache; + struct nf_proto_net { + #ifdef CONFIG_SYSCTL + struct ctl_table_header *ctl_table_header; +- struct ctl_table *ctl_table; ++ ctl_table_no_const *ctl_table; + #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT + struct ctl_table_header *ctl_compat_header; +- struct ctl_table *ctl_compat_table; ++ ctl_table_no_const *ctl_compat_table; + #endif + #endif + unsigned int users; +@@ -58,7 +58,7 @@ struct nf_ip_net { + struct nf_icmp_net icmpv6; + #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) + struct ctl_table_header *ctl_table_header; +- struct ctl_table *ctl_table; ++ ctl_table_no_const *ctl_table; + #endif + }; + +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index 80f500a..f0c23c2 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -74,7 +74,7 @@ struct netns_ipv4 { + + kgid_t sysctl_ping_group_range[2]; + +- atomic_t dev_addr_genid; ++ atomic_unchecked_t dev_addr_genid; + + #ifdef CONFIG_IP_MROUTE + #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES +@@ -84,6 +84,6 @@ struct netns_ipv4 { + struct fib_rules_ops *mr_rules_ops; + #endif + #endif +- atomic_t rt_genid; ++ atomic_unchecked_t rt_genid; + }; + #endif +diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h +index 21edaf1..4c5faae 100644 +--- a/include/net/netns/ipv6.h ++++ b/include/net/netns/ipv6.h +@@ -73,8 +73,8 @@ struct netns_ipv6 { + struct fib_rules_ops *mr6_rules_ops; + #endif + #endif +- atomic_t dev_addr_genid; +- atomic_t rt_genid; ++ atomic_unchecked_t dev_addr_genid; ++ atomic_unchecked_t rt_genid; + }; + + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) +diff --git a/include/net/ping.h b/include/net/ping.h +index 026479b..d9b2829 100644 +--- a/include/net/ping.h ++++ b/include/net/ping.h +@@ -54,7 +54,7 @@ struct ping_iter_state { + + extern struct proto ping_prot; + #if IS_ENABLED(CONFIG_IPV6) +-extern struct pingv6_ops pingv6_ops; ++extern struct pingv6_ops *pingv6_ops; + #endif + + struct pingfakehdr { +diff --git a/include/net/protocol.h b/include/net/protocol.h +index a7e986b..dc67bce 100644 +--- a/include/net/protocol.h ++++ b/include/net/protocol.h +@@ -49,7 +49,7 @@ struct net_protocol { + * socket lookup? 
+ */ + icmp_strict_tag_validation:1; +-}; ++} __do_const; + + #if IS_ENABLED(CONFIG_IPV6) + struct inet6_protocol { +@@ -62,7 +62,7 @@ struct inet6_protocol { + u8 type, u8 code, int offset, + __be32 info); + unsigned int flags; /* INET6_PROTO_xxx */ +-}; ++} __do_const; + + #define INET6_PROTO_NOPOLICY 0x1 + #define INET6_PROTO_FINAL 0x2 +diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h +index 661e45d..54c39df 100644 +--- a/include/net/rtnetlink.h ++++ b/include/net/rtnetlink.h +@@ -93,7 +93,7 @@ struct rtnl_link_ops { + int (*fill_slave_info)(struct sk_buff *skb, + const struct net_device *dev, + const struct net_device *slave_dev); +-}; ++} __do_const; + + int __rtnl_link_register(struct rtnl_link_ops *ops); + void __rtnl_link_unregister(struct rtnl_link_ops *ops); +diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h +index 4a5b9a3..ca27d73 100644 +--- a/include/net/sctp/checksum.h ++++ b/include/net/sctp/checksum.h +@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, + unsigned int offset) + { + struct sctphdr *sh = sctp_hdr(skb); +- __le32 ret, old = sh->checksum; +- const struct skb_checksum_ops ops = { ++ __le32 ret, old = sh->checksum; ++ static const struct skb_checksum_ops ops = { + .update = sctp_csum_update, + .combine = sctp_csum_combine, + }; +diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h +index 7f4eeb3..37e8fe1 100644 +--- a/include/net/sctp/sm.h ++++ b/include/net/sctp/sm.h +@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long); + typedef struct { + sctp_state_fn_t *fn; + const char *name; +-} sctp_sm_table_entry_t; ++} __do_const sctp_sm_table_entry_t; + + /* A naming convention of "sctp_sf_xxx" applies to all the state functions + * currently in use. +@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *); + __u32 sctp_generate_tsn(const struct sctp_endpoint *); + + /* Extern declarations for major data structures. */ +-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES]; ++extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES]; + + + /* Get the size of a DATA chunk payload. 
*/ +diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h +index 0dfcc92..7967849 100644 +--- a/include/net/sctp/structs.h ++++ b/include/net/sctp/structs.h +@@ -507,7 +507,7 @@ struct sctp_pf { + struct sctp_association *asoc); + void (*addr_v4map) (struct sctp_sock *, union sctp_addr *); + struct sctp_af *af; +-}; ++} __do_const; + + + /* Structure to track chunk fragments that have been acked, but peer +diff --git a/include/net/sock.h b/include/net/sock.h +index 2f7bc43..530dadc 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -348,7 +348,7 @@ struct sock { + unsigned int sk_napi_id; + unsigned int sk_ll_usec; + #endif +- atomic_t sk_drops; ++ atomic_unchecked_t sk_drops; + int sk_rcvbuf; + + struct sk_filter __rcu *sk_filter; +@@ -1036,7 +1036,7 @@ struct proto { + void (*destroy_cgroup)(struct mem_cgroup *memcg); + struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); + #endif +-}; ++} __randomize_layout; + + /* + * Bits in struct cg_proto.flags +@@ -1223,7 +1223,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) + return ret >> PAGE_SHIFT; + } + +-static inline long ++static inline long __intentional_overflow(-1) + sk_memory_allocated(const struct sock *sk) + { + struct proto *prot = sk->sk_prot; +@@ -1368,7 +1368,7 @@ struct sock_iocb { + struct scm_cookie *scm; + struct msghdr *msg, async_msg; + struct kiocb *kiocb; +-}; ++} __randomize_layout; + + static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb) + { +@@ -1830,7 +1830,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) + } + + static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, +- char __user *from, char *to, ++ char __user *from, unsigned char *to, + int copy, int offset) + { + if (skb->ip_summed == CHECKSUM_NONE) { +@@ -2092,7 +2092,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) + } + } + +-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); ++struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); + + /** + * sk_page_frag - return an appropriate page_frag +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 743acce..44a58b0 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -541,7 +541,7 @@ void tcp_retransmit_timer(struct sock *sk); + void tcp_xmit_retransmit_queue(struct sock *); + void tcp_simple_retransmit(struct sock *); + int tcp_trim_head(struct sock *, struct sk_buff *, u32); +-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); ++int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int); + + void tcp_send_probe0(struct sock *); + void tcp_send_partial(struct sock *); +@@ -710,8 +710,8 @@ struct tcp_skb_cb { + struct inet6_skb_parm h6; + #endif + } header; /* For incoming frames */ +- __u32 seq; /* Starting sequence number */ +- __u32 end_seq; /* SEQ + FIN + SYN + datalen */ ++ __u32 seq __intentional_overflow(0); /* Starting sequence number */ ++ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */ + __u32 when; /* used to compute rtt's */ + __u8 tcp_flags; /* TCP header flags. 
(tcp[13]) */ + +@@ -725,7 +725,7 @@ struct tcp_skb_cb { + + __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ + /* 1 byte hole */ +- __u32 ack_seq; /* Sequence number ACK'd */ ++ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */ + }; + + #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index fb5654a..4457522 100644 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -286,7 +286,6 @@ struct xfrm_dst; + struct xfrm_policy_afinfo { + unsigned short family; + struct dst_ops *dst_ops; +- void (*garbage_collect)(struct net *net); + struct dst_entry *(*dst_lookup)(struct net *net, int tos, + const xfrm_address_t *saddr, + const xfrm_address_t *daddr); +@@ -304,7 +303,7 @@ struct xfrm_policy_afinfo { + struct net_device *dev, + const struct flowi *fl); + struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig); +-}; ++} __do_const; + + int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); + int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +@@ -343,7 +342,7 @@ struct xfrm_state_afinfo { + int (*transport_finish)(struct sk_buff *skb, + int async); + void (*local_error)(struct sk_buff *skb, u32 mtu); +-}; ++} __do_const; + + int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); + int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); +@@ -428,7 +427,7 @@ struct xfrm_mode { + struct module *owner; + unsigned int encap; + int flags; +-}; ++} __do_const; + + /* Flags for xfrm_mode. */ + enum { +@@ -525,7 +524,7 @@ struct xfrm_policy { + struct timer_list timer; + + struct flow_cache_object flo; +- atomic_t genid; ++ atomic_unchecked_t genid; + u32 priority; + u32 index; + struct xfrm_mark mark; +@@ -1165,6 +1164,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk) + } + + void xfrm_garbage_collect(struct net *net); ++void xfrm_garbage_collect_deferred(struct net *net); + + #else + +@@ -1203,6 +1203,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, + static inline void xfrm_garbage_collect(struct net *net) + { + } ++static inline void xfrm_garbage_collect_deferred(struct net *net) ++{ ++} + #endif + + static __inline__ +diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h +index 1017e0b..227aa4d 100644 +--- a/include/rdma/iw_cm.h ++++ b/include/rdma/iw_cm.h +@@ -122,7 +122,7 @@ struct iw_cm_verbs { + int backlog); + + int (*destroy_listen)(struct iw_cm_id *cm_id); +-}; ++} __no_const; + + /** + * iw_create_cm_id - Create an IW CM identifier. 
+diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h +index 52beadf..598734c 100644 +--- a/include/scsi/libfc.h ++++ b/include/scsi/libfc.h +@@ -771,6 +771,7 @@ struct libfc_function_template { + */ + void (*disc_stop_final) (struct fc_lport *); + }; ++typedef struct libfc_function_template __no_const libfc_function_template_no_const; + + /** + * struct fc_disc - Discovery context +@@ -875,7 +876,7 @@ struct fc_lport { + struct fc_vport *vport; + + /* Operational Information */ +- struct libfc_function_template tt; ++ libfc_function_template_no_const tt; + u8 link_up; + u8 qfull; + enum fc_lport_state state; +diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h +index b4f1eff..7fdbd46 100644 +--- a/include/scsi/scsi_device.h ++++ b/include/scsi/scsi_device.h +@@ -180,9 +180,9 @@ struct scsi_device { + unsigned int max_device_blocked; /* what device_blocked counts down from */ + #define SCSI_DEFAULT_DEVICE_BLOCKED 3 + +- atomic_t iorequest_cnt; +- atomic_t iodone_cnt; +- atomic_t ioerr_cnt; ++ atomic_unchecked_t iorequest_cnt; ++ atomic_unchecked_t iodone_cnt; ++ atomic_unchecked_t ioerr_cnt; + + struct device sdev_gendev, + sdev_dev; +diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h +index b797e8f..8e2c3aa 100644 +--- a/include/scsi/scsi_transport_fc.h ++++ b/include/scsi/scsi_transport_fc.h +@@ -751,7 +751,8 @@ struct fc_function_template { + unsigned long show_host_system_hostname:1; + + unsigned long disable_target_scan:1; +-}; ++} __do_const; ++typedef struct fc_function_template __no_const fc_function_template_no_const; + + + /** +diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h +index ae6c3b8..fd748ac 100644 +--- a/include/sound/compress_driver.h ++++ b/include/sound/compress_driver.h +@@ -128,7 +128,7 @@ struct snd_compr_ops { + struct snd_compr_caps *caps); + int (*get_codec_caps) (struct snd_compr_stream *stream, + struct snd_compr_codec_caps *codec); +-}; ++} __no_const; + + /** + * struct snd_compr: Compressed device +diff --git a/include/sound/soc.h b/include/sound/soc.h +index 9a00147..d814573 100644 +--- a/include/sound/soc.h ++++ b/include/sound/soc.h +@@ -770,7 +770,7 @@ struct snd_soc_codec_driver { + /* probe ordering - for components with runtime dependencies */ + int probe_order; + int remove_order; +-}; ++} __do_const; + + /* SoC platform interface */ + struct snd_soc_platform_driver { +@@ -816,7 +816,7 @@ struct snd_soc_platform_driver { + unsigned int (*read)(struct snd_soc_platform *, unsigned int); + int (*write)(struct snd_soc_platform *, unsigned int, unsigned int); + int (*bespoke_trigger)(struct snd_pcm_substream *, int); +-}; ++} __do_const; + + struct snd_soc_platform { + const char *name; +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 1772fad..282e3e2 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -754,7 +754,7 @@ struct se_device { + atomic_long_t write_bytes; + /* Active commands on this virtual SE device */ + atomic_t simple_cmds; +- atomic_t dev_ordered_id; ++ atomic_unchecked_t dev_ordered_id; + atomic_t dev_ordered_sync; + atomic_t dev_qf_count; + int export_count; +diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h +new file mode 100644 +index 0000000..fb634b7 +--- /dev/null ++++ b/include/trace/events/fs.h +@@ -0,0 +1,53 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM fs ++ ++#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_FS_H ++ 
++#include <linux/fs.h> ++#include <linux/tracepoint.h> ++ ++TRACE_EVENT(do_sys_open, ++ ++ TP_PROTO(const char *filename, int flags, int mode), ++ ++ TP_ARGS(filename, flags, mode), ++ ++ TP_STRUCT__entry( ++ __string( filename, filename ) ++ __field( int, flags ) ++ __field( int, mode ) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(filename, filename); ++ __entry->flags = flags; ++ __entry->mode = mode; ++ ), ++ ++ TP_printk("\"%s\" %x %o", ++ __get_str(filename), __entry->flags, __entry->mode) ++); ++ ++TRACE_EVENT(open_exec, ++ ++ TP_PROTO(const char *filename), ++ ++ TP_ARGS(filename), ++ ++ TP_STRUCT__entry( ++ __string( filename, filename ) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(filename, filename); ++ ), ++ ++ TP_printk("\"%s\"", ++ __get_str(filename)) ++); ++ ++#endif /* _TRACE_FS_H */ ++ ++/* This part must be outside protection */ ++#include <trace/define_trace.h> +diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h +index 1c09820..7f5ec79 100644 +--- a/include/trace/events/irq.h ++++ b/include/trace/events/irq.h +@@ -36,7 +36,7 @@ struct softirq_action; + */ + TRACE_EVENT(irq_handler_entry, + +- TP_PROTO(int irq, struct irqaction *action), ++ TP_PROTO(int irq, const struct irqaction *action), + + TP_ARGS(irq, action), + +@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry, + */ + TRACE_EVENT(irq_handler_exit, + +- TP_PROTO(int irq, struct irqaction *action, int ret), ++ TP_PROTO(int irq, const struct irqaction *action, int ret), + + TP_ARGS(irq, action, ret), + +diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h +index 7caf44c..23c6f27 100644 +--- a/include/uapi/linux/a.out.h ++++ b/include/uapi/linux/a.out.h +@@ -39,6 +39,14 @@ enum machine_type { + M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ + }; + ++/* Constants for the N_FLAGS field */ ++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ ++ + #if !defined (N_MAGIC) + #define N_MAGIC(exec) ((exec).a_info & 0xffff) + #endif +diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h +index 22b6ad3..aeba37e 100644 +--- a/include/uapi/linux/bcache.h ++++ b/include/uapi/linux/bcache.h +@@ -5,6 +5,7 @@ + * Bcache on disk data structures + */ + ++#include <linux/compiler.h> + #include <asm/types.h> + + #define BITMASK(name, type, field, offset, size) \ +@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \ + /* Btree keys - all units are in sectors */ + + struct bkey { +- __u64 high; +- __u64 low; ++ __u64 high __intentional_overflow(-1); ++ __u64 low __intentional_overflow(-1); + __u64 ptr[]; + }; + +diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h +index d876736..ccce5c0 100644 +--- a/include/uapi/linux/byteorder/little_endian.h ++++ b/include/uapi/linux/byteorder/little_endian.h +@@ -42,51 +42,51 @@ + + static inline __le64 __cpu_to_le64p(const __u64 *p) + { +- return (__force __le64)*p; ++ return (__force const __le64)*p; + } +-static inline __u64 __le64_to_cpup(const __le64 *p) ++static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p) + { +- return (__force __u64)*p; ++ return (__force const __u64)*p; + } + static inline __le32 __cpu_to_le32p(const __u32 *p) + { +- return (__force 
__le32)*p; ++ return (__force const __le32)*p; + } + static inline __u32 __le32_to_cpup(const __le32 *p) + { +- return (__force __u32)*p; ++ return (__force const __u32)*p; + } + static inline __le16 __cpu_to_le16p(const __u16 *p) + { +- return (__force __le16)*p; ++ return (__force const __le16)*p; + } + static inline __u16 __le16_to_cpup(const __le16 *p) + { +- return (__force __u16)*p; ++ return (__force const __u16)*p; + } + static inline __be64 __cpu_to_be64p(const __u64 *p) + { +- return (__force __be64)__swab64p(p); ++ return (__force const __be64)__swab64p(p); + } + static inline __u64 __be64_to_cpup(const __be64 *p) + { +- return __swab64p((__u64 *)p); ++ return __swab64p((const __u64 *)p); + } + static inline __be32 __cpu_to_be32p(const __u32 *p) + { +- return (__force __be32)__swab32p(p); ++ return (__force const __be32)__swab32p(p); + } +-static inline __u32 __be32_to_cpup(const __be32 *p) ++static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p) + { +- return __swab32p((__u32 *)p); ++ return __swab32p((const __u32 *)p); + } + static inline __be16 __cpu_to_be16p(const __u16 *p) + { +- return (__force __be16)__swab16p(p); ++ return (__force const __be16)__swab16p(p); + } + static inline __u16 __be16_to_cpup(const __be16 *p) + { +- return __swab16p((__u16 *)p); ++ return __swab16p((const __u16 *)p); + } + #define __cpu_to_le64s(x) do { (void)(x); } while (0) + #define __le64_to_cpus(x) do { (void)(x); } while (0) +diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h +index ef6103b..d4e65dd 100644 +--- a/include/uapi/linux/elf.h ++++ b/include/uapi/linux/elf.h +@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword; + #define PT_GNU_EH_FRAME 0x6474e550 + + #define PT_GNU_STACK (PT_LOOS + 0x474e551) ++#define PT_GNU_RELRO (PT_LOOS + 0x474e552) ++ ++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) ++ ++/* Constants for the e_flags field */ ++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + + /* + * Extended Numbering +@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword; + #define DT_DEBUG 21 + #define DT_TEXTREL 22 + #define DT_JMPREL 23 ++#define DT_FLAGS 30 ++ #define DF_TEXTREL 0x00000004 + #define DT_ENCODING 32 + #define OLD_DT_LOOS 0x60000000 + #define DT_LOOS 0x6000000d +@@ -240,6 +253,19 @@ typedef struct elf64_hdr { + #define PF_W 0x2 + #define PF_X 0x1 + ++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */ ++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */ ++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */ ++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */ ++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */ ++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */ ++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */ ++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */ ++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */ ++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */ ++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */ ++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */ ++ + typedef struct elf32_phdr{ + Elf32_Word p_type; + Elf32_Off p_offset; +@@ -332,6 +358,8 @@ typedef struct elf64_shdr { + #define EI_OSABI 7 + #define EI_PAD 8 + ++#define EI_PAX 14 ++ + #define ELFMAG0 0x7f /* EI_MAG */ + 
#define ELFMAG1 'E' + #define ELFMAG2 'L' +diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h +index aa169c4..6a2771d 100644 +--- a/include/uapi/linux/personality.h ++++ b/include/uapi/linux/personality.h +@@ -30,6 +30,7 @@ enum { + #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ + ADDR_NO_RANDOMIZE | \ + ADDR_COMPAT_LAYOUT | \ ++ ADDR_LIMIT_3GB | \ + MMAP_PAGE_ZERO) + + /* +diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h +index 7530e74..e714828 100644 +--- a/include/uapi/linux/screen_info.h ++++ b/include/uapi/linux/screen_info.h +@@ -43,7 +43,8 @@ struct screen_info { + __u16 pages; /* 0x32 */ + __u16 vesa_attributes; /* 0x34 */ + __u32 capabilities; /* 0x36 */ +- __u8 _reserved[6]; /* 0x3a */ ++ __u16 vesapm_size; /* 0x3a */ ++ __u8 _reserved[4]; /* 0x3c */ + } __attribute__((packed)); + + #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ +diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h +index 0e011eb..82681b1 100644 +--- a/include/uapi/linux/swab.h ++++ b/include/uapi/linux/swab.h +@@ -43,7 +43,7 @@ + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +-static inline __attribute_const__ __u16 __fswab16(__u16 val) ++static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val) + { + #ifdef __HAVE_BUILTIN_BSWAP16__ + return __builtin_bswap16(val); +@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val) + #endif + } + +-static inline __attribute_const__ __u32 __fswab32(__u32 val) ++static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val) + { + #ifdef __HAVE_BUILTIN_BSWAP32__ + return __builtin_bswap32(val); +@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val) + #endif + } + +-static inline __attribute_const__ __u64 __fswab64(__u64 val) ++static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val) + { + #ifdef __HAVE_BUILTIN_BSWAP64__ + return __builtin_bswap64(val); +diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h +index 6d67213..552fdd9 100644 +--- a/include/uapi/linux/sysctl.h ++++ b/include/uapi/linux/sysctl.h +@@ -155,8 +155,6 @@ enum + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ + }; + +- +- + /* CTL_VM names: */ + enum + { +diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h +index fe94bb9..c9e51c2 100644 +--- a/include/uapi/linux/videodev2.h ++++ b/include/uapi/linux/videodev2.h +@@ -1227,7 +1227,7 @@ struct v4l2_ext_control { + union { + __s32 value; + __s64 value64; +- char *string; ++ char __user *string; + }; + } __attribute__ ((packed)); + +diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h +index c38355c..17a57bc 100644 +--- a/include/uapi/linux/xattr.h ++++ b/include/uapi/linux/xattr.h +@@ -73,5 +73,9 @@ + #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" + #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT + ++/* User namespace */ ++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax." 
++#define XATTR_PAX_FLAGS_SUFFIX "flags" ++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX + + #endif /* _UAPI_LINUX_XATTR_H */ +diff --git a/include/video/udlfb.h b/include/video/udlfb.h +index f9466fa..f4e2b81 100644 +--- a/include/video/udlfb.h ++++ b/include/video/udlfb.h +@@ -53,10 +53,10 @@ struct dlfb_data { + u32 pseudo_palette[256]; + int blank_mode; /*one of FB_BLANK_ */ + /* blit-only rendering path metrics, exposed through sysfs */ +- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */ +- atomic_t bytes_identical; /* saved effort with backbuffer comparison */ +- atomic_t bytes_sent; /* to usb, after compression including overhead */ +- atomic_t cpu_kcycles_used; /* transpired during pixel processing */ ++ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */ ++ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */ ++ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */ ++ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */ + }; + + #define NR_USB_REQUEST_I2C_SUB_IO 0x02 +diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h +index 30f5362..8ed8ac9 100644 +--- a/include/video/uvesafb.h ++++ b/include/video/uvesafb.h +@@ -122,6 +122,7 @@ struct uvesafb_par { + u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */ + u8 pmi_setpal; /* PMI for palette changes */ + u16 *pmi_base; /* protected mode interface location */ ++ u8 *pmi_code; /* protected mode code location */ + void *pmi_start; + void *pmi_pal; + u8 *vbe_state_orig; /* +diff --git a/init/Kconfig b/init/Kconfig +index 93c5ef0..ac92caa 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1079,6 +1079,7 @@ endif # CGROUPS + + config CHECKPOINT_RESTORE + bool "Checkpoint/restore support" if EXPERT ++ depends on !GRKERNSEC + default n + help + Enables additional kernel features in a sake of checkpoint/restore. +@@ -1545,7 +1546,7 @@ config SLUB_DEBUG + + config COMPAT_BRK + bool "Disable heap randomization" +- default y ++ default n + help + Randomizing heap placement makes heap exploits harder, but it + also breaks ancient binaries (including anything libc5 based). +@@ -1833,7 +1834,7 @@ config INIT_ALL_POSSIBLE + config STOP_MACHINE + bool + default y +- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU ++ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC + help + Need stop_machine() primitive. + +diff --git a/init/Makefile b/init/Makefile +index 7bc47ee..6da2dc7 100644 +--- a/init/Makefile ++++ b/init/Makefile +@@ -2,6 +2,9 @@ + # Makefile for the linux kernel. 
+ # + ++ccflags-y := $(GCC_PLUGINS_CFLAGS) ++asflags-y := $(GCC_PLUGINS_AFLAGS) ++ + obj-y := main.o version.o mounts.o + ifneq ($(CONFIG_BLK_DEV_INITRD),y) + obj-y += noinitramfs.o +diff --git a/init/do_mounts.c b/init/do_mounts.c +index 8e5addc..c96ea61 100644 +--- a/init/do_mounts.c ++++ b/init/do_mounts.c +@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page) + static int __init do_mount_root(char *name, char *fs, int flags, void *data) + { + struct super_block *s; +- int err = sys_mount(name, "/root", fs, flags, data); ++ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data); + if (err) + return err; + +- sys_chdir("/root"); ++ sys_chdir((const char __force_user *)"/root"); + s = current->fs->pwd.dentry->d_sb; + ROOT_DEV = s->s_dev; + printk(KERN_INFO +@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...) + va_start(args, fmt); + vsprintf(buf, fmt, args); + va_end(args); +- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0); ++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0); + if (fd >= 0) { + sys_ioctl(fd, FDEJECT, 0); + sys_close(fd); + } + printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); +- fd = sys_open("/dev/console", O_RDWR, 0); ++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0); + if (fd >= 0) { + sys_ioctl(fd, TCGETS, (long)&termios); + termios.c_lflag &= ~ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); +- sys_read(fd, &c, 1); ++ sys_read(fd, (char __user *)&c, 1); + termios.c_lflag |= ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); + sys_close(fd); +@@ -589,8 +589,8 @@ void __init prepare_namespace(void) + mount_root(); + out: + devtmpfs_mount("dev"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((const char __force_user *)"."); + } + + static bool is_tmpfs; +diff --git a/init/do_mounts.h b/init/do_mounts.h +index f5b978a..69dbfe8 100644 +--- a/init/do_mounts.h ++++ b/init/do_mounts.h +@@ -15,15 +15,15 @@ extern int root_mountflags; + + static inline int create_dev(char *name, dev_t dev) + { +- sys_unlink(name); +- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); ++ sys_unlink((char __force_user *)name); ++ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev)); + } + + #if BITS_PER_LONG == 32 + static inline u32 bstat(char *name) + { + struct stat64 stat; +- if (sys_stat64(name, &stat) != 0) ++ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +@@ -35,7 +35,7 @@ static inline u32 bstat(char *name) + static inline u32 bstat(char *name) + { + struct stat stat; +- if (sys_newstat(name, &stat) != 0) ++ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c +index 3e0878e..8a9d7a0 100644 +--- a/init/do_mounts_initrd.c ++++ b/init/do_mounts_initrd.c +@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new) + { + sys_unshare(CLONE_FS | CLONE_FILES); + /* stdin/stdout/stderr for /linuxrc */ +- sys_open("/dev/console", O_RDWR, 0); ++ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0); + sys_dup(0); + sys_dup(0); + /* move initrd over / and chdir/chroot in initrd root */ +- sys_chdir("/root"); +- 
sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_chdir((const char __force_user *)"/root"); ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((const char __force_user *)"."); + sys_setsid(); + return 0; + } +@@ -59,8 +59,8 @@ static void __init handle_initrd(void) + create_dev("/dev/root.old", Root_RAM0); + /* mount initrd on rootfs' /root */ + mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); +- sys_mkdir("/old", 0700); +- sys_chdir("/old"); ++ sys_mkdir((const char __force_user *)"/old", 0700); ++ sys_chdir((const char __force_user *)"/old"); + + /* try loading default modules from initrd */ + load_default_modules(); +@@ -80,31 +80,31 @@ static void __init handle_initrd(void) + current->flags &= ~PF_FREEZER_SKIP; + + /* move initrd to rootfs' /old */ +- sys_mount("..", ".", NULL, MS_MOVE, NULL); ++ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL); + /* switch root and cwd back to / of rootfs */ +- sys_chroot(".."); ++ sys_chroot((const char __force_user *)".."); + + if (new_decode_dev(real_root_dev) == Root_RAM0) { +- sys_chdir("/old"); ++ sys_chdir((const char __force_user *)"/old"); + return; + } + +- sys_chdir("/"); ++ sys_chdir((const char __force_user *)"/"); + ROOT_DEV = new_decode_dev(real_root_dev); + mount_root(); + + printk(KERN_NOTICE "Trying to move old root to /initrd ... "); +- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); ++ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL); + if (!error) + printk("okay\n"); + else { +- int fd = sys_open("/dev/root.old", O_RDWR, 0); ++ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0); + if (error == -ENOENT) + printk("/initrd does not exist. Ignored.\n"); + else + printk("failed\n"); + printk(KERN_NOTICE "Unmounting old root\n"); +- sys_umount("/old", MNT_DETACH); ++ sys_umount((char __force_user *)"/old", MNT_DETACH); + printk(KERN_NOTICE "Trying to free ramdisk memory ... "); + if (fd < 0) { + error = fd; +@@ -127,11 +127,11 @@ int __init initrd_load(void) + * mounted in the normal path. + */ + if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { +- sys_unlink("/initrd.image"); ++ sys_unlink((const char __force_user *)"/initrd.image"); + handle_initrd(); + return 1; + } + } +- sys_unlink("/initrd.image"); ++ sys_unlink((const char __force_user *)"/initrd.image"); + return 0; + } +diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c +index 8cb6db5..d729f50 100644 +--- a/init/do_mounts_md.c ++++ b/init/do_mounts_md.c +@@ -180,7 +180,7 @@ static void __init md_setup_drive(void) + partitioned ? 
"_d" : "", minor, + md_setup_args[ent].device_names); + +- fd = sys_open(name, 0, 0); ++ fd = sys_open((char __force_user *)name, 0, 0); + if (fd < 0) { + printk(KERN_ERR "md: open failed - cannot start " + "array %s\n", name); +@@ -243,7 +243,7 @@ static void __init md_setup_drive(void) + * array without it + */ + sys_close(fd); +- fd = sys_open(name, 0, 0); ++ fd = sys_open((char __force_user *)name, 0, 0); + sys_ioctl(fd, BLKRRPART, 0); + } + sys_close(fd); +@@ -293,7 +293,7 @@ static void __init autodetect_raid(void) + + wait_for_device_probe(); + +- fd = sys_open("/dev/md0", 0, 0); ++ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0); + if (fd >= 0) { + sys_ioctl(fd, RAID_AUTORUN, raid_autopart); + sys_close(fd); +diff --git a/init/init_task.c b/init/init_task.c +index ba0a7f36..2bcf1d5 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task); + * Initial thread structure. Alignment of this is handled by a special + * linker map entry. + */ ++#ifdef CONFIG_X86 ++union thread_union init_thread_union __init_task_data; ++#else + union thread_union init_thread_union __init_task_data = + { INIT_THREAD_INFO(init_task) }; ++#endif +diff --git a/init/initramfs.c b/init/initramfs.c +index 93b6139..8d628b7 100644 +--- a/init/initramfs.c ++++ b/init/initramfs.c +@@ -84,7 +84,7 @@ static void __init free_hash(void) + } + } + +-static long __init do_utime(char *filename, time_t mtime) ++static long __init do_utime(char __force_user *filename, time_t mtime) + { + struct timespec t[2]; + +@@ -119,7 +119,7 @@ static void __init dir_utime(void) + struct dir_entry *de, *tmp; + list_for_each_entry_safe(de, tmp, &dir_list, list) { + list_del(&de->list); +- do_utime(de->name, de->mtime); ++ do_utime((char __force_user *)de->name, de->mtime); + kfree(de->name); + kfree(de); + } +@@ -281,7 +281,7 @@ static int __init maybe_link(void) + if (nlink >= 2) { + char *old = find_link(major, minor, ino, mode, collected); + if (old) +- return (sys_link(old, collected) < 0) ? -1 : 1; ++ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? 
-1 : 1; + } + return 0; + } +@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode) + { + struct stat st; + +- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { ++ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) { + if (S_ISDIR(st.st_mode)) +- sys_rmdir(path); ++ sys_rmdir((char __force_user *)path); + else +- sys_unlink(path); ++ sys_unlink((char __force_user *)path); + } + } + +@@ -315,7 +315,7 @@ static int __init do_name(void) + int openflags = O_WRONLY|O_CREAT; + if (ml != 1) + openflags |= O_TRUNC; +- wfd = sys_open(collected, openflags, mode); ++ wfd = sys_open((char __force_user *)collected, openflags, mode); + + if (wfd >= 0) { + sys_fchown(wfd, uid, gid); +@@ -327,17 +327,17 @@ static int __init do_name(void) + } + } + } else if (S_ISDIR(mode)) { +- sys_mkdir(collected, mode); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); ++ sys_mkdir((char __force_user *)collected, mode); ++ sys_chown((char __force_user *)collected, uid, gid); ++ sys_chmod((char __force_user *)collected, mode); + dir_add(collected, mtime); + } else if (S_ISBLK(mode) || S_ISCHR(mode) || + S_ISFIFO(mode) || S_ISSOCK(mode)) { + if (maybe_link() == 0) { +- sys_mknod(collected, mode, rdev); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); +- do_utime(collected, mtime); ++ sys_mknod((char __force_user *)collected, mode, rdev); ++ sys_chown((char __force_user *)collected, uid, gid); ++ sys_chmod((char __force_user *)collected, mode); ++ do_utime((char __force_user *)collected, mtime); + } + } + return 0; +@@ -346,15 +346,15 @@ static int __init do_name(void) + static int __init do_copy(void) + { + if (count >= body_len) { +- sys_write(wfd, victim, body_len); ++ sys_write(wfd, (char __force_user *)victim, body_len); + sys_close(wfd); +- do_utime(vcollected, mtime); ++ do_utime((char __force_user *)vcollected, mtime); + kfree(vcollected); + eat(body_len); + state = SkipIt; + return 0; + } else { +- sys_write(wfd, victim, count); ++ sys_write(wfd, (char __force_user *)victim, count); + body_len -= count; + eat(count); + return 1; +@@ -365,9 +365,9 @@ static int __init do_symlink(void) + { + collected[N_ALIGN(name_len) + body_len] = '\0'; + clean_path(collected, 0); +- sys_symlink(collected + N_ALIGN(name_len), collected); +- sys_lchown(collected, uid, gid); +- do_utime(collected, mtime); ++ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected); ++ sys_lchown((char __force_user *)collected, uid, gid); ++ do_utime((char __force_user *)collected, mtime); + state = SkipIt; + next_state = Reset; + return 0; +diff --git a/init/main.c b/init/main.c +index 58c132d..ac3f3b0 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -97,6 +97,8 @@ extern void radix_tree_init(void); + static inline void mark_rodata_ro(void) { } + #endif + ++extern void grsecurity_init(void); ++ + /* + * Debug helper: via this flag we know that we are in 'early bootup code' + * where only the boot processor is running with IRQ disabled. 
This means +@@ -158,6 +160,75 @@ static int __init set_reset_devices(char *str) + + __setup("reset_devices", set_reset_devices); + ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID); ++static int __init setup_grsec_proc_gid(char *str) ++{ ++ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0)); ++ return 1; ++} ++__setup("grsec_proc_gid=", setup_grsec_proc_gid); ++#endif ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++unsigned long pax_user_shadow_base __read_only; ++EXPORT_SYMBOL(pax_user_shadow_base); ++extern char pax_enter_kernel_user[]; ++extern char pax_exit_kernel_user[]; ++#endif ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF) ++static int __init setup_pax_nouderef(char *str) ++{ ++#ifdef CONFIG_X86_32 ++ unsigned int cpu; ++ struct desc_struct *gdt; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ gdt = get_cpu_gdt_table(cpu); ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3; ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; ++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf; ++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf; ++ } ++ loadsegment(ds, __KERNEL_DS); ++ loadsegment(es, __KERNEL_DS); ++ loadsegment(ss, __KERNEL_DS); ++#else ++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1); ++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1); ++ clone_pgd_mask = ~(pgdval_t)0UL; ++ pax_user_shadow_base = 0UL; ++ setup_clear_cpu_cap(X86_FEATURE_PCID); ++ setup_clear_cpu_cap(X86_FEATURE_INVPCID); ++#endif ++ ++ return 0; ++} ++early_param("pax_nouderef", setup_pax_nouderef); ++ ++#ifdef CONFIG_X86_64 ++static int __init setup_pax_weakuderef(char *str) ++{ ++ if (clone_pgd_mask != ~(pgdval_t)0UL) ++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT; ++ return 1; ++} ++__setup("pax_weakuderef", setup_pax_weakuderef); ++#endif ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++int pax_softmode; ++ ++static int __init setup_pax_softmode(char *str) ++{ ++ get_option(&str, &pax_softmode); ++ return 1; ++} ++__setup("pax_softmode=", setup_pax_softmode); ++#endif ++ + static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; + const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; + static const char *panic_later, *panic_param; +@@ -692,25 +763,24 @@ int __init_or_module do_one_initcall(initcall_t fn) + { + int count = preempt_count(); + int ret; +- char msgbuf[64]; ++ const char *msg1 = "", *msg2 = ""; + + if (initcall_debug) + ret = do_one_initcall_debug(fn); + else + ret = fn(); + +- msgbuf[0] = 0; +- + if (preempt_count() != count) { +- sprintf(msgbuf, "preemption imbalance "); ++ msg1 = " preemption imbalance"; + preempt_count_set(count); + } + if (irqs_disabled()) { +- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); ++ msg2 = " disabled interrupts"; + local_irq_enable(); + } +- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf); ++ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2); + ++ add_latent_entropy(); + return ret; + } + +@@ -817,8 +887,8 @@ static int run_init_process(const char *init_filename) + { + argv_init[0] = init_filename; + return do_execve(getname_kernel(init_filename), +- (const char __user *const __user *)argv_init, +- (const char __user *const __user *)envp_init); ++ (const char __user *const __force_user *)argv_init, ++ (const char __user *const __force_user *)envp_init); + } + + static int try_to_run_init_process(const char *init_filename) +@@ -835,6 +905,10 @@ static int try_to_run_init_process(const char 
*init_filename) + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD ++extern int gr_init_ran; ++#endif ++ + static noinline void __init kernel_init_freeable(void); + + static int __ref kernel_init(void *unused) +@@ -859,6 +933,11 @@ static int __ref kernel_init(void *unused) + ramdisk_execute_command, ret); + } + ++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD ++ /* if no initrd was used, be extra sure we enforce chroot restrictions */ ++ gr_init_ran = 1; ++#endif ++ + /* + * We try each of these until one succeeds. + * +@@ -914,7 +993,7 @@ static noinline void __init kernel_init_freeable(void) + do_basic_setup(); + + /* Open the /dev/console on the rootfs, this should never fail */ +- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) ++ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0) + pr_err("Warning: unable to open an initial console.\n"); + + (void) sys_dup(0); +@@ -927,11 +1006,13 @@ static noinline void __init kernel_init_freeable(void) + if (!ramdisk_execute_command) + ramdisk_execute_command = "/init"; + +- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { ++ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) { + ramdisk_execute_command = NULL; + prepare_namespace(); + } + ++ grsecurity_init(); ++ + /* + * Ok, we have completed the initial bootup, and + * we're essentially up and running. Get rid of the +diff --git a/ipc/compat.c b/ipc/compat.c +index f486b00..442867f 100644 +--- a/ipc/compat.c ++++ b/ipc/compat.c +@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second, + COMPAT_SHMLBA); + if (err < 0) + return err; +- return put_user(raddr, (compat_ulong_t *)compat_ptr(third)); ++ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third)); + } + case SHMDT: + return sys_shmdt(compat_ptr(ptr)); +diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c +index 1702864..797fa84 100644 +--- a/ipc/ipc_sysctl.c ++++ b/ipc/ipc_sysctl.c +@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table) + static int proc_ipc_dointvec(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table ipc_table; ++ ctl_table_no_const ipc_table; + + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); +@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write, + static int proc_ipc_dointvec_minmax(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table ipc_table; ++ ctl_table_no_const ipc_table; + + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); +@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write, + static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table ipc_table; ++ ctl_table_no_const ipc_table; + size_t lenp_bef = *lenp; + int rc; + +@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write, + static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table ipc_table; ++ ctl_table_no_const ipc_table; + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); + +@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val) + static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct 
ctl_table ipc_table; ++ ctl_table_no_const ipc_table; + size_t lenp_bef = *lenp; + int oldval; + int rc; +diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c +index 5bb8bfe..a38ec05 100644 +--- a/ipc/mq_sysctl.c ++++ b/ipc/mq_sysctl.c +@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table) + static int proc_mq_dointvec(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table mq_table; ++ ctl_table_no_const mq_table; + memcpy(&mq_table, table, sizeof(mq_table)); + mq_table.data = get_mq(table); + +@@ -35,7 +35,7 @@ static int proc_mq_dointvec(ctl_table *table, int write, + static int proc_mq_dointvec_minmax(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table mq_table; ++ ctl_table_no_const mq_table; + memcpy(&mq_table, table, sizeof(mq_table)); + mq_table.data = get_mq(table); + +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index c3b3117..1efa933 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); + ++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1); + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { +diff --git a/ipc/msg.c b/ipc/msg.c +index 6498531..b0ff3c8 100644 +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -303,18 +303,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) + return security_msg_queue_associate(msq, msgflg); + } + ++static struct ipc_ops msg_ops = { ++ .getnew = newque, ++ .associate = msg_security, ++ .more_checks = NULL ++}; ++ + SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) + { + struct ipc_namespace *ns; +- struct ipc_ops msg_ops; + struct ipc_params msg_params; + + ns = current->nsproxy->ipc_ns; + +- msg_ops.getnew = newque; +- msg_ops.associate = msg_security; +- msg_ops.more_checks = NULL; +- + msg_params.key = key; + msg_params.flg = msgflg; + +diff --git a/ipc/sem.c b/ipc/sem.c +index bee5554..e9af81dd 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -561,10 +561,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, + return 0; + } + ++static struct ipc_ops sem_ops = { ++ .getnew = newary, ++ .associate = sem_security, ++ .more_checks = sem_more_checks ++}; ++ + SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) + { + struct ipc_namespace *ns; +- struct ipc_ops sem_ops; + struct ipc_params sem_params; + + ns = current->nsproxy->ipc_ns; +@@ -572,10 +577,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) + if (nsems < 0 || nsems > ns->sc_semmsl) + return -EINVAL; + +- sem_ops.getnew = newary; +- sem_ops.associate = sem_security; +- sem_ops.more_checks = sem_more_checks; +- + sem_params.key = key; + sem_params.flg = semflg; + sem_params.u.nsems = nsems; +diff --git a/ipc/shm.c b/ipc/shm.c +index 7645961..afc7f02 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); + static int sysvipc_shm_proc_show(struct seq_file *s, void *it); + #endif + ++#ifdef CONFIG_GRKERNSEC ++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const kuid_t cuid, ++ const int shmid); ++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime); ++#endif ++ + void shm_init_ns(struct ipc_namespace *ns) + { + ns->shm_ctlmax = 
SHMMAX; +@@ -553,6 +561,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + shp->shm_lprid = 0; + shp->shm_atim = shp->shm_dtim = 0; + shp->shm_ctim = get_seconds(); ++#ifdef CONFIG_GRKERNSEC ++ { ++ struct timespec timeval; ++ do_posix_clock_monotonic_gettime(&timeval); ++ ++ shp->shm_createtime = timeval.tv_sec; ++ } ++#endif + shp->shm_segsz = size; + shp->shm_nattch = 0; + shp->shm_file = file; +@@ -606,18 +622,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, + return 0; + } + ++static struct ipc_ops shm_ops = { ++ .getnew = newseg, ++ .associate = shm_security, ++ .more_checks = shm_more_checks ++}; ++ + SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) + { + struct ipc_namespace *ns; +- struct ipc_ops shm_ops; + struct ipc_params shm_params; + + ns = current->nsproxy->ipc_ns; + +- shm_ops.getnew = newseg; +- shm_ops.associate = shm_security; +- shm_ops.more_checks = shm_more_checks; +- + shm_params.key = key; + shm_params.flg = shmflg; + shm_params.u.size = size; +@@ -1088,6 +1105,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + f_mode = FMODE_READ | FMODE_WRITE; + } + if (shmflg & SHM_EXEC) { ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ goto out; ++#endif ++ + prot |= PROT_EXEC; + acc_mode |= S_IXUGO; + } +@@ -1112,6 +1135,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + if (err) + goto out_unlock; + ++#ifdef CONFIG_GRKERNSEC ++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, ++ shp->shm_perm.cuid, shmid) || ++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { ++ err = -EACCES; ++ goto out_unlock; ++ } ++#endif ++ + ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ +@@ -1124,6 +1156,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + path = shp->shm_file->f_path; + path_get(&path); + shp->shm_nattch++; ++#ifdef CONFIG_GRKERNSEC ++ shp->shm_lapid = current->pid; ++#endif + size = i_size_read(path.dentry->d_inode); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); +diff --git a/ipc/util.c b/ipc/util.c +index e1b4c6d..8174204 100644 +--- a/ipc/util.c ++++ b/ipc/util.c +@@ -71,6 +71,8 @@ struct ipc_proc_iface { + int (*show)(struct seq_file *, void *); + }; + ++extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode); ++ + static void ipc_memory_notifier(struct work_struct *work) + { + ipcns_notify(IPCNS_MEMCHANGED); +@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) + granted_mode >>= 6; + else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid)) + granted_mode >>= 3; ++ ++ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode)) ++ return -1; ++ + /* is there some bit set in requested_mode but not in granted_mode? 
*/ + if ((requested_mode & ~granted_mode & 0007) && + !ns_capable(ns->user_ns, CAP_IPC_OWNER)) +diff --git a/kernel/acct.c b/kernel/acct.c +index 8d6e145..33e0b1e 100644 +--- a/kernel/acct.c ++++ b/kernel/acct.c +@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, + */ + flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; +- file->f_op->write(file, (char *)&ac, ++ file->f_op->write(file, (char __force_user *)&ac, + sizeof(acct_t), &file->f_pos); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; + set_fs(fs); +diff --git a/kernel/audit.c b/kernel/audit.c +index 0c9dc86..a891393 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0; + 3) suppressed due to audit_rate_limit + 4) suppressed due to audit_backlog_limit + */ +-static atomic_t audit_lost = ATOMIC_INIT(0); ++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0); + + /* The netlink socket. */ + static struct sock *audit_sock; +@@ -256,7 +256,7 @@ void audit_log_lost(const char *message) + unsigned long now; + int print; + +- atomic_inc(&audit_lost); ++ atomic_inc_unchecked(&audit_lost); + + print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); + +@@ -273,7 +273,7 @@ void audit_log_lost(const char *message) + if (print) { + if (printk_ratelimit()) + pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n", +- atomic_read(&audit_lost), ++ atomic_read_unchecked(&audit_lost), + audit_rate_limit, + audit_backlog_limit); + audit_panic(message); +@@ -803,7 +803,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + s.pid = audit_pid; + s.rate_limit = audit_rate_limit; + s.backlog_limit = audit_backlog_limit; +- s.lost = atomic_read(&audit_lost); ++ s.lost = atomic_read_unchecked(&audit_lost); + s.backlog = skb_queue_len(&audit_skb_queue); + s.version = AUDIT_VERSION_LATEST; + s.backlog_wait_time = audit_backlog_wait_time; +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index 619b58d..e58d957 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -1954,7 +1954,7 @@ int auditsc_get_stamp(struct audit_context *ctx, + } + + /* global counter which is incremented every time something logs in */ +-static atomic_t session_id = ATOMIC_INIT(0); ++static atomic_unchecked_t session_id = ATOMIC_INIT(0); + + static int audit_set_loginuid_perm(kuid_t loginuid) + { +@@ -2023,7 +2023,7 @@ int audit_set_loginuid(kuid_t loginuid) + + /* are we setting or clearing? */ + if (uid_valid(loginuid)) +- sessionid = (unsigned int)atomic_inc_return(&session_id); ++ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id); + + task->sessionid = sessionid; + task->loginuid = loginuid; +diff --git a/kernel/capability.c b/kernel/capability.c +index 1191a44..7c81292 100644 +--- a/kernel/capability.c ++++ b/kernel/capability.c +@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr) + * before modification is attempted and the application + * fails. 
+ */ ++ if (tocopy > ARRAY_SIZE(kdata)) ++ return -EFAULT; ++ + if (copy_to_user(dataptr, kdata, tocopy + * sizeof(struct __user_cap_data_struct))) { + return -EFAULT; +@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t, + int ret; + + rcu_read_lock(); +- ret = security_capable(__task_cred(t), ns, cap); ++ ret = security_capable(__task_cred(t), ns, cap) == 0 && ++ gr_task_is_capable(t, __task_cred(t), cap); + rcu_read_unlock(); + +- return (ret == 0); ++ return ret; + } + + /** +@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t, + int ret; + + rcu_read_lock(); +- ret = security_capable_noaudit(__task_cred(t), ns, cap); ++ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap); + rcu_read_unlock(); + +- return (ret == 0); ++ return ret; + } + + /** +@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap) + BUG(); + } + +- if (security_capable(current_cred(), ns, cap) == 0) { ++ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) { + current->flags |= PF_SUPERPRIV; + return true; + } +@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap) + } + EXPORT_SYMBOL(ns_capable); + ++bool ns_capable_nolog(struct user_namespace *ns, int cap) ++{ ++ if (unlikely(!cap_valid(cap))) { ++ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap); ++ BUG(); ++ } ++ ++ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) { ++ current->flags |= PF_SUPERPRIV; ++ return true; ++ } ++ return false; ++} ++EXPORT_SYMBOL(ns_capable_nolog); ++ + /** + * file_ns_capable - Determine if the file's opener had a capability in effect + * @file: The file we want to check +@@ -432,6 +451,12 @@ bool capable(int cap) + } + EXPORT_SYMBOL(capable); + ++bool capable_nolog(int cap) ++{ ++ return ns_capable_nolog(&init_user_ns, cap); ++} ++EXPORT_SYMBOL(capable_nolog); ++ + /** + * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped + * @inode: The inode in question +@@ -449,3 +474,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap) + kgid_has_mapping(ns, inode->i_gid); + } + EXPORT_SYMBOL(capable_wrt_inode_uidgid); ++ ++bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap) ++{ ++ struct user_namespace *ns = current_user_ns(); ++ ++ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) && ++ kgid_has_mapping(ns, inode->i_gid); ++} ++EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog); +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index 0c753dd..3ce8cca 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -5190,6 +5190,14 @@ static void cgroup_release_agent(struct work_struct *work) + release_list); + list_del_init(&cgrp->release_list); + raw_spin_unlock(&release_list_lock); ++ ++ /* ++ * don't bother calling call_usermodehelper if we haven't ++ * configured a binary to execute ++ */ ++ if (cgrp->root->release_agent_path[0] == '\0') ++ goto continue_free; ++ + pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pathbuf) + goto continue_free; +@@ -5372,7 +5380,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) + struct css_set *cset = link->cset; + struct task_struct *task; + int count = 0; +- seq_printf(seq, "css_set %p\n", cset); ++ seq_printf(seq, "css_set %pK\n", cset); + list_for_each_entry(task, &cset->tasks, cg_list) { + if (count++ > MAX_TASKS_SHOWN_PER_CSS) { + seq_puts(seq, " ...\n"); +diff --git a/kernel/compat.c b/kernel/compat.c +index 
0a09e48..b46b3d78 100644 +--- a/kernel/compat.c ++++ b/kernel/compat.c +@@ -13,6 +13,7 @@ + + #include <linux/linkage.h> + #include <linux/compat.h> ++#include <linux/module.h> + #include <linux/errno.h> + #include <linux/time.h> + #include <linux/signal.h> +@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) + mm_segment_t oldfs; + long ret; + +- restart->nanosleep.rmtp = (struct timespec __user *) &rmt; ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt; + oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = hrtimer_nanosleep_restart(restart); +@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, + oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = hrtimer_nanosleep(&tu, +- rmtp ? (struct timespec __user *)&rmt : NULL, ++ rmtp ? (struct timespec __force_user *)&rmt : NULL, + HRTIMER_MODE_REL, CLOCK_MONOTONIC); + set_fs(oldfs); + +@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_sigpending((old_sigset_t __user *) &s); ++ ret = sys_sigpending((old_sigset_t __force_user *) &s); + set_fs(old_fs); + if (ret == 0) + ret = put_user(s, set); +@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource, + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_old_getrlimit(resource, &r); ++ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r); + set_fs(old_fs); + + if (!ret) { +@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4, + set_fs (KERNEL_DS); + ret = sys_wait4(pid, + (stat_addr ? +- (unsigned int __user *) &status : NULL), +- options, (struct rusage __user *) &r); ++ (unsigned int __force_user *) &status : NULL), ++ options, (struct rusage __force_user *) &r); + set_fs (old_fs); + + if (ret > 0) { +@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid, + memset(&info, 0, sizeof(info)); + + set_fs(KERNEL_DS); +- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options, +- uru ? (struct rusage __user *)&ru : NULL); ++ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options, ++ uru ? 
(struct rusage __force_user *)&ru : NULL); + set_fs(old_fs); + + if ((ret < 0) || (info.si_signo == 0)) +@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_timer_settime(timer_id, flags, +- (struct itimerspec __user *) &newts, +- (struct itimerspec __user *) &oldts); ++ (struct itimerspec __force_user *) &newts, ++ (struct itimerspec __force_user *) &oldts); + set_fs(oldfs); + if (!err && old && put_compat_itimerspec(old, &oldts)) + return -EFAULT; +@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_timer_gettime(timer_id, +- (struct itimerspec __user *) &ts); ++ (struct itimerspec __force_user *) &ts); + set_fs(oldfs); + if (!err && put_compat_itimerspec(setting, &ts)) + return -EFAULT; +@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_settime(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + return err; + } +@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_gettime(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + if (!err && put_compat_timespec(&ts, tp)) + return -EFAULT; +@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock, + + oldfs = get_fs(); + set_fs(KERNEL_DS); +- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc); ++ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc); + set_fs(oldfs); + + err = compat_put_timex(utp, &txc); +@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_getres(which_clock, +- (struct timespec __user *) &ts); ++ (struct timespec __force_user *) &ts); + set_fs(oldfs); + if (!err && tp && put_compat_timespec(&ts, tp)) + return -EFAULT; +@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) + long err; + mm_segment_t oldfs; + struct timespec tu; +- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; ++ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp; + +- restart->nanosleep.rmtp = (struct timespec __user *) &tu; ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu; + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = clock_nanosleep_restart(restart); +@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = sys_clock_nanosleep(which_clock, flags, +- (struct timespec __user *) &in, +- (struct timespec __user *) &out); ++ (struct timespec __force_user *) &in, ++ (struct timespec __force_user *) &out); + set_fs(oldfs); + + if ((err == -ERESTART_RESTARTBLOCK) && rmtp && +@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); ++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t); + set_fs(old_fs); + if (put_compat_timespec(&t, interval)) + return -EFAULT; +diff --git a/kernel/configs.c b/kernel/configs.c +index c18b1f1..b9a0132 100644 +--- a/kernel/configs.c ++++ b/kernel/configs.c +@@ -74,8 +74,19 @@ static int __init ikconfig_init(void) + struct proc_dir_entry *entry; + + /* create the 
current config file */ ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL, ++ &ikconfig_file_ops); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL, ++ &ikconfig_file_ops); ++#endif ++#else + entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, + &ikconfig_file_ops); ++#endif ++ + if (!entry) + return -ENOMEM; + +diff --git a/kernel/cred.c b/kernel/cred.c +index e0573a4..20fb164 100644 +--- a/kernel/cred.c ++++ b/kernel/cred.c +@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk) + validate_creds(cred); + alter_cred_subscribers(cred, -1); + put_cred(cred); ++ ++#ifdef CONFIG_GRKERNSEC_SETXID ++ cred = (struct cred *) tsk->delayed_cred; ++ if (cred != NULL) { ++ tsk->delayed_cred = NULL; ++ validate_creds(cred); ++ alter_cred_subscribers(cred, -1); ++ put_cred(cred); ++ } ++#endif + } + + /** +@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset) + * Always returns 0 thus allowing this function to be tail-called at the end + * of, say, sys_setgid(). + */ +-int commit_creds(struct cred *new) ++static int __commit_creds(struct cred *new) + { + struct task_struct *task = current; + const struct cred *old = task->real_cred; +@@ -430,6 +440,8 @@ int commit_creds(struct cred *new) + + get_cred(new); /* we will require a ref for the subj creds too */ + ++ gr_set_role_label(task, new->uid, new->gid); ++ + /* dumpability changes */ + if (!uid_eq(old->euid, new->euid) || + !gid_eq(old->egid, new->egid) || +@@ -479,6 +491,108 @@ int commit_creds(struct cred *new) + put_cred(old); + return 0; + } ++#ifdef CONFIG_GRKERNSEC_SETXID ++extern int set_user(struct cred *new); ++ ++void gr_delayed_cred_worker(void) ++{ ++ const struct cred *new = current->delayed_cred; ++ struct cred *ncred; ++ ++ current->delayed_cred = NULL; ++ ++ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) { ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ return; ++ } else if (new == NULL) ++ return; ++ ++ ncred = prepare_creds(); ++ if (!ncred) ++ goto die; ++ // uids ++ ncred->uid = new->uid; ++ ncred->euid = new->euid; ++ ncred->suid = new->suid; ++ ncred->fsuid = new->fsuid; ++ // gids ++ ncred->gid = new->gid; ++ ncred->egid = new->egid; ++ ncred->sgid = new->sgid; ++ ncred->fsgid = new->fsgid; ++ // groups ++ if (set_groups(ncred, new->group_info) < 0) { ++ abort_creds(ncred); ++ goto die; ++ } ++ // caps ++ ncred->securebits = new->securebits; ++ ncred->cap_inheritable = new->cap_inheritable; ++ ncred->cap_permitted = new->cap_permitted; ++ ncred->cap_effective = new->cap_effective; ++ ncred->cap_bset = new->cap_bset; ++ ++ if (set_user(ncred)) { ++ abort_creds(ncred); ++ goto die; ++ } ++ ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ ++ __commit_creds(ncred); ++ return; ++die: ++ // from doing get_cred on it when queueing this ++ put_cred(new); ++ do_group_exit(SIGKILL); ++} ++#endif ++ ++int commit_creds(struct cred *new) ++{ ++#ifdef CONFIG_GRKERNSEC_SETXID ++ int ret; ++ int schedule_it = 0; ++ struct task_struct *t; ++ unsigned oldsecurebits = current_cred()->securebits; ++ ++ /* we won't get called with tasklist_lock held for writing ++ and interrupts disabled as the cred struct in that case is ++ init_cred ++ */ ++ if (grsec_enable_setxid && !current_is_single_threaded() 
&& ++ uid_eq(current_uid(), GLOBAL_ROOT_UID) && ++ !uid_eq(new->uid, GLOBAL_ROOT_UID)) { ++ schedule_it = 1; ++ } ++ ret = __commit_creds(new); ++ if (schedule_it) { ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ for (t = next_thread(current); t != current; ++ t = next_thread(t)) { ++ /* we'll check if the thread has uid 0 in ++ * the delayed worker routine ++ */ ++ if (task_securebits(t) == oldsecurebits && ++ t->delayed_cred == NULL) { ++ t->delayed_cred = get_cred(new); ++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID); ++ set_tsk_need_resched(t); ++ } ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ } ++ ++ return ret; ++#else ++ return __commit_creds(new); ++#endif ++} ++ + EXPORT_SYMBOL(commit_creds); + + /** +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c +index 334b398..9145fb1 100644 +--- a/kernel/debug/debug_core.c ++++ b/kernel/debug/debug_core.c +@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock); + */ + static atomic_t masters_in_kgdb; + static atomic_t slaves_in_kgdb; +-static atomic_t kgdb_break_tasklet_var; ++static atomic_unchecked_t kgdb_break_tasklet_var; + atomic_t kgdb_setting_breakpoint; + + struct task_struct *kgdb_usethread; +@@ -133,7 +133,7 @@ int kgdb_single_step; + static pid_t kgdb_sstep_pid; + + /* to keep track of the CPU which is doing the single stepping*/ +-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); ++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); + + /* + * If you are debugging a problem where roundup (the collection of +@@ -541,7 +541,7 @@ return_normal: + * kernel will only try for the value of sstep_tries before + * giving up and continuing on. + */ +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 && + (kgdb_info[cpu].task && + kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { + atomic_set(&kgdb_active, -1); +@@ -639,8 +639,8 @@ cpu_master_loop: + } + + kgdb_restore: +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { +- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step); ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { ++ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step); + if (kgdb_info[sstep_cpu].task) + kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid; + else +@@ -917,18 +917,18 @@ static void kgdb_unregister_callbacks(void) + static void kgdb_tasklet_bpt(unsigned long ing) + { + kgdb_breakpoint(); +- atomic_set(&kgdb_break_tasklet_var, 0); ++ atomic_set_unchecked(&kgdb_break_tasklet_var, 0); + } + + static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0); + + void kgdb_schedule_breakpoint(void) + { +- if (atomic_read(&kgdb_break_tasklet_var) || ++ if (atomic_read_unchecked(&kgdb_break_tasklet_var) || + atomic_read(&kgdb_active) != -1 || + atomic_read(&kgdb_setting_breakpoint)) + return; +- atomic_inc(&kgdb_break_tasklet_var); ++ atomic_inc_unchecked(&kgdb_break_tasklet_var); + tasklet_schedule(&kgdb_tasklet_breakpoint); + } + EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint); +diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c +index 0b097c8..11dd5c5 100644 +--- a/kernel/debug/kdb/kdb_main.c ++++ b/kernel/debug/kdb/kdb_main.c +@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv) + continue; + + kdb_printf("%-20s%8u 0x%p ", mod->name, +- mod->core_size, (void *)mod); ++ mod->core_size_rx + mod->core_size_rw, (void *)mod); + #ifdef CONFIG_MODULE_UNLOAD + kdb_printf("%4ld ", module_refcount(mod)); + 
#endif +@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv) + kdb_printf(" (Loading)"); + else + kdb_printf(" (Live)"); +- kdb_printf(" 0x%p", mod->module_core); ++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); + + #ifdef CONFIG_MODULE_UNLOAD + { +diff --git a/kernel/events/core.c b/kernel/events/core.c +index f774e93..c602612 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -158,8 +158,15 @@ static struct srcu_struct pmus_srcu; + * 0 - disallow raw tracepoint access for unpriv + * 1 - disallow cpu events for unpriv + * 2 - disallow kernel profiling for unpriv ++ * 3 - disallow all unpriv perf event use + */ +-int sysctl_perf_event_paranoid __read_mostly = 1; ++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN ++int sysctl_perf_event_legitimately_concerned __read_mostly = 3; ++#elif defined(CONFIG_GRKERNSEC_HIDESYM) ++int sysctl_perf_event_legitimately_concerned __read_mostly = 2; ++#else ++int sysctl_perf_event_legitimately_concerned __read_mostly = 1; ++#endif + + /* Minimum for 512 kiB + 1 user control page */ + int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ +@@ -185,7 +192,7 @@ void update_perf_cpu_limits(void) + + tmp *= sysctl_perf_cpu_time_max_percent; + do_div(tmp, 100); +- ACCESS_ONCE(perf_sample_allowed_ns) = tmp; ++ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp; + } + + static int perf_rotate_context(struct perf_cpu_context *cpuctx); +@@ -272,7 +279,7 @@ void perf_sample_event_took(u64 sample_len_ns) + update_perf_cpu_limits(); + } + +-static atomic64_t perf_event_id; ++static atomic64_unchecked_t perf_event_id; + + static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, + enum event_type_t event_type); +@@ -3000,7 +3007,7 @@ static void __perf_event_read(void *info) + + static inline u64 perf_event_count(struct perf_event *event) + { +- return local64_read(&event->count) + atomic64_read(&event->child_count); ++ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count); + } + + static u64 perf_event_read(struct perf_event *event) +@@ -3365,9 +3372,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) + mutex_lock(&event->child_mutex); + total += perf_event_read(event); + *enabled += event->total_time_enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + *running += event->total_time_running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + + list_for_each_entry(child, &event->child_list, child_list) { + total += perf_event_read(child); +@@ -3796,10 +3803,10 @@ void perf_event_update_userpage(struct perf_event *event) + userpg->offset -= local64_read(&event->hw.prev_count); + + userpg->time_enabled = enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + + userpg->time_running = running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + + arch_perf_update_userpage(userpg, now); + +@@ -4350,7 +4357,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, + + /* Data. 
*/ + sp = perf_user_stack_pointer(regs); +- rem = __output_copy_user(handle, (void *) sp, dump_size); ++ rem = __output_copy_user(handle, (void __user *) sp, dump_size); + dyn_size = dump_size - rem; + + perf_output_skip(handle, rem); +@@ -4441,11 +4448,11 @@ static void perf_output_read_one(struct perf_output_handle *handle, + values[n++] = perf_event_count(event); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = enabled + +- atomic64_read(&event->child_total_time_enabled); ++ atomic64_read_unchecked(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = running + +- atomic64_read(&event->child_total_time_running); ++ atomic64_read_unchecked(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); +@@ -6724,7 +6731,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, + event->parent = parent_event; + + event->ns = get_pid_ns(task_active_pid_ns(current)); +- event->id = atomic64_inc_return(&perf_event_id); ++ event->id = atomic64_inc_return_unchecked(&perf_event_id); + + event->state = PERF_EVENT_STATE_INACTIVE; + +@@ -7024,6 +7031,11 @@ SYSCALL_DEFINE5(perf_event_open, + if (flags & ~PERF_FLAG_ALL) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN ++ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++#endif ++ + err = perf_copy_attr(attr_uptr, &attr); + if (err) + return err; +@@ -7362,10 +7374,10 @@ static void sync_child_event(struct perf_event *child_event, + /* + * Add back the child's count to the parent's count: + */ +- atomic64_add(child_val, &parent_event->child_count); +- atomic64_add(child_event->total_time_enabled, ++ atomic64_add_unchecked(child_val, &parent_event->child_count); ++ atomic64_add_unchecked(child_event->total_time_enabled, + &parent_event->child_total_time_enabled); +- atomic64_add(child_event->total_time_running, ++ atomic64_add_unchecked(child_event->total_time_running, + &parent_event->child_total_time_running); + + /* +diff --git a/kernel/events/internal.h b/kernel/events/internal.h +index 569b2187..19940d9 100644 +--- a/kernel/events/internal.h ++++ b/kernel/events/internal.h +@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb) + return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); + } + +-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \ ++#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \ + static inline unsigned long \ + func_name(struct perf_output_handle *handle, \ +- const void *buf, unsigned long len) \ ++ const void user *buf, unsigned long len) \ + { \ + unsigned long size, written; \ + \ +@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n) + return 0; + } + +-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) ++DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, ) + + static inline unsigned long + memcpy_skip(void *dst, const void *src, unsigned long n) +@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n) + return 0; + } + +-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip) ++DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, ) + + #ifndef arch_perf_out_copy_user + #define arch_perf_out_copy_user arch_perf_out_copy_user +@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n) + } + #endif + +-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user) ++DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user) + + /* Callchain handling */ + 
extern struct perf_callchain_entry * +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index 307d87c..6466cbe 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -1666,7 +1666,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) + { + struct page *page; + uprobe_opcode_t opcode; +- int result; ++ long result; + + pagefault_disable(); + result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr, +diff --git a/kernel/exit.c b/kernel/exit.c +index 81b3d67..ef189a4 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -173,6 +173,10 @@ void release_task(struct task_struct * p) + struct task_struct *leader; + int zap_leader; + repeat: ++#ifdef CONFIG_NET ++ gr_del_task_from_ip_table(p); ++#endif ++ + /* don't need to get the RCU readlock here - the process is dead and + * can't be modifying its own credentials. But shut RCU-lockdep up */ + rcu_read_lock(); +@@ -330,7 +334,7 @@ int allow_signal(int sig) + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ +- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; ++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2; + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + return 0; +@@ -706,6 +710,8 @@ void do_exit(long code) + struct task_struct *tsk = current; + int group_dead; + ++ set_fs(USER_DS); ++ + profile_task_exit(tsk); + + WARN_ON(blk_needs_flush_plug(tsk)); +@@ -722,7 +728,6 @@ void do_exit(long code) + * mm_release()->clear_child_tid() from writing to a user-controlled + * kernel address. + */ +- set_fs(USER_DS); + + ptrace_event(PTRACE_EVENT_EXIT, code); + +@@ -781,6 +786,9 @@ void do_exit(long code) + tsk->exit_code = code; + taskstats_exit(tsk, group_dead); + ++ gr_acl_handle_psacct(tsk, code); ++ gr_acl_handle_exit(); ++ + exit_mm(tsk); + + if (group_dead) +@@ -900,7 +908,7 @@ SYSCALL_DEFINE1(exit, int, error_code) + * Take down every thread in the group. This is called by fatal signals + * as well as by sys_exit_group (below). 
+ */ +-void ++__noreturn void + do_group_exit(int exit_code) + { + struct signal_struct *sig = current->signal; +diff --git a/kernel/fork.c b/kernel/fork.c +index c44bff8..a3c5876 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -180,6 +180,48 @@ void thread_info_cache_init(void) + # endif + #endif + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk, ++ int node, void **lowmem_stack) ++{ ++ struct page *pages[THREAD_SIZE / PAGE_SIZE]; ++ void *ret = NULL; ++ unsigned int i; ++ ++ *lowmem_stack = alloc_thread_info_node(tsk, node); ++ if (*lowmem_stack == NULL) ++ goto out; ++ ++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) ++ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE)); ++ ++ /* use VM_IOREMAP to gain THREAD_SIZE alignment */ ++ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL); ++ if (ret == NULL) { ++ free_thread_info(*lowmem_stack); ++ *lowmem_stack = NULL; ++ } ++ ++out: ++ return ret; ++} ++ ++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti) ++{ ++ unmap_process_stacks(tsk); ++} ++#else ++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk, ++ int node, void **lowmem_stack) ++{ ++ return alloc_thread_info_node(tsk, node); ++} ++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti) ++{ ++ free_thread_info(ti); ++} ++#endif ++ + /* SLAB cache for signal_struct structures (tsk->signal) */ + static struct kmem_cache *signal_cachep; + +@@ -198,18 +240,22 @@ struct kmem_cache *vm_area_cachep; + /* SLAB cache for mm_struct structures (tsk->mm) */ + static struct kmem_cache *mm_cachep; + +-static void account_kernel_stack(struct thread_info *ti, int account) ++static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account) + { ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack)); ++#else + struct zone *zone = page_zone(virt_to_page(ti)); ++#endif + + mod_zone_page_state(zone, NR_KERNEL_STACK, account); + } + + void free_task(struct task_struct *tsk) + { +- account_kernel_stack(tsk->stack, -1); ++ account_kernel_stack(tsk, tsk->stack, -1); + arch_release_thread_info(tsk->stack); +- free_thread_info(tsk->stack); ++ gr_free_thread_info(tsk, tsk->stack); + rt_mutex_debug_task_free(tsk); + ftrace_graph_exit_task(tsk); + put_seccomp_filter(tsk); +@@ -295,6 +341,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + struct task_struct *tsk; + struct thread_info *ti; + unsigned long *stackend; ++ void *lowmem_stack; + int node = tsk_fork_get_node(orig); + int err; + +@@ -302,7 +349,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + if (!tsk) + return NULL; + +- ti = alloc_thread_info_node(tsk, node); ++ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack); + if (!ti) + goto free_tsk; + +@@ -311,6 +358,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + goto free_ti; + + tsk->stack = ti; ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ tsk->lowmem_stack = lowmem_stack; ++#endif + + setup_thread_stack(tsk, orig); + clear_user_return_notifier(tsk); +@@ -319,7 +369,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + *stackend = STACK_END_MAGIC; /* for overflow detection */ + + #ifdef CONFIG_CC_STACKPROTECTOR +- tsk->stack_canary = get_random_int(); ++ tsk->stack_canary = pax_get_random_long(); + #endif + + /* +@@ 
-333,24 +383,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) + tsk->splice_pipe = NULL; + tsk->task_frag.page = NULL; + +- account_kernel_stack(ti, 1); ++ account_kernel_stack(tsk, ti, 1); + + return tsk; + + free_ti: +- free_thread_info(ti); ++ gr_free_thread_info(tsk, ti); + free_tsk: + free_task_struct(tsk); + return NULL; + } + + #ifdef CONFIG_MMU +-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) ++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt) ++{ ++ struct vm_area_struct *tmp; ++ unsigned long charge; ++ struct file *file; ++ int retval; ++ ++ charge = 0; ++ if (mpnt->vm_flags & VM_ACCOUNT) { ++ unsigned long len = vma_pages(mpnt); ++ ++ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ ++ goto fail_nomem; ++ charge = len; ++ } ++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!tmp) ++ goto fail_nomem; ++ *tmp = *mpnt; ++ tmp->vm_mm = mm; ++ INIT_LIST_HEAD(&tmp->anon_vma_chain); ++ retval = vma_dup_policy(mpnt, tmp); ++ if (retval) ++ goto fail_nomem_policy; ++ if (anon_vma_fork(tmp, mpnt)) ++ goto fail_nomem_anon_vma_fork; ++ tmp->vm_flags &= ~VM_LOCKED; ++ tmp->vm_next = tmp->vm_prev = NULL; ++ tmp->vm_mirror = NULL; ++ file = tmp->vm_file; ++ if (file) { ++ struct inode *inode = file_inode(file); ++ struct address_space *mapping = file->f_mapping; ++ ++ get_file(file); ++ if (tmp->vm_flags & VM_DENYWRITE) ++ atomic_dec(&inode->i_writecount); ++ mutex_lock(&mapping->i_mmap_mutex); ++ if (tmp->vm_flags & VM_SHARED) ++ mapping->i_mmap_writable++; ++ flush_dcache_mmap_lock(mapping); ++ /* insert tmp into the share list, just after mpnt */ ++ if (unlikely(tmp->vm_flags & VM_NONLINEAR)) ++ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear); ++ else ++ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap); ++ flush_dcache_mmap_unlock(mapping); ++ mutex_unlock(&mapping->i_mmap_mutex); ++ } ++ ++ /* ++ * Clear hugetlb-related page reserves for children. This only ++ * affects MAP_PRIVATE mappings. 
Faults generated by the child ++ * are not guaranteed to succeed, even if read-only ++ */ ++ if (is_vm_hugetlb_page(tmp)) ++ reset_vma_resv_huge_pages(tmp); ++ ++ return tmp; ++ ++fail_nomem_anon_vma_fork: ++ mpol_put(vma_policy(tmp)); ++fail_nomem_policy: ++ kmem_cache_free(vm_area_cachep, tmp); ++fail_nomem: ++ vm_unacct_memory(charge); ++ return NULL; ++} ++ ++static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + { + struct vm_area_struct *mpnt, *tmp, *prev, **pprev; + struct rb_node **rb_link, *rb_parent; + int retval; +- unsigned long charge; + + uprobe_start_dup_mmap(); + down_write(&oldmm->mmap_sem); +@@ -379,55 +497,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + + prev = NULL; + for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { +- struct file *file; +- + if (mpnt->vm_flags & VM_DONTCOPY) { + vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, + -vma_pages(mpnt)); + continue; + } +- charge = 0; +- if (mpnt->vm_flags & VM_ACCOUNT) { +- unsigned long len = vma_pages(mpnt); +- +- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ +- goto fail_nomem; +- charge = len; +- } +- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); +- if (!tmp) +- goto fail_nomem; +- *tmp = *mpnt; +- INIT_LIST_HEAD(&tmp->anon_vma_chain); +- retval = vma_dup_policy(mpnt, tmp); +- if (retval) +- goto fail_nomem_policy; +- tmp->vm_mm = mm; +- if (anon_vma_fork(tmp, mpnt)) +- goto fail_nomem_anon_vma_fork; +- tmp->vm_flags &= ~VM_LOCKED; +- tmp->vm_next = tmp->vm_prev = NULL; +- file = tmp->vm_file; +- if (file) { +- struct inode *inode = file_inode(file); +- struct address_space *mapping = file->f_mapping; +- +- get_file(file); +- if (tmp->vm_flags & VM_DENYWRITE) +- atomic_dec(&inode->i_writecount); +- mutex_lock(&mapping->i_mmap_mutex); +- if (tmp->vm_flags & VM_SHARED) +- mapping->i_mmap_writable++; +- flush_dcache_mmap_lock(mapping); +- /* insert tmp into the share list, just after mpnt */ +- if (unlikely(tmp->vm_flags & VM_NONLINEAR)) +- vma_nonlinear_insert(tmp, +- &mapping->i_mmap_nonlinear); +- else +- vma_interval_tree_insert_after(tmp, mpnt, +- &mapping->i_mmap); +- flush_dcache_mmap_unlock(mapping); +- mutex_unlock(&mapping->i_mmap_mutex); ++ tmp = dup_vma(mm, oldmm, mpnt); ++ if (!tmp) { ++ retval = -ENOMEM; ++ goto out; + } + + /* +@@ -459,6 +537,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + if (retval) + goto out; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) { ++ struct vm_area_struct *mpnt_m; ++ ++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) { ++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm); ++ ++ if (!mpnt->vm_mirror) ++ continue; ++ ++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) { ++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt); ++ mpnt->vm_mirror = mpnt_m; ++ } else { ++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm); ++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror; ++ mpnt_m->vm_mirror->vm_mirror = mpnt_m; ++ mpnt->vm_mirror->vm_mirror = mpnt; ++ } ++ } ++ BUG_ON(mpnt_m); ++ } ++#endif ++ + /* a new mm has just been created */ + arch_dup_mmap(oldmm, mm); + retval = 0; +@@ -468,14 +571,6 @@ out: + up_write(&oldmm->mmap_sem); + uprobe_end_dup_mmap(); + return retval; +-fail_nomem_anon_vma_fork: +- mpol_put(vma_policy(tmp)); +-fail_nomem_policy: +- kmem_cache_free(vm_area_cachep, tmp); +-fail_nomem: +- retval = -ENOMEM; +- 
vm_unacct_memory(charge); +- goto out; + } + + static inline int mm_alloc_pgd(struct mm_struct *mm) +@@ -689,8 +784,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) + return ERR_PTR(err); + + mm = get_task_mm(task); +- if (mm && mm != current->mm && +- !ptrace_may_access(task, mode)) { ++ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) || ++ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) { + mmput(mm); + mm = ERR_PTR(-EACCES); + } +@@ -906,13 +1001,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) + spin_unlock(&fs->lock); + return -EAGAIN; + } +- fs->users++; ++ atomic_inc(&fs->users); + spin_unlock(&fs->lock); + return 0; + } + tsk->fs = copy_fs_struct(fs); + if (!tsk->fs) + return -ENOMEM; ++ /* Carry through gr_chroot_dentry and is_chrooted instead ++ of recomputing it here. Already copied when the task struct ++ is duplicated. This allows pivot_root to not be treated as ++ a chroot ++ */ ++ //gr_set_chroot_entries(tsk, &tsk->fs->root); ++ + return 0; + } + +@@ -1130,7 +1232,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) + * parts of the process environment (as per the clone + * flags). The actual kick-off is left to the caller. + */ +-static struct task_struct *copy_process(unsigned long clone_flags, ++static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags, + unsigned long stack_start, + unsigned long stack_size, + int __user *child_tidptr, +@@ -1202,6 +1304,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); + #endif + retval = -EAGAIN; ++ ++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0); ++ + if (atomic_read(&p->real_cred->user->processes) >= + task_rlimit(p, RLIMIT_NPROC)) { + if (p->real_cred->user != INIT_USER && +@@ -1449,6 +1554,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, + goto bad_fork_free_pid; + } + ++ /* synchronizes with gr_set_acls() ++ we need to call this past the point of no return for fork() ++ */ ++ gr_copy_label(p); ++ + if (likely(p->pid)) { + ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); + +@@ -1539,6 +1649,8 @@ bad_fork_cleanup_count: + bad_fork_free: + free_task(p); + fork_out: ++ gr_log_forkfail(retval); ++ + return ERR_PTR(retval); + } + +@@ -1600,6 +1712,7 @@ long do_fork(unsigned long clone_flags, + + p = copy_process(clone_flags, stack_start, stack_size, + child_tidptr, NULL, trace); ++ add_latent_entropy(); + /* + * Do this prior waking up the new thread - the thread pointer + * might get invalid after that point, if the thread exits quickly. 
+@@ -1616,6 +1729,8 @@ long do_fork(unsigned long clone_flags, + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + ++ gr_handle_brute_check(); ++ + if (clone_flags & CLONE_VFORK) { + p->vfork_done = &vfork; + init_completion(&vfork); +@@ -1734,7 +1849,7 @@ void __init proc_caches_init(void) + mm_cachep = kmem_cache_create("mm_struct", + sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); +- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); ++ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE); + mmap_init(); + nsproxy_cache_init(); + } +@@ -1774,7 +1889,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) + return 0; + + /* don't need lock here; in the worst case we'll do useless copy */ +- if (fs->users == 1) ++ if (atomic_read(&fs->users) == 1) + return 0; + + *new_fsp = copy_fs_struct(fs); +@@ -1881,7 +1996,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) + fs = current->fs; + spin_lock(&fs->lock); + current->fs = new_fs; +- if (--fs->users) ++ gr_set_chroot_entries(current, &current->fs->root); ++ if (atomic_dec_return(&fs->users)) + new_fs = NULL; + else + new_fs = fs; +diff --git a/kernel/futex.c b/kernel/futex.c +index e3087af..8e3b90f 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -54,6 +54,7 @@ + #include <linux/mount.h> + #include <linux/pagemap.h> + #include <linux/syscalls.h> ++#include <linux/ptrace.h> + #include <linux/signal.h> + #include <linux/export.h> + #include <linux/magic.h> +@@ -188,7 +189,7 @@ struct futex_pi_state { + atomic_t refcount; + + union futex_key key; +-}; ++} __randomize_layout; + + /** + * struct futex_q - The hashed futex queue entry, one per waiting task +@@ -222,7 +223,7 @@ struct futex_q { + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; +-}; ++} __randomize_layout; + + static const struct futex_q futex_q_init = { + /* list gets initialized in queue_me()*/ +@@ -380,6 +381,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) + struct page *page, *page_head; + int err, ro = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE) ++ return -EFAULT; ++#endif ++ + /* + * The futex address must be "naturally" aligned. + */ +@@ -579,7 +585,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, + + static int get_futex_value_locked(u32 *dest, u32 __user *from) + { +- int ret; ++ unsigned long ret; + + pagefault_disable(); + ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); +@@ -3019,6 +3025,7 @@ static void __init futex_detect_cmpxchg(void) + { + #ifndef CONFIG_HAVE_FUTEX_CMPXCHG + u32 curval; ++ mm_segment_t oldfs; + + /* + * This will fail and we want it. Some arch implementations do +@@ -3030,8 +3037,11 @@ static void __init futex_detect_cmpxchg(void) + * implementation, the non-functional ones will return + * -ENOSYS. 
+ */ ++ oldfs = get_fs(); ++ set_fs(USER_DS); + if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) + futex_cmpxchg_enabled = 1; ++ set_fs(oldfs); + #endif + } + +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +index f9f44fd..29885e4 100644 +--- a/kernel/futex_compat.c ++++ b/kernel/futex_compat.c +@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, + return 0; + } + +-static void __user *futex_uaddr(struct robust_list __user *entry, ++static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry, + compat_long_t futex_offset) + { + compat_uptr_t base = ptr_to_compat(entry); +diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c +index f45b75b..bfac6d5 100644 +--- a/kernel/gcov/base.c ++++ b/kernel/gcov/base.c +@@ -108,11 +108,6 @@ void gcov_enable_events(void) + } + + #ifdef CONFIG_MODULES +-static inline int within(void *addr, void *start, unsigned long size) +-{ +- return ((addr >= start) && (addr < start + size)); +-} +- + /* Update list and generate events when modules are unloaded. */ + static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + void *data) +@@ -127,7 +122,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + + /* Remove entries located in module from linked list. */ + while ((info = gcov_info_next(info))) { +- if (within(info, mod->module_core, mod->core_size)) { ++ if (within_module_core_rw((unsigned long)info, mod)) { + gcov_info_unlink(prev, info); + if (gcov_events_enabled) + gcov_event(GCOV_REMOVE, info); +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c +index 04d0374..e7c3725 100644 +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1461,7 +1461,7 @@ void hrtimer_peek_ahead_timers(void) + local_irq_restore(flags); + } + +-static void run_hrtimer_softirq(struct softirq_action *h) ++static __latent_entropy void run_hrtimer_softirq(void) + { + hrtimer_peek_ahead_timers(); + } +diff --git a/kernel/irq_work.c b/kernel/irq_work.c +index 55fcce6..0e4cf34 100644 +--- a/kernel/irq_work.c ++++ b/kernel/irq_work.c +@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block cpu_notify; ++static struct notifier_block cpu_notify = { ++ .notifier_call = irq_work_cpu_notify, ++ .priority = 0, ++}; + + static __init int irq_work_init_cpu_notifier(void) + { +- cpu_notify.notifier_call = irq_work_cpu_notify; +- cpu_notify.priority = 0; + register_cpu_notifier(&cpu_notify); + return 0; + } +diff --git a/kernel/jump_label.c b/kernel/jump_label.c +index 9019f15..9a3c42e 100644 +--- a/kernel/jump_label.c ++++ b/kernel/jump_label.c +@@ -14,6 +14,7 @@ + #include <linux/err.h> + #include <linux/static_key.h> + #include <linux/jump_label_ratelimit.h> ++#include <linux/mm.h> + + #ifdef HAVE_JUMP_LABEL + +@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) + + size = (((unsigned long)stop - (unsigned long)start) + / sizeof(struct jump_entry)); ++ pax_open_kernel(); + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); ++ pax_close_kernel(); + } + + static void jump_label_update(struct static_key *key, int enable); +@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod) + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct jump_entry *iter; + ++ pax_open_kernel(); + for (iter = iter_start; iter < iter_stop; iter++) { + if 
(within_module_init(iter->code, mod)) + iter->code = 0; + } ++ pax_close_kernel(); + } + + static int +diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c +index 3127ad5..159d880 100644 +--- a/kernel/kallsyms.c ++++ b/kernel/kallsyms.c +@@ -11,6 +11,9 @@ + * Changed the compression method from stem compression to "table lookup" + * compression (see scripts/kallsyms.c for a more complete description) + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/module.h> + #include <linux/init.h> +@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak)); + + static inline int is_kernel_inittext(unsigned long addr) + { ++ if (system_state != SYSTEM_BOOTING) ++ return 0; ++ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#ifdef CONFIG_MODULES ++static inline int is_module_text(unsigned long addr) ++{ ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END) ++ return 1; ++ ++ addr = ktla_ktva(addr); ++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END; ++} ++#else ++static inline int is_module_text(unsigned long addr) ++{ ++ return 0; ++} ++#endif ++#endif ++ + static inline int is_kernel_text(unsigned long addr) + { + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || +@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr) + + static inline int is_kernel(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_kernel_text(addr) || is_kernel_inittext(addr)) ++ return 1; ++ ++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end) ++#else + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) ++#endif ++ + return 1; + return in_gate_area_no_mm(addr); + } + + static int is_ksym_addr(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_module_text(addr)) ++ return 0; ++#endif ++ + if (all_var) + return is_kernel(addr); + +@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) + + static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) + { +- iter->name[0] = '\0'; + iter->nameoff = get_symbol_offset(new_pos); + iter->pos = new_pos; + } +@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p) + { + struct kallsym_iter *iter = m->private; + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) ++ return 0; ++#endif ++ + /* Some debugging symbols have no name. Ignore them. */ + if (!iter->name[0]) + return 0; +@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p) + */ + type = iter->exported ? 
toupper(iter->type) : + tolower(iter->type); ++ + seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value, + type, iter->name, iter->module_name); + } else +@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file) + struct kallsym_iter *iter; + int ret; + +- iter = kmalloc(sizeof(*iter), GFP_KERNEL); ++ iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + reset_iter(iter, 0); +diff --git a/kernel/kcmp.c b/kernel/kcmp.c +index e30ac0f..3528cac 100644 +--- a/kernel/kcmp.c ++++ b/kernel/kcmp.c +@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, + struct task_struct *task1, *task2; + int ret; + ++#ifdef CONFIG_GRKERNSEC ++ return -ENOSYS; ++#endif ++ + rcu_read_lock(); + + /* +diff --git a/kernel/kexec.c b/kernel/kexec.c +index 18ff0b9..40b0eab 100644 +--- a/kernel/kexec.c ++++ b/kernel/kexec.c +@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, + unsigned long flags) + { + struct compat_kexec_segment in; +- struct kexec_segment out, __user *ksegments; ++ struct kexec_segment out; ++ struct kexec_segment __user *ksegments; + unsigned long i, result; + + /* Don't allow clients that don't understand the native +diff --git a/kernel/kmod.c b/kernel/kmod.c +index 6b375af..eaff670 100644 +--- a/kernel/kmod.c ++++ b/kernel/kmod.c +@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info) + kfree(info->argv); + } + +-static int call_modprobe(char *module_name, int wait) ++static int call_modprobe(char *module_name, char *module_param, int wait) + { + struct subprocess_info *info; + static char *envp[] = { +@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait) + NULL + }; + +- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL); ++ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL); + if (!argv) + goto out; + +@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait) + argv[1] = "-q"; + argv[2] = "--"; + argv[3] = module_name; /* check free_modprobe_argv() */ +- argv[4] = NULL; ++ argv[4] = module_param; ++ argv[5] = NULL; + + info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL, + NULL, free_modprobe_argv, NULL); +@@ -129,9 +130,8 @@ out: + * If module auto-loading support is disabled then this function + * becomes a no-operation. + */ +-int __request_module(bool wait, const char *fmt, ...) ++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap) + { +- va_list args; + char module_name[MODULE_NAME_LEN]; + unsigned int max_modprobes; + int ret; +@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...) + if (!modprobe_path[0]) + return 0; + +- va_start(args, fmt); +- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); +- va_end(args); ++ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap); + if (ret >= MODULE_NAME_LEN) + return -ENAMETOOLONG; + +@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...) 
+ if (ret) + return ret; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) { ++ /* hack to workaround consolekit/udisks stupidity */ ++ read_lock(&tasklist_lock); ++ if (!strcmp(current->comm, "mount") && ++ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) { ++ read_unlock(&tasklist_lock); ++ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name); ++ return -EPERM; ++ } ++ read_unlock(&tasklist_lock); ++ } ++#endif ++ + /* If modprobe needs a service that is in a module, we get a recursive + * loop. Limit the number of running kmod threads to max_threads/2 or + * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method +@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...) + + trace_module_request(module_name, wait, _RET_IP_); + +- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC); ++ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC); + + atomic_dec(&kmod_concurrent); + return ret; + } ++ ++int ___request_module(bool wait, char *module_param, const char *fmt, ...) ++{ ++ va_list args; ++ int ret; ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, module_param, fmt, args); ++ va_end(args); ++ ++ return ret; ++} ++ ++int __request_module(bool wait, const char *fmt, ...) ++{ ++ va_list args; ++ int ret; ++ ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) { ++ char module_param[MODULE_NAME_LEN]; ++ ++ memset(module_param, 0, sizeof(module_param)); ++ ++ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid())); ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, module_param, fmt, args); ++ va_end(args); ++ ++ return ret; ++ } ++#endif ++ ++ va_start(args, fmt); ++ ret = ____request_module(wait, NULL, fmt, args); ++ va_end(args); ++ ++ return ret; ++} ++ + EXPORT_SYMBOL(__request_module); + #endif /* CONFIG_MODULES */ + +@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data) + */ + set_user_nice(current, 0); + ++#ifdef CONFIG_GRKERNSEC ++ /* this is race-free as far as userland is concerned as we copied ++ out the path to be used prior to this point and are now operating ++ on that copy ++ */ ++ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) && ++ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) && ++ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) { ++ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path); ++ retval = -EPERM; ++ goto fail; ++ } ++#endif ++ + retval = -ENOMEM; + new = prepare_kernel_cred(current); + if (!new) +@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data) + commit_creds(new); + + retval = do_execve(getname_kernel(sub_info->path), +- (const char __user *const __user *)sub_info->argv, +- (const char __user *const __user *)sub_info->envp); ++ (const char __user *const __force_user *)sub_info->argv, ++ (const char __user *const __force_user *)sub_info->envp); + if (!retval) + return 0; + +@@ -260,6 +327,10 @@ static int call_helper(void *data) + + static void call_usermodehelper_freeinfo(struct subprocess_info *info) + { ++#ifdef CONFIG_GRKERNSEC ++ kfree(info->path); ++ info->path = info->origpath; ++#endif + if (info->cleanup) + (*info->cleanup)(info); + kfree(info); +@@ 
-303,7 +374,7 @@ static int wait_for_helper(void *data) + * + * Thus the __user pointer cast is valid here. + */ +- sys_wait4(pid, (int __user *)&ret, 0, NULL); ++ sys_wait4(pid, (int __force_user *)&ret, 0, NULL); + + /* + * If ret is 0, either ____call_usermodehelper failed and the +@@ -542,7 +613,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, + goto out; + + INIT_WORK(&sub_info->work, __call_usermodehelper); ++#ifdef CONFIG_GRKERNSEC ++ sub_info->origpath = path; ++ sub_info->path = kstrdup(path, gfp_mask); ++#else + sub_info->path = path; ++#endif + sub_info->argv = argv; + sub_info->envp = envp; + +@@ -650,7 +726,7 @@ EXPORT_SYMBOL(call_usermodehelper); + static int proc_cap_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table t; ++ ctl_table_no_const t; + unsigned long cap_array[_KERNEL_CAPABILITY_U32S]; + kernel_cap_t new_cap; + int err, i; +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index ceeadfc..11c18b6 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -31,6 +31,9 @@ + * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi + * <prasanna@in.ibm.com> added function-return probes. + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kprobes.h> + #include <linux/hash.h> + #include <linux/init.h> +@@ -135,12 +138,12 @@ enum kprobe_slot_state { + + static void *alloc_insn_page(void) + { +- return module_alloc(PAGE_SIZE); ++ return module_alloc_exec(PAGE_SIZE); + } + + static void free_insn_page(void *page) + { +- module_free(NULL, page); ++ module_free_exec(NULL, page); + } + + struct kprobe_insn_cache kprobe_insn_slots = { +@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, + kprobe_type = "k"; + + if (sym) +- seq_printf(pi, "%p %s %s+0x%x %s ", ++ seq_printf(pi, "%pK %s %s+0x%x %s ", + p->addr, kprobe_type, sym, offset, + (modname ? modname : " ")); + else +- seq_printf(pi, "%p %s %p ", ++ seq_printf(pi, "%pK %s %pK ", + p->addr, kprobe_type, p->addr); + + if (!pp) +diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c +index d945a94..0b7f45f 100644 +--- a/kernel/ksysfs.c ++++ b/kernel/ksysfs.c +@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj, + { + if (count+1 > UEVENT_HELPER_PATH_LEN) + return -ENOENT; ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; + memcpy(uevent_helper, buf, count); + uevent_helper[count] = '\0'; + if (count && uevent_helper[count-1] == '\n') +@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj, + return count; + } + +-static struct bin_attribute notes_attr = { ++static bin_attribute_no_const notes_attr __read_only = { + .attr = { + .name = "notes", + .mode = S_IRUGO, +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c +index eb8a547..321d8e1 100644 +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -597,6 +597,10 @@ static int static_obj(void *obj) + end = (unsigned long) &_end, + addr = (unsigned long) obj; + ++#ifdef CONFIG_PAX_KERNEXEC ++ start = ktla_ktva(start); ++#endif ++ + /* + * static variable? 
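/*
 * A userspace x86-64 analogue of the module_alloc_exec() split used by
 * the kprobes hunks above: instruction memory is never writable and
 * executable at the same time -- fill the slot while it is RW, then
 * flip it to RX before running it. Minimal sketch, Linux/x86 only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned char ret_insn[] = { 0xc3 };	/* x86 "ret" */
	void *slot = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (slot == MAP_FAILED)
		return 1;
	memcpy(slot, ret_insn, sizeof(ret_insn));
	if (mprotect(slot, 4096, PROT_READ | PROT_EXEC))	/* never RWX */
		return 1;
	((void (*)(void))slot)();
	puts("executed from an RX-only slot");
	return 0;
}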
+ */ +@@ -738,6 +742,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) + if (!static_obj(lock->key)) { + debug_locks_off(); + printk("INFO: trying to register non-static key.\n"); ++ printk("lock:%pS key:%pS.\n", lock, lock->key); + printk("the code is fine but needs lockdep annotation.\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); +@@ -3082,7 +3087,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, + if (!class) + return 0; + } +- atomic_inc((atomic_t *)&class->ops); ++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops); + if (very_verbose(class)) { + printk("\nacquire class [%p] %s", class->key, class->name); + if (class->name_version > 1) +diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c +index ef43ac4..2720dfa 100644 +--- a/kernel/locking/lockdep_proc.c ++++ b/kernel/locking/lockdep_proc.c +@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v) + return 0; + } + +- seq_printf(m, "%p", class->key); ++ seq_printf(m, "%pK", class->key); + #ifdef CONFIG_DEBUG_LOCKDEP + seq_printf(m, " OPS:%8ld", class->ops); + #endif +@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v) + + list_for_each_entry(entry, &class->locks_after, entry) { + if (entry->distance == 1) { +- seq_printf(m, " -> [%p] ", entry->class->key); ++ seq_printf(m, " -> [%pK] ", entry->class->key); + print_name(m, entry->class); + seq_puts(m, "\n"); + } +@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v) + if (!class->key) + continue; + +- seq_printf(m, "[%p] ", class->key); ++ seq_printf(m, "[%pK] ", class->key); + print_name(m, class); + seq_puts(m, "\n"); + } +@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) + if (!i) + seq_line(m, '-', 40-namelen, namelen); + +- snprintf(ip, sizeof(ip), "[<%p>]", ++ snprintf(ip, sizeof(ip), "[<%pK>]", + (void *)class->contention_point[i]); + seq_printf(m, "%40s %14lu %29s %pS\n", + name, stats->contention_point[i], +@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) + if (!i) + seq_line(m, '-', 40-namelen, namelen); + +- snprintf(ip, sizeof(ip), "[<%p>]", ++ snprintf(ip, sizeof(ip), "[<%pK>]", + (void *)class->contending_point[i]); + seq_printf(m, "%40s %14lu %29s %pS\n", + name, stats->contending_point[i], +diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c +index faf6f5b..dc9070a 100644 +--- a/kernel/locking/mutex-debug.c ++++ b/kernel/locking/mutex-debug.c +@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) + } + + void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti) ++ struct task_struct *task) + { + SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); + + /* Mark the current thread as blocked on the lock: */ +- ti->task->blocked_on = waiter; ++ task->blocked_on = waiter; + } + + void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti) ++ struct task_struct *task) + { + DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); +- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); +- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); +- ti->task->blocked_on = NULL; ++ DEBUG_LOCKS_WARN_ON(waiter->task != task); ++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); ++ task->blocked_on = NULL; + + list_del_init(&waiter->list); + waiter->task = NULL; +diff --git a/kernel/locking/mutex-debug.h 
b/kernel/locking/mutex-debug.h +index 0799fd3..d06ae3b 100644 +--- a/kernel/locking/mutex-debug.h ++++ b/kernel/locking/mutex-debug.h +@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock, + extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); + extern void debug_mutex_add_waiter(struct mutex *lock, + struct mutex_waiter *waiter, +- struct thread_info *ti); ++ struct task_struct *task); + extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, +- struct thread_info *ti); ++ struct task_struct *task); + extern void debug_mutex_unlock(struct mutex *lock); + extern void debug_mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c +index 4dd6e4c..df52693 100644 +--- a/kernel/locking/mutex.c ++++ b/kernel/locking/mutex.c +@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node) + node->locked = 1; + return; + } +- ACCESS_ONCE(prev->next) = node; ++ ACCESS_ONCE_RW(prev->next) = node; + smp_wmb(); + /* Wait until the lock holder passes the lock down */ + while (!ACCESS_ONCE(node->locked)) +@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node) + while (!(next = ACCESS_ONCE(node->next))) + arch_mutex_cpu_relax(); + } +- ACCESS_ONCE(next->locked) = 1; ++ ACCESS_ONCE_RW(next->locked) = 1; + smp_wmb(); + } + +@@ -520,7 +520,7 @@ slowpath: + goto skip_wait; + + debug_mutex_lock_common(lock, &waiter); +- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); ++ debug_mutex_add_waiter(lock, &waiter, task); + + /* add waiting tasks to the end of the waitqueue (FIFO): */ + list_add_tail(&waiter.list, &lock->wait_list); +@@ -564,7 +564,7 @@ slowpath: + schedule_preempt_disabled(); + spin_lock_mutex(&lock->wait_lock, flags); + } +- mutex_remove_waiter(lock, &waiter, current_thread_info()); ++ mutex_remove_waiter(lock, &waiter, task); + /* set it to 0 if there are no waiters left: */ + if (likely(list_empty(&lock->wait_list))) + atomic_set(&lock->count, 0); +@@ -601,7 +601,7 @@ skip_wait: + return 0; + + err: +- mutex_remove_waiter(lock, &waiter, task_thread_info(task)); ++ mutex_remove_waiter(lock, &waiter, task); + spin_unlock_mutex(&lock->wait_lock, flags); + debug_mutex_free_waiter(&waiter); + mutex_release(&lock->dep_map, 1, ip); +diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c +index 1d96dd0..994ff19 100644 +--- a/kernel/locking/rtmutex-tester.c ++++ b/kernel/locking/rtmutex-tester.c +@@ -22,7 +22,7 @@ + #define MAX_RT_TEST_MUTEXES 8 + + static spinlock_t rttest_lock; +-static atomic_t rttest_event; ++static atomic_unchecked_t rttest_event; + + struct test_thread_data { + int opcode; +@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + + case RTTEST_LOCKCONT: + td->mutexes[td->opdata] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + return 0; + + case RTTEST_RESET: +@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return 0; + + case RTTEST_RESETEVENT: +- atomic_set(&rttest_event, 0); ++ atomic_set_unchecked(&rttest_event, 0); + return 0; + + default: +@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return ret; + + td->mutexes[id] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + 
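/*
 * A minimal sketch of the atomic_unchecked_t pattern that the
 * rtmutex-tester hunks below (and many others in this patch) rely on,
 * assuming the PaX REFCOUNT design: plain atomic_t gains overflow
 * detection, while counters whose wraparound is harmless (test event
 * ticks, statistics) switch to an _unchecked variant that keeps the
 * old wrapping semantics. Userspace stand-ins, not the kernel API:
 */
typedef struct { int counter; } atomic_unchecked_t;

#define atomic_read_unchecked(v)	((v)->counter)
#define atomic_set_unchecked(v, i)	((v)->counter = (i))

static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
	/* the real kernel version is arch-specific, without overflow traps */
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}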
rt_mutex_lock(&mutexes[id]); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = 4; + return 0; + +@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + return ret; + + td->mutexes[id] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + ret = rt_mutex_lock_interruptible(&mutexes[id], 0); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = ret ? 0 : 4; + return ret ? -EINTR : 0; + +@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup) + if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) + return ret; + +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + rt_mutex_unlock(&mutexes[id]); +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + td->mutexes[id] = 0; + return 0; + +@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + break; + + td->mutexes[dat] = 2; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + break; + + default: +@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + return; + + td->mutexes[dat] = 3; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + break; + + case RTTEST_LOCKNOWAIT: +@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex) + return; + + td->mutexes[dat] = 1; +- td->event = atomic_add_return(1, &rttest_event); ++ td->event = atomic_add_return_unchecked(1, &rttest_event); + return; + + default: +diff --git a/kernel/module.c b/kernel/module.c +index 6716a1f..9ddc1e1 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -61,6 +61,7 @@ + #include <linux/pfn.h> + #include <linux/bsearch.h> + #include <linux/fips.h> ++#include <linux/grsecurity.h> + #include <uapi/linux/module.h> + #include "module-internal.h" + +@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list); + + /* Bounds of module allocation, for speeding __module_address. + * Protected by module_mutex. 
*/ +-static unsigned long module_addr_min = -1UL, module_addr_max = 0; ++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0; ++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0; + + int register_module_notifier(struct notifier_block * nb) + { +@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, + return true; + + list_for_each_entry_rcu(mod, &modules, list) { +- struct symsearch arr[] = { ++ struct symsearch modarr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY, false }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, +@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, + if (mod->state == MODULE_STATE_UNFORMED) + continue; + +- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) ++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data)) + return true; + } + return false; +@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info) + if (!pcpusec->sh_size) + return 0; + +- if (align > PAGE_SIZE) { ++ if (align-1 >= PAGE_SIZE) { + pr_warn("%s: per-cpu alignment %li > %li\n", + mod->name, align, PAGE_SIZE); + align = PAGE_SIZE; +@@ -1059,7 +1061,7 @@ struct module_attribute module_uevent = + static ssize_t show_coresize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) + { +- return sprintf(buffer, "%u\n", mk->mod->core_size); ++ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw); + } + + static struct module_attribute modinfo_coresize = +@@ -1068,7 +1070,7 @@ static struct module_attribute modinfo_coresize = + static ssize_t show_initsize(struct module_attribute *mattr, + struct module_kobject *mk, char *buffer) + { +- return sprintf(buffer, "%u\n", mk->mod->init_size); ++ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw); + } + + static struct module_attribute modinfo_initsize = +@@ -1160,12 +1162,29 @@ static int check_version(Elf_Shdr *sechdrs, + goto bad_version; + } + ++#ifdef CONFIG_GRKERNSEC_RANDSTRUCT ++ /* ++ * avoid potentially printing jibberish on attempted load ++ * of a module randomized with a different seed ++ */ ++ pr_warn("no symbol version for %s\n", symname); ++#else + pr_warn("%s: no symbol version for %s\n", mod->name, symname); ++#endif + return 0; + + bad_version: ++#ifdef CONFIG_GRKERNSEC_RANDSTRUCT ++ /* ++ * avoid potentially printing jibberish on attempted load ++ * of a module randomized with a different seed ++ */ ++ printk("attempted module disagrees about version of symbol %s\n", ++ symname); ++#else + printk("%s: disagrees about version of symbol %s\n", + mod->name, symname); ++#endif + return 0; + } + +@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod, + */ + #ifdef CONFIG_SYSFS + +-#ifdef CONFIG_KALLSYMS ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + static inline bool sect_empty(const Elf_Shdr *sect) + { + return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; +@@ -1421,7 +1440,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info) + { + unsigned int notes, loaded, i; + struct module_notes_attrs *notes_attrs; +- struct bin_attribute *nattr; ++ bin_attribute_no_const *nattr; + + /* failed to create section attributes, so can't create notes */ + if (!mod->sect_attrs) +@@ -1533,7 +1552,7 @@ static void del_usage_links(struct module *mod) + static int module_add_modinfo_attrs(struct module *mod) + { + struct 
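/*
 * The percpu_modalloc() test above changes (align > PAGE_SIZE) into
 * (align-1 >= PAGE_SIZE): with unsigned arithmetic the subtraction
 * wraps for align == 0, so a zero alignment is now rejected as well.
 * A quick check of the predicate:
 */
#include <stdio.h>
#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long aligns[] = { 0, 1, 64, 4096, 4097, 8192 };
	for (int i = 0; i < 6; i++)
		printf("align=%5lu -> %s\n", aligns[i],
		       aligns[i] - 1 >= PAGE_SIZE ? "clamped" : "ok");
	return 0;
}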
module_attribute *attr; +- struct module_attribute *temp_attr; ++ module_attribute_no_const *temp_attr; + int error = 0; + int i; + +@@ -1754,21 +1773,21 @@ static void set_section_ro_nx(void *base, + + static void unset_module_core_ro_nx(struct module *mod) + { +- set_page_attributes(mod->module_core + mod->core_text_size, +- mod->module_core + mod->core_size, ++ set_page_attributes(mod->module_core_rw, ++ mod->module_core_rw + mod->core_size_rw, + set_memory_x); +- set_page_attributes(mod->module_core, +- mod->module_core + mod->core_ro_size, ++ set_page_attributes(mod->module_core_rx, ++ mod->module_core_rx + mod->core_size_rx, + set_memory_rw); + } + + static void unset_module_init_ro_nx(struct module *mod) + { +- set_page_attributes(mod->module_init + mod->init_text_size, +- mod->module_init + mod->init_size, ++ set_page_attributes(mod->module_init_rw, ++ mod->module_init_rw + mod->init_size_rw, + set_memory_x); +- set_page_attributes(mod->module_init, +- mod->module_init + mod->init_ro_size, ++ set_page_attributes(mod->module_init_rx, ++ mod->module_init_rx + mod->init_size_rx, + set_memory_rw); + } + +@@ -1781,14 +1800,14 @@ void set_all_modules_text_rw(void) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if ((mod->module_core) && (mod->core_text_size)) { +- set_page_attributes(mod->module_core, +- mod->module_core + mod->core_text_size, ++ if ((mod->module_core_rx) && (mod->core_size_rx)) { ++ set_page_attributes(mod->module_core_rx, ++ mod->module_core_rx + mod->core_size_rx, + set_memory_rw); + } +- if ((mod->module_init) && (mod->init_text_size)) { +- set_page_attributes(mod->module_init, +- mod->module_init + mod->init_text_size, ++ if ((mod->module_init_rx) && (mod->init_size_rx)) { ++ set_page_attributes(mod->module_init_rx, ++ mod->module_init_rx + mod->init_size_rx, + set_memory_rw); + } + } +@@ -1804,14 +1823,14 @@ void set_all_modules_text_ro(void) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if ((mod->module_core) && (mod->core_text_size)) { +- set_page_attributes(mod->module_core, +- mod->module_core + mod->core_text_size, ++ if ((mod->module_core_rx) && (mod->core_size_rx)) { ++ set_page_attributes(mod->module_core_rx, ++ mod->module_core_rx + mod->core_size_rx, + set_memory_ro); + } +- if ((mod->module_init) && (mod->init_text_size)) { +- set_page_attributes(mod->module_init, +- mod->module_init + mod->init_text_size, ++ if ((mod->module_init_rx) && (mod->init_size_rx)) { ++ set_page_attributes(mod->module_init_rx, ++ mod->module_init_rx + mod->init_size_rx, + set_memory_ro); + } + } +@@ -1862,16 +1881,19 @@ static void free_module(struct module *mod) + + /* This may be NULL, but that's OK */ + unset_module_init_ro_nx(mod); +- module_free(mod, mod->module_init); ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); + kfree(mod->args); + percpu_modfree(mod); + + /* Free lock-classes: */ +- lockdep_free_key_range(mod->module_core, mod->core_size); ++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx); ++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw); + + /* Finally, free the core (containing the module structure) */ + unset_module_core_ro_nx(mod); +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_core_rw); + + #ifdef CONFIG_MPU + update_protections(current->mm); +@@ -1940,9 +1962,31 @@ static int simplify_symbols(struct 
module *mod, const struct load_info *info) + int ret = 0; + const struct kernel_symbol *ksym; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ int is_fs_load = 0; ++ int register_filesystem_found = 0; ++ char *p; ++ ++ p = strstr(mod->args, "grsec_modharden_fs"); ++ if (p) { ++ char *endptr = p + sizeof("grsec_modharden_fs") - 1; ++ /* copy \0 as well */ ++ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1); ++ is_fs_load = 1; ++ } ++#endif ++ + for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { + const char *name = info->strtab + sym[i].st_name; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ /* it's a real shame this will never get ripped and copied ++ upstream! ;( ++ */ ++ if (is_fs_load && !strcmp(name, "register_filesystem")) ++ register_filesystem_found = 1; ++#endif ++ + switch (sym[i].st_shndx) { + case SHN_COMMON: + /* We compiled with -fno-common. These are not +@@ -1963,7 +2007,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) + ksym = resolve_symbol_wait(mod, info, name); + /* Ok if resolved. */ + if (ksym && !IS_ERR(ksym)) { ++ pax_open_kernel(); + sym[i].st_value = ksym->value; ++ pax_close_kernel(); + break; + } + +@@ -1982,11 +2028,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) + secbase = (unsigned long)mod_percpu(mod); + else + secbase = info->sechdrs[sym[i].st_shndx].sh_addr; ++ pax_open_kernel(); + sym[i].st_value += secbase; ++ pax_close_kernel(); + break; + } + } + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ if (is_fs_load && !register_filesystem_found) { ++ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name); ++ ret = -EPERM; ++ } ++#endif ++ + return ret; + } + +@@ -2070,22 +2125,12 @@ static void layout_sections(struct module *mod, struct load_info *info) + || s->sh_entsize != ~0UL + || strstarts(sname, ".init")) + continue; +- s->sh_entsize = get_offset(mod, &mod->core_size, s, i); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i); + pr_debug("\t%s\n", sname); + } +- switch (m) { +- case 0: /* executable */ +- mod->core_size = debug_align(mod->core_size); +- mod->core_text_size = mod->core_size; +- break; +- case 1: /* RO: text and ro-data */ +- mod->core_size = debug_align(mod->core_size); +- mod->core_ro_size = mod->core_size; +- break; +- case 3: /* whole core */ +- mod->core_size = debug_align(mod->core_size); +- break; +- } + } + + pr_debug("Init section allocation order:\n"); +@@ -2099,23 +2144,13 @@ static void layout_sections(struct module *mod, struct load_info *info) + || s->sh_entsize != ~0UL + || !strstarts(sname, ".init")) + continue; +- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) +- | INIT_OFFSET_MASK); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i); ++ s->sh_entsize |= INIT_OFFSET_MASK; + pr_debug("\t%s\n", sname); + } +- switch (m) { +- case 0: /* executable */ +- mod->init_size = debug_align(mod->init_size); +- mod->init_text_size = mod->init_size; +- break; +- case 1: /* RO: text and ro-data */ +- mod->init_size = debug_align(mod->init_size); +- mod->init_ro_size = mod->init_size; +- break; +- case 3: /* whole init */ +- mod->init_size = debug_align(mod->init_size); +- break; +- } + } + } + +@@ -2288,7 +2323,7 @@ 
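/*
 * The layout_sections() rewrite above drops the old per-phase switch
 * statement in favour of a single predicate that sorts every allocated
 * section into one of two module images. A sketch of that rule,
 * assuming the standard ELF section flags from <elf.h>:
 */
#include <stdio.h>
#include <elf.h>

static const char *dest_image(Elf64_Xword sh_flags)
{
	if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
		return "core_size_rw";	/* writable (or non-alloc) -> RW image */
	return "core_size_rx";		/* text and rodata -> RX image */
}

int main(void)
{
	printf(".text   -> %s\n", dest_image(SHF_ALLOC | SHF_EXECINSTR));
	printf(".rodata -> %s\n", dest_image(SHF_ALLOC));
	printf(".data   -> %s\n", dest_image(SHF_ALLOC | SHF_WRITE));
	return 0;
}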
static void layout_symtab(struct module *mod, struct load_info *info) + + /* Put symbol section at end of init part of module. */ + symsect->sh_flags |= SHF_ALLOC; +- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, ++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect, + info->index.sym) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + symsect->sh_name); + +@@ -2305,13 +2340,13 @@ static void layout_symtab(struct module *mod, struct load_info *info) + } + + /* Append room for core symbols at end of core part. */ +- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); +- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); +- mod->core_size += strtab_size; ++ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1); ++ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym); ++ mod->core_size_rx += strtab_size; + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; +- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, ++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect, + info->index.str) | INIT_OFFSET_MASK; + pr_debug("\t%s\n", info->secstrings + strsect->sh_name); + } +@@ -2329,12 +2364,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + /* Make sure we get permanent strtab: don't use info->strtab. */ + mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + ++ pax_open_kernel(); ++ + /* Set types up while we still have access to sections. */ + for (i = 0; i < mod->num_symtab; i++) + mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); + +- mod->core_symtab = dst = mod->module_core + info->symoffs; +- mod->core_strtab = s = mod->module_core + info->stroffs; ++ mod->core_symtab = dst = mod->module_core_rx + info->symoffs; ++ mod->core_strtab = s = mod->module_core_rx + info->stroffs; + src = mod->symtab; + for (ndst = i = 0; i < mod->num_symtab; i++) { + if (i == 0 || +@@ -2346,6 +2383,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) + } + } + mod->core_num_syms = ndst; ++ ++ pax_close_kernel(); + } + #else + static inline void layout_symtab(struct module *mod, struct load_info *info) +@@ -2379,17 +2418,33 @@ void * __weak module_alloc(unsigned long size) + return vmalloc_exec(size); + } + +-static void *module_alloc_update_bounds(unsigned long size) ++static void *module_alloc_update_bounds_rw(unsigned long size) + { + void *ret = module_alloc(size); + + if (ret) { + mutex_lock(&module_mutex); + /* Update module bounds. */ +- if ((unsigned long)ret < module_addr_min) +- module_addr_min = (unsigned long)ret; +- if ((unsigned long)ret + size > module_addr_max) +- module_addr_max = (unsigned long)ret + size; ++ if ((unsigned long)ret < module_addr_min_rw) ++ module_addr_min_rw = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rw) ++ module_addr_max_rw = (unsigned long)ret + size; ++ mutex_unlock(&module_mutex); ++ } ++ return ret; ++} ++ ++static void *module_alloc_update_bounds_rx(unsigned long size) ++{ ++ void *ret = module_alloc_exec(size); ++ ++ if (ret) { ++ mutex_lock(&module_mutex); ++ /* Update module bounds. 
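/*
 * Both allocators above keep one [min, max] pair per image so that
 * __module_address() can reject obviously-foreign addresses before
 * walking the module list. A minimal sketch of the update step for a
 * single pair, assuming the caller holds the module mutex:
 */
static unsigned long addr_min = -1UL, addr_max = 0;

static void update_bounds(void *ret, unsigned long size)
{
	if ((unsigned long)ret < addr_min)
		addr_min = (unsigned long)ret;
	if ((unsigned long)ret + size > addr_max)
		addr_max = (unsigned long)ret + size;
}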
*/ ++ if ((unsigned long)ret < module_addr_min_rx) ++ module_addr_min_rx = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rx) ++ module_addr_max_rx = (unsigned long)ret + size; + mutex_unlock(&module_mutex); + } + return ret; +@@ -2646,7 +2701,15 @@ static struct module *setup_load_info(struct load_info *info, int flags) + mod = (void *)info->sechdrs[info->index.mod].sh_addr; + + if (info->index.sym == 0) { ++#ifdef CONFIG_GRKERNSEC_RANDSTRUCT ++ /* ++ * avoid potentially printing jibberish on attempted load ++ * of a module randomized with a different seed ++ */ ++ pr_warn("module has no symbols (stripped?)\n"); ++#else + pr_warn("%s: module has no symbols (stripped?)\n", mod->name); ++#endif + return ERR_PTR(-ENOEXEC); + } + +@@ -2662,8 +2725,14 @@ static struct module *setup_load_info(struct load_info *info, int flags) + static int check_modinfo(struct module *mod, struct load_info *info, int flags) + { + const char *modmagic = get_modinfo(info, "vermagic"); ++ const char *license = get_modinfo(info, "license"); + int err; + ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR ++ if (!license || !license_is_gpl_compatible(license)) ++ return -ENOEXEC; ++#endif ++ + if (flags & MODULE_INIT_IGNORE_VERMAGIC) + modmagic = NULL; + +@@ -2688,7 +2757,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) + } + + /* Set up license info based on the info section */ +- set_license(mod, get_modinfo(info, "license")); ++ set_license(mod, license); + + return 0; + } +@@ -2782,7 +2851,7 @@ static int move_module(struct module *mod, struct load_info *info) + void *ptr; + + /* Do the allocs. */ +- ptr = module_alloc_update_bounds(mod->core_size); ++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a +@@ -2792,11 +2861,11 @@ static int move_module(struct module *mod, struct load_info *info) + if (!ptr) + return -ENOMEM; + +- memset(ptr, 0, mod->core_size); +- mod->module_core = ptr; ++ memset(ptr, 0, mod->core_size_rw); ++ mod->module_core_rw = ptr; + +- if (mod->init_size) { +- ptr = module_alloc_update_bounds(mod->init_size); ++ if (mod->init_size_rw) { ++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. 
This block doesn't need to be +@@ -2805,13 +2874,45 @@ static int move_module(struct module *mod, struct load_info *info) + */ + kmemleak_ignore(ptr); + if (!ptr) { +- module_free(mod, mod->module_core); ++ module_free(mod, mod->module_core_rw); + return -ENOMEM; + } +- memset(ptr, 0, mod->init_size); +- mod->module_init = ptr; ++ memset(ptr, 0, mod->init_size_rw); ++ mod->module_init_rw = ptr; + } else +- mod->module_init = NULL; ++ mod->module_init_rw = NULL; ++ ++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr) { ++ if (mod->module_init_rw) ++ module_free(mod, mod->module_init_rw); ++ module_free(mod, mod->module_core_rw); ++ return -ENOMEM; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->core_size_rx); ++ pax_close_kernel(); ++ mod->module_core_rx = ptr; ++ ++ if (mod->init_size_rx) { ++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx); ++ kmemleak_ignore(ptr); ++ if (!ptr && mod->init_size_rx) { ++ module_free_exec(mod, mod->module_core_rx); ++ if (mod->module_init_rw) ++ module_free(mod, mod->module_init_rw); ++ module_free(mod, mod->module_core_rw); ++ return -ENOMEM; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->init_size_rx); ++ pax_close_kernel(); ++ mod->module_init_rx = ptr; ++ } else ++ mod->module_init_rx = NULL; + + /* Transfer each section which specifies SHF_ALLOC */ + pr_debug("final section addresses:\n"); +@@ -2822,16 +2923,45 @@ static int move_module(struct module *mod, struct load_info *info) + if (!(shdr->sh_flags & SHF_ALLOC)) + continue; + +- if (shdr->sh_entsize & INIT_OFFSET_MASK) +- dest = mod->module_init +- + (shdr->sh_entsize & ~INIT_OFFSET_MASK); +- else +- dest = mod->module_core + shdr->sh_entsize; ++ if (shdr->sh_entsize & INIT_OFFSET_MASK) { ++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC)) ++ dest = mod->module_init_rw ++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK); ++ else ++ dest = mod->module_init_rx ++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK); ++ } else { ++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC)) ++ dest = mod->module_core_rw + shdr->sh_entsize; ++ else ++ dest = mod->module_core_rx + shdr->sh_entsize; ++ } ++ ++ if (shdr->sh_type != SHT_NOBITS) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_64 ++ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR)) ++ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT); ++#endif ++ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) { ++ pax_open_kernel(); ++ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); ++ pax_close_kernel(); ++ } else ++#endif + +- if (shdr->sh_type != SHT_NOBITS) + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); ++ } + /* Update sh_addr to point to copy in image. */ +- shdr->sh_addr = (unsigned long)dest; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (shdr->sh_flags & SHF_EXECINSTR) ++ shdr->sh_addr = ktva_ktla((unsigned long)dest); ++ else ++#endif ++ ++ shdr->sh_addr = (unsigned long)dest; + pr_debug("\t0x%lx %s\n", + (long)shdr->sh_addr, info->secstrings + shdr->sh_name); + } +@@ -2888,12 +3018,12 @@ static void flush_module_icache(const struct module *mod) + * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. 
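/*
 * Section placement in the rewritten move_module() above is a 2x2
 * decision: init vs. core (from INIT_OFFSET_MASK in sh_entsize) and
 * rw vs. rx (from SHF_WRITE). A sketch of the selection, with the
 * four image pointers reduced to labels:
 */
#include <stdio.h>

static const char *pick_dest(int is_init, int writable)
{
	if (is_init)
		return writable ? "module_init_rw" : "module_init_rx";
	return writable ? "module_core_rw" : "module_core_rx";
}

int main(void)
{
	for (int is_init = 0; is_init < 2; is_init++)
		for (int writable = 0; writable < 2; writable++)
			printf("init=%d write=%d -> %s\n",
			       is_init, writable, pick_dest(is_init, writable));
	return 0;
}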
+ */ +- if (mod->module_init) +- flush_icache_range((unsigned long)mod->module_init, +- (unsigned long)mod->module_init +- + mod->init_size); +- flush_icache_range((unsigned long)mod->module_core, +- (unsigned long)mod->module_core + mod->core_size); ++ if (mod->module_init_rx) ++ flush_icache_range((unsigned long)mod->module_init_rx, ++ (unsigned long)mod->module_init_rx ++ + mod->init_size_rx); ++ flush_icache_range((unsigned long)mod->module_core_rx, ++ (unsigned long)mod->module_core_rx + mod->core_size_rx); + + set_fs(old_fs); + } +@@ -2950,8 +3080,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags) + static void module_deallocate(struct module *mod, struct load_info *info) + { + percpu_modfree(mod); +- module_free(mod, mod->module_init); +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_init_rx); ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_init_rw); ++ module_free(mod, mod->module_core_rw); + } + + int __weak module_finalize(const Elf_Ehdr *hdr, +@@ -2964,7 +3096,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr, + static int post_relocation(struct module *mod, const struct load_info *info) + { + /* Sort exception table now relocations are done. */ ++ pax_open_kernel(); + sort_extable(mod->extable, mod->extable + mod->num_exentries); ++ pax_close_kernel(); + + /* Copy relocated percpu area over. */ + percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, +@@ -3018,16 +3152,16 @@ static int do_init_module(struct module *mod) + MODULE_STATE_COMING, mod); + + /* Set RO and NX regions for core */ +- set_section_ro_nx(mod->module_core, +- mod->core_text_size, +- mod->core_ro_size, +- mod->core_size); ++ set_section_ro_nx(mod->module_core_rx, ++ mod->core_size_rx, ++ mod->core_size_rx, ++ mod->core_size_rx); + + /* Set RO and NX regions for init */ +- set_section_ro_nx(mod->module_init, +- mod->init_text_size, +- mod->init_ro_size, +- mod->init_size); ++ set_section_ro_nx(mod->module_init_rx, ++ mod->init_size_rx, ++ mod->init_size_rx, ++ mod->init_size_rx); + + do_mod_ctors(mod); + /* Start the module */ +@@ -3088,11 +3222,12 @@ static int do_init_module(struct module *mod) + mod->strtab = mod->core_strtab; + #endif + unset_module_init_ro_nx(mod); +- module_free(mod, mod->module_init); +- mod->module_init = NULL; +- mod->init_size = 0; +- mod->init_ro_size = 0; +- mod->init_text_size = 0; ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); ++ mod->module_init_rw = NULL; ++ mod->module_init_rx = NULL; ++ mod->init_size_rw = 0; ++ mod->init_size_rx = 0; + mutex_unlock(&module_mutex); + wake_up_all(&module_wq); + +@@ -3235,9 +3370,38 @@ static int load_module(struct load_info *info, const char __user *uargs, + if (err) + goto free_unload; + ++ /* Now copy in args */ ++ mod->args = strndup_user(uargs, ~0UL >> 1); ++ if (IS_ERR(mod->args)) { ++ err = PTR_ERR(mod->args); ++ goto free_unload; ++ } ++ + /* Set up MODINFO_ATTR fields */ + setup_modinfo(mod, info); + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ { ++ char *p, *p2; ++ ++ if (strstr(mod->args, "grsec_modharden_netdev")) { ++ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). 
Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name); ++ err = -EPERM; ++ goto free_modinfo; ++ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) { ++ p += sizeof("grsec_modharden_normal") - 1; ++ p2 = strstr(p, "_"); ++ if (p2) { ++ *p2 = '\0'; ++ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p); ++ *p2 = '_'; ++ } ++ err = -EPERM; ++ goto free_modinfo; ++ } ++ } ++#endif ++ + /* Fix up syms, so that st_value is a pointer to location. */ + err = simplify_symbols(mod, info); + if (err < 0) +@@ -3253,13 +3417,6 @@ static int load_module(struct load_info *info, const char __user *uargs, + + flush_module_icache(mod); + +- /* Now copy in args */ +- mod->args = strndup_user(uargs, ~0UL >> 1); +- if (IS_ERR(mod->args)) { +- err = PTR_ERR(mod->args); +- goto free_arch_cleanup; +- } +- + dynamic_debug_setup(info->debug, info->num_debug); + + /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ +@@ -3297,11 +3454,10 @@ static int load_module(struct load_info *info, const char __user *uargs, + ddebug_cleanup: + dynamic_debug_remove(info->debug); + synchronize_sched(); +- kfree(mod->args); +- free_arch_cleanup: + module_arch_cleanup(mod); + free_modinfo: + free_modinfo(mod); ++ kfree(mod->args); + free_unload: + module_unload_free(mod); + unlink_mod: +@@ -3384,10 +3540,16 @@ static const char *get_ksymbol(struct module *mod, + unsigned long nextval; + + /* At worse, next value is at end of module */ +- if (within_module_init(addr, mod)) +- nextval = (unsigned long)mod->module_init+mod->init_text_size; ++ if (within_module_init_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx; ++ else if (within_module_init_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw; ++ else if (within_module_core_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx; ++ else if (within_module_core_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw; + else +- nextval = (unsigned long)mod->module_core+mod->core_text_size; ++ return NULL; + + /* Scan for closest preceding symbol, and next symbol. (ELF + starts real symbols at 1). */ +@@ -3638,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p) + return 0; + + seq_printf(m, "%s %u", +- mod->name, mod->init_size + mod->core_size); ++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw); + print_unload_info(m, mod); + + /* Informative for users. */ +@@ -3647,7 +3809,7 @@ static int m_show(struct seq_file *m, void *p) + mod->state == MODULE_STATE_COMING ? "Loading": + "Live"); + /* Used by oprofile and other similar tools. 
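/*
 * A userspace sketch of the tag parsing above: the loader appended
 * "grsec_modharden_normal<uid>_" to mod->args, and the check splits
 * the uid back out by briefly NUL-terminating at the trailing '_'.
 * The uid value here is a made-up example.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char args[] = "grsec_modharden_normal1000_ option=1";
	char *p = strstr(args, "grsec_modharden_normal");

	if (p) {
		p += sizeof("grsec_modharden_normal") - 1;
		char *p2 = strstr(p, "_");
		if (p2) {
			*p2 = '\0';
			printf("denied module auto-load by uid %.9s\n", p);
			*p2 = '_';	/* restore, as the kernel code does */
		}
	}
	return 0;
}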
*/ +- seq_printf(m, " 0x%pK", mod->module_core); ++ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw); + + /* Taints info */ + if (mod->taints) +@@ -3683,7 +3845,17 @@ static const struct file_operations proc_modules_operations = { + + static int __init proc_modules_init(void) + { ++#ifndef CONFIG_GRKERNSEC_HIDESYM ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations); ++#else + proc_create("modules", 0, NULL, &proc_modules_operations); ++#endif ++#else ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#endif + return 0; + } + module_init(proc_modules_init); +@@ -3744,14 +3916,14 @@ struct module *__module_address(unsigned long addr) + { + struct module *mod; + +- if (addr < module_addr_min || addr > module_addr_max) ++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) && ++ (addr < module_addr_min_rw || addr > module_addr_max_rw)) + return NULL; + + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; +- if (within_module_core(addr, mod) +- || within_module_init(addr, mod)) ++ if (within_module_init(addr, mod) || within_module_core(addr, mod)) + return mod; + } + return NULL; +@@ -3786,11 +3958,20 @@ bool is_module_text_address(unsigned long addr) + */ + struct module *__module_text_address(unsigned long addr) + { +- struct module *mod = __module_address(addr); ++ struct module *mod; ++ ++#ifdef CONFIG_X86_32 ++ addr = ktla_ktva(addr); ++#endif ++ ++ if (addr < module_addr_min_rx || addr > module_addr_max_rx) ++ return NULL; ++ ++ mod = __module_address(addr); ++ + if (mod) { + /* Make sure it's within the text section. 
*/ +- if (!within(addr, mod->module_init, mod->init_text_size) +- && !within(addr, mod->module_core, mod->core_text_size)) ++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod)) + mod = NULL; + } + return mod; +diff --git a/kernel/notifier.c b/kernel/notifier.c +index 2d5cc4c..d9ea600 100644 +--- a/kernel/notifier.c ++++ b/kernel/notifier.c +@@ -5,6 +5,7 @@ + #include <linux/rcupdate.h> + #include <linux/vmalloc.h> + #include <linux/reboot.h> ++#include <linux/mm.h> + + /* + * Notifier list for kernel code which wants to be called +@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl, + while ((*nl) != NULL) { + if (n->priority > (*nl)->priority) + break; +- nl = &((*nl)->next); ++ nl = (struct notifier_block **)&((*nl)->next); + } +- n->next = *nl; ++ pax_open_kernel(); ++ *(const void **)&n->next = *nl; + rcu_assign_pointer(*nl, n); ++ pax_close_kernel(); + return 0; + } + +@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl, + return 0; + if (n->priority > (*nl)->priority) + break; +- nl = &((*nl)->next); ++ nl = (struct notifier_block **)&((*nl)->next); + } +- n->next = *nl; ++ pax_open_kernel(); ++ *(const void **)&n->next = *nl; + rcu_assign_pointer(*nl, n); ++ pax_close_kernel(); + return 0; + } + +@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl, + { + while ((*nl) != NULL) { + if ((*nl) == n) { ++ pax_open_kernel(); + rcu_assign_pointer(*nl, n->next); ++ pax_close_kernel(); + return 0; + } +- nl = &((*nl)->next); ++ nl = (struct notifier_block **)&((*nl)->next); + } + return -ENOENT; + } +diff --git a/kernel/padata.c b/kernel/padata.c +index 161402f..598814c 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd) + * seq_nr mod. number of cpus in use. 
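/*
 * The notifier hunks above keep the chain pointers in write-protected
 * memory and bracket the two stores with pax_open/close_kernel(). The
 * underlying list operation is an ordinary priority-ordered insert;
 * a minimal sketch without the write-window handling:
 */
#include <stdio.h>

struct nb { int priority; struct nb *next; };

static void chain_register(struct nb **nl, struct nb *n)
{
	while (*nl && (*nl)->priority >= n->priority)
		nl = &(*nl)->next;
	n->next = *nl;
	*nl = n;	/* rcu_assign_pointer() in the kernel */
}

int main(void)
{
	struct nb a = { .priority = 1 }, b = { .priority = 5 }, *head = NULL;
	chain_register(&head, &a);
	chain_register(&head, &b);	/* higher priority, lands first */
	for (struct nb *p = head; p; p = p->next)
		printf("prio %d\n", p->priority);
	return 0;
}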
+ */ + +- seq_nr = atomic_inc_return(&pd->seq_nr); ++ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr); + cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); + + return padata_index_to_cpu(pd, cpu_index); +@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + padata_init_pqueues(pd); + padata_init_squeues(pd); + setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); +- atomic_set(&pd->seq_nr, -1); ++ atomic_set_unchecked(&pd->seq_nr, -1); + atomic_set(&pd->reorder_objects, 0); + atomic_set(&pd->refcnt, 0); + pd->pinst = pinst; +diff --git a/kernel/panic.c b/kernel/panic.c +index 6d63003..486a109 100644 +--- a/kernel/panic.c ++++ b/kernel/panic.c +@@ -52,7 +52,7 @@ EXPORT_SYMBOL(panic_blink); + /* + * Stop ourself in panic -- architecture code may override this + */ +-void __weak panic_smp_self_stop(void) ++void __weak __noreturn panic_smp_self_stop(void) + { + while (1) + cpu_relax(); +@@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, + disable_trace_on_warning(); + + pr_warn("------------[ cut here ]------------\n"); +- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n", ++ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n", + raw_smp_processor_id(), current->pid, file, line, caller); + + if (args) +@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null); + */ + void __stack_chk_fail(void) + { +- panic("stack-protector: Kernel stack is corrupted in: %p\n", ++ dump_stack(); ++ panic("stack-protector: Kernel stack is corrupted in: %pA\n", + __builtin_return_address(0)); + } + EXPORT_SYMBOL(__stack_chk_fail); +diff --git a/kernel/pid.c b/kernel/pid.c +index 9b9a266..c20ef80 100644 +--- a/kernel/pid.c ++++ b/kernel/pid.c +@@ -33,6 +33,7 @@ + #include <linux/rculist.h> + #include <linux/bootmem.h> + #include <linux/hash.h> ++#include <linux/security.h> + #include <linux/pid_namespace.h> + #include <linux/init_task.h> + #include <linux/syscalls.h> +@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID; + + int pid_max = PID_MAX_DEFAULT; + +-#define RESERVED_PIDS 300 ++#define RESERVED_PIDS 500 + + int pid_max_min = RESERVED_PIDS + 1; + int pid_max_max = PID_MAX_LIMIT; +@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task); + */ + struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) + { ++ struct task_struct *task; ++ + rcu_lockdep_assert(rcu_read_lock_held(), + "find_task_by_pid_ns() needs rcu_read_lock()" + " protection"); +- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ ++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ ++ if (gr_pid_is_chrooted(task)) ++ return NULL; ++ ++ return task; + } + + struct task_struct *find_task_by_vpid(pid_t vnr) +@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr) + return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); + } + ++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr) ++{ ++ rcu_lockdep_assert(rcu_read_lock_held(), ++ "find_task_by_pid_ns() needs rcu_read_lock()" ++ " protection"); ++ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID); ++} ++ + struct pid *get_task_pid(struct task_struct *task, enum pid_type type) + { + struct pid *pid; +diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c +index db95d8e..a0ca23f 100644 +--- a/kernel/pid_namespace.c ++++ b/kernel/pid_namespace.c +@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { + struct pid_namespace 
*pid_ns = task_active_pid_ns(current); +- struct ctl_table tmp = *table; ++ ctl_table_no_const tmp = *table; + + if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN)) + return -EPERM; +diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c +index 3b89464..5e38379 100644 +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = { + + static __init int init_posix_cpu_timers(void) + { +- struct k_clock process = { ++ static struct k_clock process = { + .clock_getres = process_cpu_clock_getres, + .clock_get = process_cpu_clock_get, + .timer_create = process_cpu_timer_create, + .nsleep = process_cpu_nsleep, + .nsleep_restart = process_cpu_nsleep_restart, + }; +- struct k_clock thread = { ++ static struct k_clock thread = { + .clock_getres = thread_cpu_clock_getres, + .clock_get = thread_cpu_clock_get, + .timer_create = thread_cpu_timer_create, +diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c +index 424c2d4..679242f 100644 +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -43,6 +43,7 @@ + #include <linux/hash.h> + #include <linux/posix-clock.h> + #include <linux/posix-timers.h> ++#include <linux/grsecurity.h> + #include <linux/syscalls.h> + #include <linux/wait.h> + #include <linux/workqueue.h> +@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock); + * which we beg off on and pass to do_sys_settimeofday(). + */ + +-static struct k_clock posix_clocks[MAX_CLOCKS]; ++static struct k_clock *posix_clocks[MAX_CLOCKS]; + + /* + * These ones are defined below. +@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp) + */ + static __init int init_posix_timers(void) + { +- struct k_clock clock_realtime = { ++ static struct k_clock clock_realtime = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_clock_realtime_get, + .clock_set = posix_clock_realtime_set, +@@ -287,7 +288,7 @@ static __init int init_posix_timers(void) + .timer_get = common_timer_get, + .timer_del = common_timer_del, + }; +- struct k_clock clock_monotonic = { ++ static struct k_clock clock_monotonic = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_ktime_get_ts, + .nsleep = common_nsleep, +@@ -297,19 +298,19 @@ static __init int init_posix_timers(void) + .timer_get = common_timer_get, + .timer_del = common_timer_del, + }; +- struct k_clock clock_monotonic_raw = { ++ static struct k_clock clock_monotonic_raw = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_monotonic_raw, + }; +- struct k_clock clock_realtime_coarse = { ++ static struct k_clock clock_realtime_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_realtime_coarse, + }; +- struct k_clock clock_monotonic_coarse = { ++ static struct k_clock clock_monotonic_coarse = { + .clock_getres = posix_get_coarse_res, + .clock_get = posix_get_monotonic_coarse, + }; +- struct k_clock clock_tai = { ++ static struct k_clock clock_tai = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_tai, + .nsleep = common_nsleep, +@@ -319,7 +320,7 @@ static __init int init_posix_timers(void) + .timer_get = common_timer_get, + .timer_del = common_timer_del, + }; +- struct k_clock clock_boottime = { ++ static struct k_clock clock_boottime = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_boottime, + .nsleep = common_nsleep, +@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id, + return; + } + +- posix_clocks[clock_id] = *new_clock; ++ posix_clocks[clock_id] = 
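/*
 * The posix-timers hunks below turn posix_clocks[] from an array of
 * k_clock structures into an array of pointers to statically-allocated
 * ones, so the function pointers can stay at fixed, protectable
 * addresses instead of being copied around at registration time. The
 * lookup then needs an extra NULL check; a reduced sketch with a
 * one-member stand-in for struct k_clock:
 */
#include <stddef.h>

struct k_clock_sketch { int (*clock_getres)(void); };

#define MAX_CLOCKS 16
static struct k_clock_sketch *clocks[MAX_CLOCKS];

static struct k_clock_sketch *clockid_to_kclock_sketch(int id)
{
	if (id < 0 || id >= MAX_CLOCKS || !clocks[id] ||
	    !clocks[id]->clock_getres)
		return NULL;
	return clocks[id];
}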
new_clock; + } + EXPORT_SYMBOL_GPL(posix_timers_register_clock); + +@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id) + return (id & CLOCKFD_MASK) == CLOCKFD ? + &clock_posix_dynamic : &clock_posix_cpu; + +- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres) ++ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres) + return NULL; +- return &posix_clocks[id]; ++ return posix_clocks[id]; + } + + static int common_timer_create(struct k_itimer *new_timer) +@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, + struct k_clock *kc = clockid_to_kclock(which_clock); + struct k_itimer *new_timer; + int error, new_timer_id; +- sigevent_t event; ++ sigevent_t event = { }; + int it_id_set = IT_ID_NOT_SET; + + if (!kc) +@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, + if (copy_from_user(&new_tp, tp, sizeof (*tp))) + return -EFAULT; + ++ /* only the CLOCK_REALTIME clock can be set, all other clocks ++ have their clock_set fptr set to a nosettime dummy function ++ CLOCK_REALTIME has a NULL clock_set fptr which causes it to ++ call common_clock_set, which calls do_sys_settimeofday, which ++ we hook ++ */ ++ + return kc->clock_set(which_clock, &new_tp); + } + +diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig +index 2fac9cc..56fef29 100644 +--- a/kernel/power/Kconfig ++++ b/kernel/power/Kconfig +@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS + config HIBERNATION + bool "Hibernation (aka 'suspend to disk')" + depends on SWAP && ARCH_HIBERNATION_POSSIBLE ++ depends on !GRKERNSEC_KMEM ++ depends on !PAX_MEMORY_SANITIZE + select HIBERNATE_CALLBACKS + select LZO_COMPRESS + select LZO_DECOMPRESS +diff --git a/kernel/power/process.c b/kernel/power/process.c +index 14f9a8d..98ee610 100644 +--- a/kernel/power/process.c ++++ b/kernel/power/process.c +@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only) + unsigned int elapsed_msecs; + bool wakeup = false; + int sleep_usecs = USEC_PER_MSEC; ++ bool timedout = false; + + do_gettimeofday(&start); + +@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only) + + while (true) { + todo = 0; ++ if (time_after(jiffies, end_time)) ++ timedout = true; + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (p == current || !freeze_task(p)) + continue; + +- if (!freezer_should_skip(p)) ++ if (!freezer_should_skip(p)) { + todo++; ++ if (timedout) { ++ printk(KERN_ERR "Task refusing to freeze:\n"); ++ sched_show_task(p); ++ } ++ } + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + +@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only) + todo += wq_busy; + } + +- if (!todo || time_after(jiffies, end_time)) ++ if (!todo || timedout) + break; + + if (pm_wakeup_pending()) { +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 8c086e6..a52bc51 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file) + if (from_file && type != SYSLOG_ACTION_OPEN) + return 0; + ++#ifdef CONFIG_GRKERNSEC_DMESG ++ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN)) ++ return -EPERM; ++#endif ++ + if (syslog_action_restricted(type)) { + if (capable(CAP_SYSLOG)) + return 0; +diff --git a/kernel/profile.c b/kernel/profile.c +index ebdd9c1..612ee05 100644 +--- a/kernel/profile.c ++++ b/kernel/profile.c +@@ -37,7 +37,7 @@ struct profile_hit { + #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct 
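/*
 * try_to_freeze_tasks() above hoists the deadline test into a
 * `timedout` flag computed once per pass, so the pass that gives up is
 * also the one that logs the tasks still refusing to freeze. The
 * control-flow shape, reduced to a self-contained sketch in which one
 * imaginary task never freezes:
 */
#include <stdbool.h>
#include <stdio.h>

static int pass;
static bool deadline_passed(void) { return ++pass > 3; }

static int count_unfrozen(bool report)
{
	if (report)
		printf("Task refusing to freeze: (sketch)\n");
	return 1;	/* pretend one task never freezes */
}

int main(void)
{
	bool timedout = false;

	while (true) {
		if (deadline_passed())
			timedout = true;
		int todo = count_unfrozen(timedout);
		if (!todo || timedout)
			break;
	}
	printf("gave up after %d passes\n", pass);
	return 0;
}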
profile_hit)) + #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ) + +-static atomic_t *prof_buffer; ++static atomic_unchecked_t *prof_buffer; + static unsigned long prof_len, prof_shift; + + int prof_on __read_mostly; +@@ -260,7 +260,7 @@ static void profile_flip_buffers(void) + hits[i].pc = 0; + continue; + } +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); + hits[i].hits = hits[i].pc = 0; + } + } +@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) + * Add the current hit(s) and flush the write-queue out + * to the global buffer: + */ +- atomic_add(nr_hits, &prof_buffer[pc]); ++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]); + for (i = 0; i < NR_PROFILE_HIT; ++i) { +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); + hits[i].pc = hits[i].hits = 0; + } + out: +@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) + { + unsigned long pc; + pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; +- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); ++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); + } + #endif /* !CONFIG_SMP */ + +@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) + return -EFAULT; + buf++; p++; count--; read++; + } +- pnt = (char *)prof_buffer + p - sizeof(atomic_t); ++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t); + if (copy_to_user(buf, (void *)pnt, count)) + return -EFAULT; + read += count; +@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, + } + #endif + profile_discard_flip_buffers(); +- memset(prof_buffer, 0, prof_len * sizeof(atomic_t)); ++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t)); + return count; + } + +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 1f4bcb3..99cf7ab 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request, + if (seize) + flags |= PT_SEIZED; + rcu_read_lock(); +- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE)) ++ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE)) + flags |= PT_PTRACE_CAP; + rcu_read_unlock(); + task->ptrace = flags; +@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst + break; + return -EIO; + } +- if (copy_to_user(dst, buf, retval)) ++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval)) + return -EFAULT; + copied += retval; + src += retval; +@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request, + bool seized = child->ptrace & PT_SEIZED; + int ret = -EIO; + siginfo_t siginfo, *si; +- void __user *datavp = (void __user *) data; ++ void __user *datavp = (__force void __user *) data; + unsigned long __user *datalp = datavp; + unsigned long flags; + +@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { + ret = ptrace_attach(child, request, addr, data); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. 
+ */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); + if (copied != sizeof(tmp)) + return -EIO; +- return put_user(tmp, (unsigned long __user *)data); ++ return put_user(tmp, (__force unsigned long __user *)data); + } + + int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, +@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, + } + + asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, +- compat_long_t addr, compat_long_t data) ++ compat_ulong_t addr, compat_ulong_t data) + { + struct task_struct *child; + long ret; +@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { + ret = ptrace_attach(child, request, addr, data); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. + */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c +index 3318d82..1a5b2d1 100644 +--- a/kernel/rcu/srcu.c ++++ b/kernel/rcu/srcu.c +@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp) + + idx = ACCESS_ONCE(sp->completed) & 0x1; + preempt_disable(); +- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1; ++ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1; + smp_mb(); /* B */ /* Avoid leaking the critical section. */ +- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; ++ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; + preempt_enable(); + return idx; + } +diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c +index 1254f31..16258dc 100644 +--- a/kernel/rcu/tiny.c ++++ b/kernel/rcu/tiny.c +@@ -46,7 +46,7 @@ + /* Forward declarations for tiny_plugin.h. 
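/*
 * ACCESS_ONCE_RW in the srcu hunk above is the write-side twin of
 * ACCESS_ONCE: the same volatile cast, but named separately so the
 * constification plugin can tell deliberate stores from accidental
 * ones. Assuming GCC's __typeof__, both reduce to the same expansion
 * (this definition is an assumption, not quoted from the patch):
 */
#define ACCESS_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))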
*/ + struct rcu_ctrlblk; + static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); +-static void rcu_process_callbacks(struct softirq_action *unused); ++static void rcu_process_callbacks(void); + static void __call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu), + struct rcu_ctrlblk *rcp); +@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) + false)); + } + +-static void rcu_process_callbacks(struct softirq_action *unused) ++static __latent_entropy void rcu_process_callbacks(void) + { + __rcu_process_callbacks(&rcu_sched_ctrlblk); + __rcu_process_callbacks(&rcu_bh_ctrlblk); +diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c +index 732f8ae..42c1919 100644 +--- a/kernel/rcu/torture.c ++++ b/kernel/rcu/torture.c +@@ -174,12 +174,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = + { 0 }; + static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = + { 0 }; +-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; +-static atomic_t n_rcu_torture_alloc; +-static atomic_t n_rcu_torture_alloc_fail; +-static atomic_t n_rcu_torture_free; +-static atomic_t n_rcu_torture_mberror; +-static atomic_t n_rcu_torture_error; ++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; ++static atomic_unchecked_t n_rcu_torture_alloc; ++static atomic_unchecked_t n_rcu_torture_alloc_fail; ++static atomic_unchecked_t n_rcu_torture_free; ++static atomic_unchecked_t n_rcu_torture_mberror; ++static atomic_unchecked_t n_rcu_torture_error; + static long n_rcu_torture_barrier_error; + static long n_rcu_torture_boost_ktrerror; + static long n_rcu_torture_boost_rterror; +@@ -297,11 +297,11 @@ rcu_torture_alloc(void) + + spin_lock_bh(&rcu_torture_lock); + if (list_empty(&rcu_torture_freelist)) { +- atomic_inc(&n_rcu_torture_alloc_fail); ++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail); + spin_unlock_bh(&rcu_torture_lock); + return NULL; + } +- atomic_inc(&n_rcu_torture_alloc); ++ atomic_inc_unchecked(&n_rcu_torture_alloc); + p = rcu_torture_freelist.next; + list_del_init(p); + spin_unlock_bh(&rcu_torture_lock); +@@ -314,7 +314,7 @@ rcu_torture_alloc(void) + static void + rcu_torture_free(struct rcu_torture *p) + { +- atomic_inc(&n_rcu_torture_free); ++ atomic_inc_unchecked(&n_rcu_torture_free); + spin_lock_bh(&rcu_torture_lock); + list_add_tail(&p->rtort_free, &rcu_torture_freelist); + spin_unlock_bh(&rcu_torture_lock); +@@ -435,7 +435,7 @@ rcu_torture_cb(struct rcu_head *p) + i = rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { + rp->rtort_mbtest = 0; + rcu_torture_free(rp); +@@ -823,7 +823,7 @@ rcu_torture_writer(void *arg) + i = old_rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + old_rp->rtort_pipe_count++; + if (gp_normal == gp_exp) + exp = !!(rcu_random(&rand) & 0x80); +@@ -841,7 +841,7 @@ rcu_torture_writer(void *arg) + i = rp->rtort_pipe_count; + if (i > RCU_TORTURE_PIPE_LEN) + i = RCU_TORTURE_PIPE_LEN; +- atomic_inc(&rcu_torture_wcount[i]); ++ atomic_inc_unchecked(&rcu_torture_wcount[i]); + if (++rp->rtort_pipe_count >= + RCU_TORTURE_PIPE_LEN) { + rp->rtort_mbtest = 0; +@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused) + return; + } + if (p->rtort_mbtest == 0) +- 
atomic_inc(&n_rcu_torture_mberror); ++ atomic_inc_unchecked(&n_rcu_torture_mberror); + spin_lock(&rand_lock); + cur_ops->read_delay(&rand); + n_rcu_torture_timers++; +@@ -1010,7 +1010,7 @@ rcu_torture_reader(void *arg) + continue; + } + if (p->rtort_mbtest == 0) +- atomic_inc(&n_rcu_torture_mberror); ++ atomic_inc_unchecked(&n_rcu_torture_mberror); + cur_ops->read_delay(&rand); + preempt_disable(); + pipe_count = p->rtort_pipe_count; +@@ -1068,15 +1068,15 @@ rcu_torture_printk(char *page) + } + page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG); + page += sprintf(page, +- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", ++ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", + rcu_torture_current, + rcu_torture_current_version, + list_empty(&rcu_torture_freelist), +- atomic_read(&n_rcu_torture_alloc), +- atomic_read(&n_rcu_torture_alloc_fail), +- atomic_read(&n_rcu_torture_free)); ++ atomic_read_unchecked(&n_rcu_torture_alloc), ++ atomic_read_unchecked(&n_rcu_torture_alloc_fail), ++ atomic_read_unchecked(&n_rcu_torture_free)); + page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ", +- atomic_read(&n_rcu_torture_mberror), ++ atomic_read_unchecked(&n_rcu_torture_mberror), + n_rcu_torture_boost_ktrerror, + n_rcu_torture_boost_rterror); + page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ", +@@ -1095,14 +1095,14 @@ rcu_torture_printk(char *page) + n_barrier_attempts, + n_rcu_torture_barrier_error); + page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG); +- if (atomic_read(&n_rcu_torture_mberror) != 0 || ++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 || + n_rcu_torture_barrier_error != 0 || + n_rcu_torture_boost_ktrerror != 0 || + n_rcu_torture_boost_rterror != 0 || + n_rcu_torture_boost_failure != 0 || + i > 1) { + page += sprintf(page, "!!! "); +- atomic_inc(&n_rcu_torture_error); ++ atomic_inc_unchecked(&n_rcu_torture_error); + WARN_ON_ONCE(1); + } + page += sprintf(page, "Reader Pipe: "); +@@ -1116,7 +1116,7 @@ rcu_torture_printk(char *page) + page += sprintf(page, "Free-Block Circulation: "); + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { + page += sprintf(page, " %d", +- atomic_read(&rcu_torture_wcount[i])); ++ atomic_read_unchecked(&rcu_torture_wcount[i])); + } + page += sprintf(page, "\n"); + if (cur_ops->stats) +@@ -1839,7 +1839,7 @@ rcu_torture_cleanup(void) + + rcu_torture_stats_print(); /* -After- the stats thread is stopped! 
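/*
 * Aside: the atomic_unchecked_t conversions in these torture-test and
 * profiling counters opt out of PaX's atomic overflow instrumentation.
 * A checked atomic_inc() traps on wraparound (the REFCOUNT defense),
 * which is the wrong behavior for free-running statistics. Assumed shape
 * of the unchecked variant on x86 (illustrative; the real definitions
 * live in the patched arch/x86/include/asm/atomic.h):
 */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* same lock-prefixed increment, minus the overflow-check epilogue */
        asm volatile("lock; incl %0" : "+m" (v->counter));
}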
*/ + +- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) ++ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error) + rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); + else if (n_online_successes != n_online_attempts || + n_offline_successes != n_offline_attempts) +@@ -1961,18 +1961,18 @@ rcu_torture_init(void) + + rcu_torture_current = NULL; + rcu_torture_current_version = 0; +- atomic_set(&n_rcu_torture_alloc, 0); +- atomic_set(&n_rcu_torture_alloc_fail, 0); +- atomic_set(&n_rcu_torture_free, 0); +- atomic_set(&n_rcu_torture_mberror, 0); +- atomic_set(&n_rcu_torture_error, 0); ++ atomic_set_unchecked(&n_rcu_torture_alloc, 0); ++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0); ++ atomic_set_unchecked(&n_rcu_torture_free, 0); ++ atomic_set_unchecked(&n_rcu_torture_mberror, 0); ++ atomic_set_unchecked(&n_rcu_torture_error, 0); + n_rcu_torture_barrier_error = 0; + n_rcu_torture_boost_ktrerror = 0; + n_rcu_torture_boost_rterror = 0; + n_rcu_torture_boost_failure = 0; + n_rcu_torture_boosts = 0; + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) +- atomic_set(&rcu_torture_wcount[i], 0); ++ atomic_set_unchecked(&rcu_torture_wcount[i], 0); + for_each_possible_cpu(cpu) { + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { + per_cpu(rcu_torture_count, cpu)[i] = 0; +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index b3d116c..ebf6598 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -390,9 +390,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, + rcu_prepare_for_idle(smp_processor_id()); + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ + smp_mb__before_atomic_inc(); /* See above. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1); + + /* + * It is illegal to enter an extended quiescent state while +@@ -510,10 +510,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, + int user) + { + smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ + smp_mb__after_atomic_inc(); /* See above. */ +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)); + rcu_cleanup_after_idle(smp_processor_id()); + trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); + if (!user && !is_idle_task(current)) { +@@ -634,14 +634,14 @@ void rcu_nmi_enter(void) + struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + + if (rdtp->dynticks_nmi_nesting == 0 && +- (atomic_read(&rdtp->dynticks) & 0x1)) ++ (atomic_read_unchecked(&rdtp->dynticks) & 0x1)) + return; + rdtp->dynticks_nmi_nesting++; + smp_mb__before_atomic_inc(); /* Force delay from prior write. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ + smp_mb__after_atomic_inc(); /* See above. 
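/*
 * Aside: the invariant behind the WARN_ON_ONCE()s in these dynticks hunks
 * is unchanged by the _unchecked conversion -- ->dynticks is even exactly
 * while the CPU sits in an extended quiescent state, odd otherwise, and
 * every idle/NMI transition performs one atomic increment to flip the
 * parity. The grace-period machinery's snapshot test reduces to:
 */
static inline int dynticks_in_eqs(int snap)
{
        return (snap & 0x1) == 0;       /* even => idle/EQS */
}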
*/ +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)); + } + + /** +@@ -660,9 +660,9 @@ void rcu_nmi_exit(void) + return; + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ + smp_mb__before_atomic_inc(); /* See above. */ +- atomic_inc(&rdtp->dynticks); ++ atomic_inc_unchecked(&rdtp->dynticks); + smp_mb__after_atomic_inc(); /* Force delay to next write. */ +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1); + } + + /** +@@ -675,7 +675,7 @@ void rcu_nmi_exit(void) + */ + bool notrace __rcu_is_watching(void) + { +- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1; ++ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1; + } + + /** +@@ -758,7 +758,7 @@ static int rcu_is_cpu_rrupt_from_idle(void) + static int dyntick_save_progress_counter(struct rcu_data *rdp, + bool *isidle, unsigned long *maxj) + { +- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); ++ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks); + rcu_sysidle_check_cpu(rdp, isidle, maxj); + return (rdp->dynticks_snap & 0x1) == 0; + } +@@ -781,7 +781,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, + unsigned int curr; + unsigned int snap; + +- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); ++ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks); + snap = (unsigned int)rdp->dynticks_snap; + + /* +@@ -1450,9 +1450,9 @@ static int rcu_gp_init(struct rcu_state *rsp) + rdp = this_cpu_ptr(rsp->rda); + rcu_preempt_check_blocked_tasks(rnp); + rnp->qsmask = rnp->qsmaskinit; +- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum; ++ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum; + WARN_ON_ONCE(rnp->completed != rsp->completed); +- ACCESS_ONCE(rnp->completed) = rsp->completed; ++ ACCESS_ONCE_RW(rnp->completed) = rsp->completed; + if (rnp == rdp->mynode) + __note_gp_changes(rsp, rnp, rdp); + rcu_preempt_boost_start_gp(rnp); +@@ -1546,7 +1546,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) + rcu_for_each_node_breadth_first(rsp, rnp) { + raw_spin_lock_irq(&rnp->lock); + smp_mb__after_unlock_lock(); +- ACCESS_ONCE(rnp->completed) = rsp->gpnum; ++ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum; + rdp = this_cpu_ptr(rsp->rda); + if (rnp == rdp->mynode) + __note_gp_changes(rsp, rnp, rdp); +@@ -1912,7 +1912,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, + rsp->qlen += rdp->qlen; + rdp->n_cbs_orphaned += rdp->qlen; + rdp->qlen_lazy = 0; +- ACCESS_ONCE(rdp->qlen) = 0; ++ ACCESS_ONCE_RW(rdp->qlen) = 0; + } + + /* +@@ -2159,7 +2159,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) + } + smp_mb(); /* List handling before counting for rcu_barrier(). */ + rdp->qlen_lazy -= count_lazy; +- ACCESS_ONCE(rdp->qlen) -= count; ++ ACCESS_ONCE_RW(rdp->qlen) -= count; + rdp->n_cbs_invoked += count; + + /* Reinstate batch limit if we have worked down the excess. */ +@@ -2362,7 +2362,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) + /* + * Do RCU core processing for the current CPU. + */ +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + struct rcu_state *rsp; + +@@ -2470,7 +2470,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), + WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! 
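/*
 * Aside: dyntick_save_progress_counter() above reads the counter with
 * atomic_add_return(0, ...) rather than atomic_read() because the
 * add-return form implies full memory barriers on both sides; the
 * _unchecked variant keeps exactly those ordering guarantees and drops
 * only the overflow trap. Equivalent helper, as a sketch:
 */
static inline int atomic_read_ordered(atomic_unchecked_t *v)
{
        return atomic_add_return_unchecked(0, v);  /* fully ordered read */
}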
*/ + if (debug_rcu_head_queue(head)) { + /* Probable double call_rcu(), so leak the callback. */ +- ACCESS_ONCE(head->func) = rcu_leak_callback; ++ ACCESS_ONCE_RW(head->func) = rcu_leak_callback; + WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n"); + return; + } +@@ -2498,7 +2498,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), + local_irq_restore(flags); + return; + } +- ACCESS_ONCE(rdp->qlen)++; ++ ACCESS_ONCE_RW(rdp->qlen)++; + if (lazy) + rdp->qlen_lazy++; + else +@@ -2707,11 +2707,11 @@ void synchronize_sched_expedited(void) + * counter wrap on a 32-bit system. Quite a few more CPUs would of + * course be required on a 64-bit system. + */ +- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start), ++ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start), + (ulong)atomic_long_read(&rsp->expedited_done) + + ULONG_MAX / 8)) { + synchronize_sched(); +- atomic_long_inc(&rsp->expedited_wrap); ++ atomic_long_inc_unchecked(&rsp->expedited_wrap); + return; + } + +@@ -2719,7 +2719,7 @@ void synchronize_sched_expedited(void) + * Take a ticket. Note that atomic_inc_return() implies a + * full memory barrier. + */ +- snap = atomic_long_inc_return(&rsp->expedited_start); ++ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start); + firstsnap = snap; + get_online_cpus(); + WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id())); +@@ -2732,14 +2732,14 @@ void synchronize_sched_expedited(void) + synchronize_sched_expedited_cpu_stop, + NULL) == -EAGAIN) { + put_online_cpus(); +- atomic_long_inc(&rsp->expedited_tryfail); ++ atomic_long_inc_unchecked(&rsp->expedited_tryfail); + + /* Check to see if someone else did our work for us. */ + s = atomic_long_read(&rsp->expedited_done); + if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { + /* ensure test happens before caller kfree */ + smp_mb__before_atomic_inc(); /* ^^^ */ +- atomic_long_inc(&rsp->expedited_workdone1); ++ atomic_long_inc_unchecked(&rsp->expedited_workdone1); + return; + } + +@@ -2748,7 +2748,7 @@ void synchronize_sched_expedited(void) + udelay(trycount * num_online_cpus()); + } else { + wait_rcu_gp(call_rcu_sched); +- atomic_long_inc(&rsp->expedited_normal); ++ atomic_long_inc_unchecked(&rsp->expedited_normal); + return; + } + +@@ -2757,7 +2757,7 @@ void synchronize_sched_expedited(void) + if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { + /* ensure test happens before caller kfree */ + smp_mb__before_atomic_inc(); /* ^^^ */ +- atomic_long_inc(&rsp->expedited_workdone2); ++ atomic_long_inc_unchecked(&rsp->expedited_workdone2); + return; + } + +@@ -2769,10 +2769,10 @@ void synchronize_sched_expedited(void) + * period works for us. + */ + get_online_cpus(); +- snap = atomic_long_read(&rsp->expedited_start); ++ snap = atomic_long_read_unchecked(&rsp->expedited_start); + smp_mb(); /* ensure read is before try_stop_cpus(). */ + } +- atomic_long_inc(&rsp->expedited_stoppedcpus); ++ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus); + + /* + * Everyone up to our most recent fetch is covered by our grace +@@ -2781,16 +2781,16 @@ void synchronize_sched_expedited(void) + * than we did already did their update. 
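/*
 * Aside: the expedited "ticket" counters above are free-running and are
 * expected to wrap, which is why converting them to unchecked atomics is
 * safe and why they are compared with the wrap-tolerant helper from
 * <linux/rcupdate.h> instead of a plain >=:
 */
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))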
+ */ + do { +- atomic_long_inc(&rsp->expedited_done_tries); ++ atomic_long_inc_unchecked(&rsp->expedited_done_tries); + s = atomic_long_read(&rsp->expedited_done); + if (ULONG_CMP_GE((ulong)s, (ulong)snap)) { + /* ensure test happens before caller kfree */ + smp_mb__before_atomic_inc(); /* ^^^ */ +- atomic_long_inc(&rsp->expedited_done_lost); ++ atomic_long_inc_unchecked(&rsp->expedited_done_lost); + break; + } + } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s); +- atomic_long_inc(&rsp->expedited_done_exit); ++ atomic_long_inc_unchecked(&rsp->expedited_done_exit); + + put_online_cpus(); + } +@@ -2996,7 +2996,7 @@ static void _rcu_barrier(struct rcu_state *rsp) + * ACCESS_ONCE() to prevent the compiler from speculating + * the increment to precede the early-exit check. + */ +- ACCESS_ONCE(rsp->n_barrier_done)++; ++ ACCESS_ONCE_RW(rsp->n_barrier_done)++; + WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1); + _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done); + smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */ +@@ -3046,7 +3046,7 @@ static void _rcu_barrier(struct rcu_state *rsp) + + /* Increment ->n_barrier_done to prevent duplicate work. */ + smp_mb(); /* Keep increment after above mechanism. */ +- ACCESS_ONCE(rsp->n_barrier_done)++; ++ ACCESS_ONCE_RW(rsp->n_barrier_done)++; + WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0); + _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done); + smp_mb(); /* Keep increment before caller's subsequent code. */ +@@ -3091,10 +3091,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) + rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); + init_callback_list(rdp); + rdp->qlen_lazy = 0; +- ACCESS_ONCE(rdp->qlen) = 0; ++ ACCESS_ONCE_RW(rdp->qlen) = 0; + rdp->dynticks = &per_cpu(rcu_dynticks, cpu); + WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); +- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); ++ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1); + rdp->cpu = cpu; + rdp->rsp = rsp; + rcu_boot_init_nocb_percpu_data(rdp); +@@ -3128,8 +3128,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) + init_callback_list(rdp); /* Re-enable callbacks on this CPU. */ + rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; + rcu_sysidle_init_percpu_data(rdp->dynticks); +- atomic_set(&rdp->dynticks->dynticks, +- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); ++ atomic_set_unchecked(&rdp->dynticks->dynticks, ++ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1); + raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ + + /* Add CPU to rcu_node bitmasks. */ +diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h +index 8c19873..bf83c57 100644 +--- a/kernel/rcu/tree.h ++++ b/kernel/rcu/tree.h +@@ -87,11 +87,11 @@ struct rcu_dynticks { + long long dynticks_nesting; /* Track irq/process nesting level. */ + /* Process level is worth LLONG_MAX/2. */ + int dynticks_nmi_nesting; /* Track NMI nesting level. */ +- atomic_t dynticks; /* Even value for idle, else odd. */ ++ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */ + #ifdef CONFIG_NO_HZ_FULL_SYSIDLE + long long dynticks_idle_nesting; + /* irq/process nesting level from idle. */ +- atomic_t dynticks_idle; /* Even value for idle, else odd. */ ++ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */ + /* "Idle" excludes userspace execution. */ + unsigned long dynticks_idle_jiffies; + /* End of last non-NMI non-idle period. 
*/ +@@ -431,17 +431,17 @@ struct rcu_state { + /* _rcu_barrier(). */ + /* End of fields guarded by barrier_mutex. */ + +- atomic_long_t expedited_start; /* Starting ticket. */ +- atomic_long_t expedited_done; /* Done ticket. */ +- atomic_long_t expedited_wrap; /* # near-wrap incidents. */ +- atomic_long_t expedited_tryfail; /* # acquisition failures. */ +- atomic_long_t expedited_workdone1; /* # done by others #1. */ +- atomic_long_t expedited_workdone2; /* # done by others #2. */ +- atomic_long_t expedited_normal; /* # fallbacks to normal. */ +- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */ +- atomic_long_t expedited_done_tries; /* # tries to update _done. */ +- atomic_long_t expedited_done_lost; /* # times beaten to _done. */ +- atomic_long_t expedited_done_exit; /* # times exited _done loop. */ ++ atomic_long_unchecked_t expedited_start; /* Starting ticket. */ ++ atomic_long_t expedited_done; /* Done ticket. */ ++ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */ ++ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */ ++ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */ ++ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */ ++ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */ ++ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */ ++ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */ ++ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */ ++ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */ + + unsigned long jiffies_force_qs; /* Time at which to invoke */ + /* force_quiescent_state(). */ +diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h +index 6e2ef4b..c15df94 100644 +--- a/kernel/rcu/tree_plugin.h ++++ b/kernel/rcu/tree_plugin.h +@@ -758,7 +758,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp) + static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) + { + return !rcu_preempted_readers_exp(rnp) && +- ACCESS_ONCE(rnp->expmask) == 0; ++ ACCESS_ONCE_RW(rnp->expmask) == 0; + } + + /* +@@ -920,7 +920,7 @@ void synchronize_rcu_expedited(void) + + /* Clean up and exit. */ + smp_mb(); /* ensure expedited GP seen before counter increment. */ +- ACCESS_ONCE(sync_rcu_preempt_exp_count)++; ++ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++; + unlock_mb_ret: + mutex_unlock(&sync_rcu_preempt_exp_mutex); + mb_ret: +@@ -1496,7 +1496,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) + free_cpumask_var(cm); + } + +-static struct smp_hotplug_thread rcu_cpu_thread_spec = { ++static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = { + .store = &rcu_cpu_kthread_task, + .thread_should_run = rcu_cpu_kthread_should_run, + .thread_fn = rcu_cpu_kthread, +@@ -1965,7 +1965,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) + print_cpu_stall_fast_no_hz(fast_no_hz, cpu); + pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", + cpu, ticks_value, ticks_title, +- atomic_read(&rdtp->dynticks) & 0xfff, ++ atomic_read_unchecked(&rdtp->dynticks) & 0xfff, + rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, + rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), + fast_no_hz); +@@ -2129,7 +2129,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, + + /* Enqueue the callback on the nocb list and update counts. 
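/*
 * Aside on the struct rcu_state hunk above: note that expedited_done is
 * deliberately left as a checked atomic_long_t while its neighbours become
 * _unchecked -- it only ever advances through a cmpxchg loop, never
 * through blind increments, so the overflow instrumentation neither costs
 * nor breaks anything there. The update loop from the earlier hunk,
 * condensed:
 */
do {
        s = atomic_long_read(&rsp->expedited_done); /* checked read is fine */
        if (ULONG_CMP_GE((ulong)s, (ulong)snap))
                break;                  /* someone else advanced it for us */
} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);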
*/ + old_rhpp = xchg(&rdp->nocb_tail, rhtp); +- ACCESS_ONCE(*old_rhpp) = rhp; ++ ACCESS_ONCE_RW(*old_rhpp) = rhp; + atomic_long_add(rhcount, &rdp->nocb_q_count); + atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); + +@@ -2302,12 +2302,12 @@ static int rcu_nocb_kthread(void *arg) + * Extract queued callbacks, update counts, and wait + * for a grace period to elapse. + */ +- ACCESS_ONCE(rdp->nocb_head) = NULL; ++ ACCESS_ONCE_RW(rdp->nocb_head) = NULL; + tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); + c = atomic_long_xchg(&rdp->nocb_q_count, 0); + cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0); +- ACCESS_ONCE(rdp->nocb_p_count) += c; +- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl; ++ ACCESS_ONCE_RW(rdp->nocb_p_count) += c; ++ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl; + rcu_nocb_wait_gp(rdp); + + /* Each pass through the following loop invokes a callback. */ +@@ -2333,8 +2333,8 @@ static int rcu_nocb_kthread(void *arg) + list = next; + } + trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); +- ACCESS_ONCE(rdp->nocb_p_count) -= c; +- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl; ++ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c; ++ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl; + rdp->n_nocbs_invoked += c; + } + return 0; +@@ -2351,7 +2351,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) + { + if (!rcu_nocb_need_deferred_wakeup(rdp)) + return; +- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false; ++ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false; + wake_up(&rdp->nocb_wq); + trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty")); + } +@@ -2377,7 +2377,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp) + t = kthread_run(rcu_nocb_kthread, rdp, + "rcuo%c/%d", rsp->abbr, cpu); + BUG_ON(IS_ERR(t)); +- ACCESS_ONCE(rdp->nocb_kthread) = t; ++ ACCESS_ONCE_RW(rdp->nocb_kthread) = t; + } + } + +@@ -2513,11 +2513,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq) + + /* Record start of fully idle period. */ + j = jiffies; +- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; ++ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j; + smp_mb__before_atomic_inc(); +- atomic_inc(&rdtp->dynticks_idle); ++ atomic_inc_unchecked(&rdtp->dynticks_idle); + smp_mb__after_atomic_inc(); +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1); + } + + /* +@@ -2582,9 +2582,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq) + + /* Record end of idle period. */ + smp_mb__before_atomic_inc(); +- atomic_inc(&rdtp->dynticks_idle); ++ atomic_inc_unchecked(&rdtp->dynticks_idle); + smp_mb__after_atomic_inc(); +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1)); ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1)); + + /* + * If we are the timekeeping CPU, we are permitted to be non-idle +@@ -2625,7 +2625,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, + WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu); + + /* Pick up current idle and NMI-nesting counter and check. */ +- cur = atomic_read(&rdtp->dynticks_idle); ++ cur = atomic_read_unchecked(&rdtp->dynticks_idle); + if (cur & 0x1) { + *isidle = false; /* We are not idle! */ + return; +@@ -2688,7 +2688,7 @@ static void rcu_sysidle(unsigned long j) + case RCU_SYSIDLE_NOT: + + /* First time all are idle, so note a short idle period. 
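/*
 * Aside: structures such as rcu_cpu_thread_spec above carry function
 * pointers but are only written during boot, so the patch marks them
 * __read_only, placing them in a section that is write-protected once
 * init finishes -- overwriting the pointers then faults instead of
 * hijacking control flow. Assumed shape of the marker (from memory of
 * the PaX tree, not quoted from this hunk):
 */
#define __read_only __attribute__((__section__(".data..read_only")))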
*/ +- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; ++ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT; + break; + + case RCU_SYSIDLE_SHORT: +@@ -2725,7 +2725,7 @@ static void rcu_sysidle(unsigned long j) + static void rcu_sysidle_cancel(void) + { + smp_mb(); +- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; ++ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT; + } + + /* +@@ -2773,7 +2773,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp) + smp_mb(); /* grace period precedes setting inuse. */ + + rshp = container_of(rhp, struct rcu_sysidle_head, rh); +- ACCESS_ONCE(rshp->inuse) = 0; ++ ACCESS_ONCE_RW(rshp->inuse) = 0; + } + + /* +diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c +index 4def475..8ffddde 100644 +--- a/kernel/rcu/tree_trace.c ++++ b/kernel/rcu/tree_trace.c +@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) + ulong2long(rdp->completed), ulong2long(rdp->gpnum), + rdp->passed_quiesce, rdp->qs_pending); + seq_printf(m, " dt=%d/%llx/%d df=%lu", +- atomic_read(&rdp->dynticks->dynticks), ++ atomic_read_unchecked(&rdp->dynticks->dynticks), + rdp->dynticks->dynticks_nesting, + rdp->dynticks->dynticks_nmi_nesting, + rdp->dynticks_fqs); +@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v) + struct rcu_state *rsp = (struct rcu_state *)m->private; + + seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n", +- atomic_long_read(&rsp->expedited_start), ++ atomic_long_read_unchecked(&rsp->expedited_start), + atomic_long_read(&rsp->expedited_done), +- atomic_long_read(&rsp->expedited_wrap), +- atomic_long_read(&rsp->expedited_tryfail), +- atomic_long_read(&rsp->expedited_workdone1), +- atomic_long_read(&rsp->expedited_workdone2), +- atomic_long_read(&rsp->expedited_normal), +- atomic_long_read(&rsp->expedited_stoppedcpus), +- atomic_long_read(&rsp->expedited_done_tries), +- atomic_long_read(&rsp->expedited_done_lost), +- atomic_long_read(&rsp->expedited_done_exit)); ++ atomic_long_read_unchecked(&rsp->expedited_wrap), ++ atomic_long_read_unchecked(&rsp->expedited_tryfail), ++ atomic_long_read_unchecked(&rsp->expedited_workdone1), ++ atomic_long_read_unchecked(&rsp->expedited_workdone2), ++ atomic_long_read_unchecked(&rsp->expedited_normal), ++ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus), ++ atomic_long_read_unchecked(&rsp->expedited_done_tries), ++ atomic_long_read_unchecked(&rsp->expedited_done_lost), ++ atomic_long_read_unchecked(&rsp->expedited_done_exit)); + return 0; + } + +diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c +index c54609f..2e8829c 100644 +--- a/kernel/rcu/update.c ++++ b/kernel/rcu/update.c +@@ -312,10 +312,10 @@ int rcu_jiffies_till_stall_check(void) + * for CONFIG_RCU_CPU_STALL_TIMEOUT. 
+ */ + if (till_stall_check < 3) { +- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3; ++ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3; + till_stall_check = 3; + } else if (till_stall_check > 300) { +- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300; ++ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300; + till_stall_check = 300; + } + return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; +diff --git a/kernel/resource.c b/kernel/resource.c +index 3f285dc..5755f62 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = { + + static int __init ioresources_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations); ++#endif ++#else + proc_create("ioports", 0, NULL, &proc_ioports_operations); + proc_create("iomem", 0, NULL, &proc_iomem_operations); ++#endif + return 0; + } + __initcall(ioresources_init); +diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c +index 4a07353..66b5291 100644 +--- a/kernel/sched/auto_group.c ++++ b/kernel/sched/auto_group.c +@@ -11,7 +11,7 @@ + + unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; + static struct autogroup autogroup_default; +-static atomic_t autogroup_seq_nr; ++static atomic_unchecked_t autogroup_seq_nr; + + void __init autogroup_init(struct task_struct *init_task) + { +@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void) + + kref_init(&ag->kref); + init_rwsem(&ag->lock); +- ag->id = atomic_inc_return(&autogroup_seq_nr); ++ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr); + ag->tg = tg; + #ifdef CONFIG_RT_GROUP_SCHED + /* +diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c +index a63f4dc..349bbb0 100644 +--- a/kernel/sched/completion.c ++++ b/kernel/sched/completion.c +@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. + */ +-long __sched ++long __sched __intentional_overflow(-1) + wait_for_completion_interruptible_timeout(struct completion *x, + unsigned long timeout) + { +@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); + * + * Return: -ERESTARTSYS if interrupted, 0 if completed. + */ +-int __sched wait_for_completion_killable(struct completion *x) ++int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x) + { + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); + if (t == -ERESTARTSYS) +@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable); + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. 
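/*
 * Aside on the __intentional_overflow(-1) annotations in the completion
 * hunks above: they tell the size_overflow GCC plugin that a negative
 * return (-ERESTARTSYS) is intended, so these signed "remaining jiffies
 * or error" returns are not instrumented or flagged. Assumed shape of
 * the marker when the plugin is enabled (a no-op define otherwise); the
 * -1 argument marking the return value is an assumption based on the
 * plugin's conventions:
 */
#define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))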
+ */ +-long __sched ++long __sched __intentional_overflow(-1) + wait_for_completion_killable_timeout(struct completion *x, + unsigned long timeout) + { +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 677ebad..e39b352 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1775,7 +1775,7 @@ void set_numabalancing_state(bool enabled) + int sysctl_numa_balancing(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table t; ++ ctl_table_no_const t; + int err; + int state = numabalancing_enabled; + +@@ -2251,8 +2251,10 @@ context_switch(struct rq *rq, struct task_struct *prev, + next->active_mm = oldmm; + atomic_inc(&oldmm->mm_count); + enter_lazy_tlb(oldmm, next); +- } else ++ } else { + switch_mm(oldmm, mm, next); ++ populate_stack(); ++ } + + if (!prev->mm) { + prev->active_mm = NULL; +@@ -3049,6 +3051,8 @@ int can_nice(const struct task_struct *p, const int nice) + /* convert nice value [19,-20] to rlimit style value [1,40] */ + int nice_rlim = 20 - nice; + ++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1); ++ + return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || + capable(CAP_SYS_NICE)); + } +@@ -3082,7 +3086,8 @@ SYSCALL_DEFINE1(nice, int, increment) + if (nice > 19) + nice = 19; + +- if (increment < 0 && !can_nice(current, nice)) ++ if (increment < 0 && (!can_nice(current, nice) || ++ gr_handle_chroot_nice())) + return -EPERM; + + retval = security_task_setnice(current, nice); +@@ -3355,6 +3360,7 @@ recheck: + if (policy != p->policy && !rlim_rtprio) + return -EPERM; + ++ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1); + /* can't increase priority */ + if (attr->sched_priority > p->rt_priority && + attr->sched_priority > rlim_rtprio) +@@ -4727,8 +4733,10 @@ void idle_task_exit(void) + + BUG_ON(cpu_online(smp_processor_id())); + +- if (mm != &init_mm) ++ if (mm != &init_mm) { + switch_mm(mm, &init_mm, current); ++ populate_stack(); ++ } + mmdrop(mm); + } + +@@ -4806,7 +4814,7 @@ static void migrate_tasks(unsigned int dead_cpu) + + #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) + +-static struct ctl_table sd_ctl_dir[] = { ++static ctl_table_no_const sd_ctl_dir[] __read_only = { + { + .procname = "sched_domain", + .mode = 0555, +@@ -4823,17 +4831,17 @@ static struct ctl_table sd_ctl_root[] = { + {} + }; + +-static struct ctl_table *sd_alloc_ctl_entry(int n) ++static ctl_table_no_const *sd_alloc_ctl_entry(int n) + { +- struct ctl_table *entry = ++ ctl_table_no_const *entry = + kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); + + return entry; + } + +-static void sd_free_ctl_entry(struct ctl_table **tablep) ++static void sd_free_ctl_entry(ctl_table_no_const *tablep) + { +- struct ctl_table *entry; ++ ctl_table_no_const *entry; + + /* + * In the intermediate directories, both the child directory and +@@ -4841,22 +4849,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) + * will always be set. In the lowest directory the names are + * static strings and all have proc handlers. 
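/*
 * Aside: the sched-domain ctl tables in this region become
 * ctl_table_no_const/__read_only, so the few legitimate writers must
 * bracket their stores with pax_open_kernel()/pax_close_kernel(), as the
 * hunks just below do. In spirit, on x86 the pair briefly clears CR0.WP
 * with preemption disabled (sketch only; the patch's real implementation
 * adds paravirt handling and stricter checks):
 */
static inline unsigned long pax_open_kernel(void)
{
        unsigned long cr0;

        preempt_disable();
        cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);   /* stores to RO pages now succeed */
        return cr0;
}

static inline unsigned long pax_close_kernel(void)
{
        unsigned long cr0 = read_cr0() | X86_CR0_WP;

        write_cr0(cr0);
        preempt_enable();
        return cr0;
}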
+ */ +- for (entry = *tablep; entry->mode; entry++) { +- if (entry->child) +- sd_free_ctl_entry(&entry->child); ++ for (entry = tablep; entry->mode; entry++) { ++ if (entry->child) { ++ sd_free_ctl_entry(entry->child); ++ pax_open_kernel(); ++ entry->child = NULL; ++ pax_close_kernel(); ++ } + if (entry->proc_handler == NULL) + kfree(entry->procname); + } + +- kfree(*tablep); +- *tablep = NULL; ++ kfree(tablep); + } + + static int min_load_idx = 0; + static int max_load_idx = CPU_LOAD_IDX_MAX-1; + + static void +-set_table_entry(struct ctl_table *entry, ++set_table_entry(ctl_table_no_const *entry, + const char *procname, void *data, int maxlen, + umode_t mode, proc_handler *proc_handler, + bool load_idx) +@@ -4876,7 +4887,7 @@ set_table_entry(struct ctl_table *entry, + static struct ctl_table * + sd_alloc_ctl_domain_table(struct sched_domain *sd) + { +- struct ctl_table *table = sd_alloc_ctl_entry(13); ++ ctl_table_no_const *table = sd_alloc_ctl_entry(13); + + if (table == NULL) + return NULL; +@@ -4911,9 +4922,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) + return table; + } + +-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) ++static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu) + { +- struct ctl_table *entry, *table; ++ ctl_table_no_const *entry, *table; + struct sched_domain *sd; + int domain_num = 0, i; + char buf[32]; +@@ -4940,11 +4951,13 @@ static struct ctl_table_header *sd_sysctl_header; + static void register_sched_domain_sysctl(void) + { + int i, cpu_num = num_possible_cpus(); +- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); ++ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1); + char buf[32]; + + WARN_ON(sd_ctl_dir[0].child); ++ pax_open_kernel(); + sd_ctl_dir[0].child = entry; ++ pax_close_kernel(); + + if (entry == NULL) + return; +@@ -4967,8 +4980,12 @@ static void unregister_sched_domain_sysctl(void) + if (sd_sysctl_header) + unregister_sysctl_table(sd_sysctl_header); + sd_sysctl_header = NULL; +- if (sd_ctl_dir[0].child) +- sd_free_ctl_entry(&sd_ctl_dir[0].child); ++ if (sd_ctl_dir[0].child) { ++ sd_free_ctl_entry(sd_ctl_dir[0].child); ++ pax_open_kernel(); ++ sd_ctl_dir[0].child = NULL; ++ pax_close_kernel(); ++ } + } + #else + static void register_sched_domain_sysctl(void) +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 9b4c4f3..665489b 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -1647,7 +1647,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags) + + static void reset_ptenuma_scan(struct task_struct *p) + { +- ACCESS_ONCE(p->mm->numa_scan_seq)++; ++ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++; + p->mm->numa_scan_offset = 0; + } + +@@ -6851,7 +6851,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } + * run_rebalance_domains is triggered when needed from the scheduler tick. + * Also triggered for nohz idle balancing (with nohz_balancing_kick set). + */ +-static void run_rebalance_domains(struct softirq_action *h) ++static __latent_entropy void run_rebalance_domains(void) + { + struct rq *this_rq = this_rq(); + enum cpu_idle_type idle = this_rq->idle_balance ? 
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index f964add..dcd823d 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1157,7 +1157,7 @@ struct sched_class { + #ifdef CONFIG_FAIR_GROUP_SCHED + void (*task_move_group) (struct task_struct *p, int on_rq); + #endif +-}; ++} __do_const; + + #define sched_class_highest (&stop_sched_class) + #define for_each_class(class) \ +diff --git a/kernel/signal.c b/kernel/signal.c +index 52f881d..1e9f941 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep; + + int print_fatal_signals __read_mostly; + +-static void __user *sig_handler(struct task_struct *t, int sig) ++static __sighandler_t sig_handler(struct task_struct *t, int sig) + { + return t->sighand->action[sig - 1].sa.sa_handler; + } + +-static int sig_handler_ignored(void __user *handler, int sig) ++static int sig_handler_ignored(__sighandler_t handler, int sig) + { + /* Is it explicitly or implicitly ignored? */ + return handler == SIG_IGN || +@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig) + + static int sig_task_ignored(struct task_struct *t, int sig, bool force) + { +- void __user *handler; ++ __sighandler_t handler; + + handler = sig_handler(t, sig); + +@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi + atomic_inc(&user->sigpending); + rcu_read_unlock(); + ++ if (!override_rlimit) ++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1); ++ + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default) + + int unhandled_signal(struct task_struct *tsk, int sig) + { +- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; ++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler; + if (is_global_init(tsk)) + return 1; + if (handler != SIG_IGN && handler != SIG_DFL) +@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info, + } + } + ++ /* allow glibc communication via tgkill to other threads in our ++ thread group */ ++ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL || ++ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid) ++ && gr_handle_signal(t, sig)) ++ return -EPERM; ++ + return security_task_kill(t, info, sig, 0); + } + +@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) + return send_signal(sig, info, p, 1); + } + +-static int ++int + specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + return send_signal(sig, info, t, 0); +@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + unsigned long int flags; + int ret, blocked, ignored; + struct k_sigaction *action; ++ int is_unhandled = 0; + + spin_lock_irqsave(&t->sighand->siglock, flags); + action = &t->sighand->action[sig-1]; +@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + } + if (action->sa.sa_handler == SIG_DFL) + t->signal->flags &= ~SIGNAL_UNKILLABLE; ++ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL) ++ is_unhandled = 1; + ret = specific_send_sig_info(sig, info, t); + spin_unlock_irqrestore(&t->sighand->siglock, flags); + ++ /* only deal with unhandled signals, java etc trigger SIGSEGV during ++ normal operation */ ++ if (is_unhandled) { ++ gr_log_signal(sig, 
!is_si_special(info) ? info->si_addr : NULL, t); ++ gr_handle_crash(t, sig); ++ } ++ + return ret; + } + +@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) + ret = check_kill_permission(sig, info, p); + rcu_read_unlock(); + +- if (!ret && sig) ++ if (!ret && sig) { + ret = do_send_sig_info(sig, info, p, true); ++ if (!ret) ++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p); ++ } + + return ret; + } +@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) + int error = -ESRCH; + + rcu_read_lock(); +- p = find_task_by_vpid(pid); ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ /* allow glibc communication via tgkill to other threads in our ++ thread group */ ++ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL && ++ sig == (SIGRTMIN+1) && tgid == info->si_pid) ++ p = find_task_by_vpid_unrestricted(pid); ++ else ++#endif ++ p = find_task_by_vpid(pid); + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { + error = check_kill_permission(sig, info, p); + /* +@@ -3239,8 +3270,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack, + } + seg = get_fs(); + set_fs(KERNEL_DS); +- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), +- (stack_t __force __user *) &uoss, ++ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL), ++ (stack_t __force_user *) &uoss, + compat_user_stack_pointer()); + set_fs(seg); + if (ret >= 0 && uoss_ptr) { +diff --git a/kernel/smpboot.c b/kernel/smpboot.c +index eb89e18..a4e6792 100644 +--- a/kernel/smpboot.c ++++ b/kernel/smpboot.c +@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) + } + smpboot_unpark_thread(plug_thread, cpu); + } +- list_add(&plug_thread->list, &hotplug_threads); ++ pax_list_add(&plug_thread->list, &hotplug_threads); + out: + mutex_unlock(&smpboot_threads_lock); + return ret; +@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread) + { + get_online_cpus(); + mutex_lock(&smpboot_threads_lock); +- list_del(&plug_thread->list); ++ pax_list_del(&plug_thread->list); + smpboot_destroy_threads(plug_thread); + mutex_unlock(&smpboot_threads_lock); + put_online_cpus(); +diff --git a/kernel/softirq.c b/kernel/softirq.c +index 490fcbb..1e502c6 100644 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -52,7 +52,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; + EXPORT_SYMBOL(irq_stat); + #endif + +-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; ++static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE); + + DEFINE_PER_CPU(struct task_struct *, ksoftirqd); + +@@ -267,7 +267,7 @@ restart: + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); +- h->action(h); ++ h->action(); + trace_softirq_exit(vec_nr); + if (unlikely(prev_count != preempt_count())) { + pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", +@@ -427,7 +427,7 @@ void __raise_softirq_irqoff(unsigned int nr) + or_softirq_pending(1UL << nr); + } + +-void open_softirq(int nr, void (*action)(struct softirq_action *)) ++void __init open_softirq(int nr, void (*action)(void)) + { + softirq_vec[nr].action = action; + } +@@ -479,7 +479,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) + } + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++static void tasklet_action(void) + { + struct tasklet_struct 
*list; + +@@ -515,7 +515,7 @@ static void tasklet_action(struct softirq_action *a) + } + } + +-static void tasklet_hi_action(struct softirq_action *a) ++static __latent_entropy void tasklet_hi_action(void) + { + struct tasklet_struct *list; + +@@ -742,7 +742,7 @@ static struct notifier_block cpu_nfb = { + .notifier_call = cpu_callback + }; + +-static struct smp_hotplug_thread softirq_threads = { ++static struct smp_hotplug_thread softirq_threads __read_only = { + .store = &ksoftirqd, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, +diff --git a/kernel/sys.c b/kernel/sys.c +index c0a58be..95e292b 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error) + error = -EACCES; + goto out; + } ++ ++ if (gr_handle_chroot_setpriority(p, niceval)) { ++ error = -EACCES; ++ goto out; ++ } ++ + no_nice = security_task_setnice(p, niceval); + if (no_nice) { + error = no_nice; +@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) + goto error; + } + ++ if (gr_check_group_change(new->gid, new->egid, INVALID_GID)) ++ goto error; ++ ++ if (!gid_eq(new->gid, old->gid)) { ++ /* make sure we generate a learn log for what will ++ end up being a role transition after a full-learning ++ policy is generated ++ CAP_SETGID is required to perform a transition ++ we may not log a CAP_SETGID check above, e.g. ++ in the case where new rgid = old egid ++ */ ++ gr_learn_cap(current, new, CAP_SETGID); ++ } ++ + if (rgid != (gid_t) -1 || + (egid != (gid_t) -1 && !gid_eq(kegid, old->gid))) + new->sgid = new->egid; +@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) + old = current_cred(); + + retval = -EPERM; ++ ++ if (gr_check_group_change(kgid, kgid, kgid)) ++ goto error; ++ + if (ns_capable(old->user_ns, CAP_SETGID)) + new->gid = new->egid = new->sgid = new->fsgid = kgid; + else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) +@@ -403,7 +427,7 @@ error: + /* + * change the user struct in a credentials set to match the new UID + */ +-static int set_user(struct cred *new) ++int set_user(struct cred *new) + { + struct user_struct *new_user; + +@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) + goto error; + } + ++ if (gr_check_user_change(new->uid, new->euid, INVALID_UID)) ++ goto error; ++ + if (!uid_eq(new->uid, old->uid)) { ++ /* make sure we generate a learn log for what will ++ end up being a role transition after a full-learning ++ policy is generated ++ CAP_SETUID is required to perform a transition ++ we may not log a CAP_SETUID check above, e.g. 
++ in the case where new ruid = old euid ++ */ ++ gr_learn_cap(current, new, CAP_SETUID); + retval = set_user(new); + if (retval < 0) + goto error; +@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) + old = current_cred(); + + retval = -EPERM; ++ ++ if (gr_check_crash_uid(kuid)) ++ goto error; ++ if (gr_check_user_change(kuid, kuid, kuid)) ++ goto error; ++ + if (ns_capable(old->user_ns, CAP_SETUID)) { + new->suid = new->uid = kuid; + if (!uid_eq(kuid, old->uid)) { +@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) + goto error; + } + ++ if (gr_check_user_change(kruid, keuid, INVALID_UID)) ++ goto error; ++ + if (ruid != (uid_t) -1) { + new->uid = kruid; + if (!uid_eq(kruid, old->uid)) { +@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) + goto error; + } + ++ if (gr_check_group_change(krgid, kegid, INVALID_GID)) ++ goto error; ++ + if (rgid != (gid_t) -1) + new->gid = krgid; + if (egid != (gid_t) -1) +@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) + uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) || + ns_capable(old->user_ns, CAP_SETUID)) { + if (!uid_eq(kuid, old->fsuid)) { ++ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid)) ++ goto error; ++ + new->fsuid = kuid; + if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) + goto change_okay; + } + } + ++error: + abort_creds(new); + return old_fsuid; + +@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) + if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) || + gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) || + ns_capable(old->user_ns, CAP_SETGID)) { ++ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid)) ++ goto error; ++ + if (!gid_eq(kgid, old->fsgid)) { + new->fsgid = kgid; + goto change_okay; + } + } + ++error: + abort_creds(new); + return old_fsgid; + +@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) + return -EFAULT; + + down_read(&uts_sem); +- error = __copy_to_user(&name->sysname, &utsname()->sysname, ++ error = __copy_to_user(name->sysname, &utsname()->sysname, + __OLD_UTS_LEN); + error |= __put_user(0, name->sysname + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->nodename, &utsname()->nodename, ++ error |= __copy_to_user(name->nodename, &utsname()->nodename, + __OLD_UTS_LEN); + error |= __put_user(0, name->nodename + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->release, &utsname()->release, ++ error |= __copy_to_user(name->release, &utsname()->release, + __OLD_UTS_LEN); + error |= __put_user(0, name->release + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->version, &utsname()->version, ++ error |= __copy_to_user(name->version, &utsname()->version, + __OLD_UTS_LEN); + error |= __put_user(0, name->version + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->machine, &utsname()->machine, ++ error |= __copy_to_user(name->machine, &utsname()->machine, + __OLD_UTS_LEN); + error |= __put_user(0, name->machine + __OLD_UTS_LEN); + up_read(&uts_sem); +@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource, + */ + new_rlim->rlim_cur = 1; + } ++ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC ++ is changed to a lower value. 
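/*
 * Aside tying this file to the kernel/signal.c hunks earlier: the set*id
 * syscalls above change credentials for the calling task only, so glibc's
 * NPTL makes setuid()/setgid() process-wide by broadcasting a reserved
 * real-time signal -- SIGSETXID, i.e. __SIGRTMIN + 1 -- to every thread
 * via tgkill(), with si_code SI_TKILL. That is exactly the traffic the
 * "SIGRTMIN+1" carve-outs in the earlier signal-delivery hunks must let
 * through. Simplified shape of the glibc side (illustrative):
 */
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#define SIGSETXID 33    /* __SIGRTMIN + 1 in glibc's private numbering */

static int setxid_signal_one_thread(pid_t tgid, pid_t tid)
{
        return (int)syscall(SYS_tgkill, tgid, tid, SIGSETXID);
}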
Since tasks can be created by the same ++ user in between this limit change and an execve by this task, force ++ a recheck only for this task by setting PF_NPROC_EXCEEDED ++ */ ++ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER) ++ tsk->flags |= PF_NPROC_EXCEEDED; + } + if (!retval) { + if (old_rlim) +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index c1b26e1..bc7b50d 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -94,7 +94,6 @@ + + + #if defined(CONFIG_SYSCTL) +- + /* External variables not in a header file. */ + extern int max_threads; + extern int suid_dumpable; +@@ -118,19 +117,18 @@ extern int blk_iopoll_enabled; + + /* Constants used for minimum and maximum */ + #ifdef CONFIG_LOCKUP_DETECTOR +-static int sixty = 60; ++static int sixty __read_only = 60; + #endif + +-static int __maybe_unused neg_one = -1; +- +-static int zero; +-static int __maybe_unused one = 1; +-static int __maybe_unused two = 2; +-static int __maybe_unused three = 3; +-static unsigned long one_ul = 1; +-static int one_hundred = 100; ++static int __maybe_unused neg_one __read_only = -1; ++static int zero __read_only = 0; ++static int __maybe_unused one __read_only = 1; ++static int __maybe_unused two __read_only = 2; ++static int __maybe_unused three __read_only = 3; ++static unsigned long one_ul __read_only = 1; ++static int one_hundred __read_only = 100; + #ifdef CONFIG_PRINTK +-static int ten_thousand = 10000; ++static int ten_thousand __read_only = 10000; + #endif + + /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ +@@ -181,10 +179,8 @@ static int proc_taint(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + #endif + +-#ifdef CONFIG_PRINTK + static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +-#endif + + static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +@@ -215,6 +211,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, + + #endif + ++extern struct ctl_table grsecurity_table[]; ++ + static struct ctl_table kern_table[]; + static struct ctl_table vm_table[]; + static struct ctl_table fs_table[]; +@@ -229,6 +227,20 @@ extern struct ctl_table epoll_table[]; + int sysctl_legacy_va_layout; + #endif + ++#ifdef CONFIG_PAX_SOFTMODE ++static ctl_table pax_table[] = { ++ { ++ .procname = "softmode", ++ .data = &pax_softmode, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ ++ { } ++}; ++#endif ++ + /* The default sysctl tables: */ + + static struct ctl_table sysctl_base_table[] = { +@@ -277,6 +289,22 @@ static int max_extfrag_threshold = 1000; + #endif + + static struct ctl_table kern_table[] = { ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) ++ { ++ .procname = "grsecurity", ++ .mode = 0500, ++ .child = grsecurity_table, ++ }, ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ { ++ .procname = "pax", ++ .mode = 0500, ++ .child = pax_table, ++ }, ++#endif ++ + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -639,7 +667,7 @@ static struct ctl_table kern_table[] = { + .data = &modprobe_path, + .maxlen = KMOD_PATH_LEN, + .mode = 0644, +- .proc_handler = proc_dostring, ++ .proc_handler = proc_dostring_modpriv, + }, + { + .procname = "modules_disabled", +@@ -806,16 +834,20 @@ static struct ctl_table kern_table[] = { + .extra1 = &zero, + .extra2 = &one, + }, 
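/*
 * Aside on the kernel.modprobe handler change above: with
 * proc_dostring_modpriv, writing the sysctl requires CAP_SYS_MODULE
 * rather than mere root file permissions, so a uid-0 process whose
 * capability set dropped CAP_SYS_MODULE (a common container setup) can
 * no longer repoint the usermode module helper. Userspace view
 * (illustrative):
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sys/kernel/modprobe", O_WRONLY);

        if (fd < 0 || write(fd, "/sbin/modprobe\n", 15) < 0)
                perror("kernel.modprobe"); /* EPERM without CAP_SYS_MODULE */
        if (fd >= 0)
                close(fd);
        return 0;
}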
++#endif + { + .procname = "kptr_restrict", + .data = &kptr_restrict, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax_sysadmin, ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ .extra1 = &two, ++#else + .extra1 = &zero, ++#endif + .extra2 = &two, + }, +-#endif + { + .procname = "ngroups_max", + .data = &ngroups_max, +@@ -1060,10 +1092,17 @@ static struct ctl_table kern_table[] = { + */ + { + .procname = "perf_event_paranoid", +- .data = &sysctl_perf_event_paranoid, +- .maxlen = sizeof(sysctl_perf_event_paranoid), ++ .data = &sysctl_perf_event_legitimately_concerned, ++ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned), + .mode = 0644, +- .proc_handler = proc_dointvec, ++ /* go ahead, be a hero */ ++ .proc_handler = proc_dointvec_minmax_sysadmin, ++ .extra1 = &neg_one, ++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN ++ .extra2 = &three, ++#else ++ .extra2 = &two, ++#endif + }, + { + .procname = "perf_event_mlock_kb", +@@ -1334,6 +1373,13 @@ static struct ctl_table vm_table[] = { + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, ++ { ++ .procname = "heap_stack_gap", ++ .data = &sysctl_heap_stack_gap, ++ .maxlen = sizeof(sysctl_heap_stack_gap), ++ .mode = 0644, ++ .proc_handler = proc_doulongvec_minmax, ++ }, + #else + { + .procname = "nr_trim_pages", +@@ -1798,6 +1844,16 @@ int proc_dostring(struct ctl_table *table, int write, + buffer, lenp, ppos); + } + ++int proc_dostring_modpriv(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ if (write && !capable(CAP_SYS_MODULE)) ++ return -EPERM; ++ ++ return _proc_do_string(table->data, table->maxlen, write, ++ buffer, lenp, ppos); ++} ++ + static size_t proc_skip_spaces(char **buf) + { + size_t ret; +@@ -1903,6 +1959,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, + len = strlen(tmp); + if (len > *size) + len = *size; ++ if (len > sizeof(tmp)) ++ len = sizeof(tmp); + if (copy_to_user(*buf, tmp, len)) + return -EFAULT; + *size -= len; +@@ -2067,7 +2125,7 @@ int proc_dointvec(struct ctl_table *table, int write, + static int proc_taint(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table t; ++ ctl_table_no_const t; + unsigned long tmptaint = get_taint(); + int err; + +@@ -2095,7 +2153,6 @@ static int proc_taint(struct ctl_table *table, int write, + return err; + } + +-#ifdef CONFIG_PRINTK + static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +@@ -2104,7 +2161,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, + + return proc_dointvec_minmax(table, write, buffer, lenp, ppos); + } +-#endif + + struct do_proc_dointvec_minmax_conv_param { + int *min; +@@ -2651,6 +2707,12 @@ int proc_dostring(struct ctl_table *table, int write, + return -ENOSYS; + } + ++int proc_dostring_modpriv(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ return -ENOSYS; ++} ++ + int proc_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +@@ -2707,5 +2769,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax); + EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); + EXPORT_SYMBOL(proc_dointvec_ms_jiffies); + EXPORT_SYMBOL(proc_dostring); ++EXPORT_SYMBOL(proc_dostring_modpriv); + EXPORT_SYMBOL(proc_doulongvec_minmax); + EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax); +diff --git a/kernel/taskstats.c b/kernel/taskstats.c +index 
13d2f7c..c93d0b0 100644 +--- a/kernel/taskstats.c ++++ b/kernel/taskstats.c +@@ -28,9 +28,12 @@ + #include <linux/fs.h> + #include <linux/file.h> + #include <linux/pid_namespace.h> ++#include <linux/grsecurity.h> + #include <net/genetlink.h> + #include <linux/atomic.h> + ++extern int gr_is_taskstats_denied(int pid); ++ + /* + * Maximum length of a cpumask that can be specified in + * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute +@@ -576,6 +579,9 @@ err: + + static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) + { ++ if (gr_is_taskstats_denied(current->pid)) ++ return -EACCES; ++ + if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]) + return cmd_attr_register_cpumask(info); + else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK]) +diff --git a/kernel/time.c b/kernel/time.c +index 7c7964c..2a0d412 100644 +--- a/kernel/time.c ++++ b/kernel/time.c +@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) + return error; + + if (tz) { ++ /* we log in do_settimeofday called below, so don't log twice ++ */ ++ if (!tv) ++ gr_log_timechange(); ++ + sys_tz = *tz; + update_vsyscall_tz(); + if (firsttime) { +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index fe75444..190c528 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -811,7 +811,7 @@ static int __init alarmtimer_init(void) + struct platform_device *pdev; + int error = 0; + int i; +- struct k_clock alarm_clock = { ++ static struct k_clock alarm_clock = { + .clock_getres = alarm_clock_getres, + .clock_get = alarm_clock_get, + .timer_create = alarm_timer_create, +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 5b40279..81e58db 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -15,6 +15,7 @@ + #include <linux/init.h> + #include <linux/mm.h> + #include <linux/sched.h> ++#include <linux/grsecurity.h> + #include <linux/syscore_ops.h> + #include <linux/clocksource.h> + #include <linux/jiffies.h> +@@ -501,6 +502,8 @@ int do_settimeofday(const struct timespec *tv) + if (!timespec_valid_strict(tv)) + return -EINVAL; + ++ gr_log_timechange(); ++ + raw_spin_lock_irqsave(&timekeeper_lock, flags); + write_seqcount_begin(&timekeeper_seq); + +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c +index 61ed862..3b52c65 100644 +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); + + static void print_name_offset(struct seq_file *m, void *sym) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name((unsigned long)sym, symname) < 0) + SEQ_printf(m, "<%pK>", sym); + else + SEQ_printf(m, "%s", symname); ++#endif + } + + static void +@@ -119,7 +123,11 @@ next_one: + static void + print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, " .base: %p\n", NULL); ++#else + SEQ_printf(m, " .base: %pK\n", base); ++#endif + SEQ_printf(m, " .index: %d\n", + base->index); + SEQ_printf(m, " .resolution: %Lu nsecs\n", +@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void) + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops); ++#else + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff --git 
a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c +index 1fb08f2..ca4bb1e 100644 +--- a/kernel/time/timer_stats.c ++++ b/kernel/time/timer_stats.c +@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop; + static unsigned long nr_entries; + static struct entry entries[MAX_ENTRIES]; + +-static atomic_t overflow_count; ++static atomic_unchecked_t overflow_count; + + /* + * The entries are in a hash-table, for fast lookup: +@@ -140,7 +140,7 @@ static void reset_entries(void) + nr_entries = 0; + memset(entries, 0, sizeof(entries)); + memset(tstat_hash_table, 0, sizeof(tstat_hash_table)); +- atomic_set(&overflow_count, 0); ++ atomic_set_unchecked(&overflow_count, 0); + } + + static struct entry *alloc_entry(void) +@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + if (likely(entry)) + entry->count++; + else +- atomic_inc(&overflow_count); ++ atomic_inc_unchecked(&overflow_count); + + out_unlock: + raw_spin_unlock_irqrestore(lock, flags); +@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, + + static void print_name_offset(struct seq_file *m, unsigned long addr) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name(addr, symname) < 0) +- seq_printf(m, "<%p>", (void *)addr); ++ seq_printf(m, "<%pK>", (void *)addr); + else + seq_printf(m, "%s", symname); ++#endif + } + + static int tstats_show(struct seq_file *m, void *v) +@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v) + + seq_puts(m, "Timer Stats Version: v0.3\n"); + seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); +- if (atomic_read(&overflow_count)) +- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count)); ++ if (atomic_read_unchecked(&overflow_count)) ++ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count)); + seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive"); + + for (i = 0; i < nr_entries; i++) { +@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void) + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops); ++#else + pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff --git a/kernel/timer.c b/kernel/timer.c +index 38f0d40..96b2ebf 100644 +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick) + /* + * This function runs timers and the timer-tq in bottom half context. + */ +-static void run_timer_softirq(struct softirq_action *h) ++static __latent_entropy void run_timer_softirq(void) + { + struct tvec_base *base = __this_cpu_read(tvec_bases); + +@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data) + * + * In all cases the return value is guaranteed to be non-negative. 
+ */ +-signed long __sched schedule_timeout(signed long timeout) ++signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout) + { + struct timer_list timer; + unsigned long expire; +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index 4f3a3c03..04b7886 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, + struct blk_trace *bt = filp->private_data; + char buf[16]; + +- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); ++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped)); + + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + } +@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, + return 1; + + bt = buf->chan->private_data; +- atomic_inc(&bt->dropped); ++ atomic_inc_unchecked(&bt->dropped); + return 0; + } + +@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + + bt->dir = dir; + bt->dev = dev; +- atomic_set(&bt->dropped, 0); ++ atomic_set_unchecked(&bt->dropped, 0); + INIT_LIST_HEAD(&bt->running_list); + + ret = -EIO; +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index e3be87e..7480b36 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -1965,12 +1965,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) + if (unlikely(ftrace_disabled)) + return 0; + ++ ret = ftrace_arch_code_modify_prepare(); ++ FTRACE_WARN_ON(ret); ++ if (ret) ++ return 0; ++ + ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); ++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process()); + if (ret) { + ftrace_bug(ret, ip); +- return 0; + } +- return 1; ++ return ret ? 
0 : 1; + } + + /* +@@ -4177,8 +4182,10 @@ static int ftrace_process_locs(struct module *mod, + if (!count) + return 0; + ++ pax_open_kernel(); + sort(start, count, sizeof(*start), + ftrace_cmp_ips, ftrace_swap_ips); ++ pax_close_kernel(); + + start_pg = ftrace_allocate_pages(count); + if (!start_pg) +@@ -4890,8 +4897,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + + static int ftrace_graph_active; +-static struct notifier_block ftrace_suspend_notifier; +- + int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) + { + return 0; +@@ -5067,6 +5072,10 @@ static void update_function_graph_func(void) + ftrace_graph_entry = ftrace_graph_entry_test; + } + ++static struct notifier_block ftrace_suspend_notifier = { ++ .notifier_call = ftrace_suspend_notifier_call ++}; ++ + int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc) + { +@@ -5080,7 +5089,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, + goto out; + } + +- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; + register_pm_notifier(&ftrace_suspend_notifier); + + ftrace_graph_active++; +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 0954450..1e3e687 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -352,9 +352,9 @@ struct buffer_data_page { + */ + struct buffer_page { + struct list_head list; /* list of buffer pages */ +- local_t write; /* index for next write */ ++ local_unchecked_t write; /* index for next write */ + unsigned read; /* index for next read */ +- local_t entries; /* entries on this page */ ++ local_unchecked_t entries; /* entries on this page */ + unsigned long real_end; /* real end of data */ + struct buffer_data_page *page; /* Actual data page */ + }; +@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu { + unsigned long last_overrun; + local_t entries_bytes; + local_t entries; +- local_t overrun; +- local_t commit_overrun; ++ local_unchecked_t overrun; ++ local_unchecked_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; +@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, + work = &cpu_buffer->irq_work; + } + +- work->waiters_pending = true; + poll_wait(filp, &work->waiters, poll_table); ++ work->waiters_pending = true; ++ /* ++ * There's a tight race between setting the waiters_pending and ++ * checking if the ring buffer is empty. Once the waiters_pending bit ++ * is set, the next event will wake the task up, but we can get stuck ++ * if there's only a single event in. ++ * ++ * FIXME: Ideally, we need a memory barrier on the writer side as well, ++ * but adding a memory barrier to all events will cause too much of a ++ * performance hit in the fast path. We only need a memory barrier when ++ * the buffer goes from empty to having content. But as this race is ++ * extremely small, and it's not a problem if another event comes in, we ++ * will fix it later. ++ */ ++ smp_mb(); + + if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || + (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) +@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, + * + * We add a counter to the write field to denote this. 
+ */ +- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); +- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); ++ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write); ++ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries); + + /* + * Just make sure we have seen our old_write and synchronize +@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, + * cmpxchg to only update if an interrupt did not already + * do it for us. If the cmpxchg fails, we don't care. + */ +- (void)local_cmpxchg(&next_page->write, old_write, val); +- (void)local_cmpxchg(&next_page->entries, old_entries, eval); ++ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val); ++ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval); + + /* + * No need to worry about races with clearing out the commit. +@@ -1385,12 +1399,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); + + static inline unsigned long rb_page_entries(struct buffer_page *bpage) + { +- return local_read(&bpage->entries) & RB_WRITE_MASK; ++ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK; + } + + static inline unsigned long rb_page_write(struct buffer_page *bpage) + { +- return local_read(&bpage->write) & RB_WRITE_MASK; ++ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK; + } + + static int +@@ -1485,7 +1499,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) + * bytes consumed in ring buffer from here. + * Increment overrun to account for the lost events. + */ +- local_add(page_entries, &cpu_buffer->overrun); ++ local_add_unchecked(page_entries, &cpu_buffer->overrun); + local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); + } + +@@ -2063,7 +2077,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, + * it is our responsibility to update + * the counters. + */ +- local_add(entries, &cpu_buffer->overrun); ++ local_add_unchecked(entries, &cpu_buffer->overrun); + local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); + + /* +@@ -2213,7 +2227,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + if (tail == BUF_PAGE_SIZE) + tail_page->real_end = 0; + +- local_sub(length, &tail_page->write); ++ local_sub_unchecked(length, &tail_page->write); + return; + } + +@@ -2248,7 +2262,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + rb_event_set_padding(event); + + /* Set the write back to the previous setting */ +- local_sub(length, &tail_page->write); ++ local_sub_unchecked(length, &tail_page->write); + return; + } + +@@ -2260,7 +2274,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, + + /* Set write to end of buffer */ + length = (tail + length) - BUF_PAGE_SIZE; +- local_sub(length, &tail_page->write); ++ local_sub_unchecked(length, &tail_page->write); + } + + /* +@@ -2286,7 +2300,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, + * about it. 
+ */ + if (unlikely(next_page == commit_page)) { +- local_inc(&cpu_buffer->commit_overrun); ++ local_inc_unchecked(&cpu_buffer->commit_overrun); + goto out_reset; + } + +@@ -2342,7 +2356,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, + cpu_buffer->tail_page) && + (cpu_buffer->commit_page == + cpu_buffer->reader_page))) { +- local_inc(&cpu_buffer->commit_overrun); ++ local_inc_unchecked(&cpu_buffer->commit_overrun); + goto out_reset; + } + } +@@ -2390,7 +2404,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, + length += RB_LEN_TIME_EXTEND; + + tail_page = cpu_buffer->tail_page; +- write = local_add_return(length, &tail_page->write); ++ write = local_add_return_unchecked(length, &tail_page->write); + + /* set write to only the index of the write */ + write &= RB_WRITE_MASK; +@@ -2414,7 +2428,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, + kmemcheck_annotate_bitfield(event, bitfield); + rb_update_event(cpu_buffer, event, length, add_timestamp, delta); + +- local_inc(&tail_page->entries); ++ local_inc_unchecked(&tail_page->entries); + + /* + * If this is the first commit on the page, then update +@@ -2447,7 +2461,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, + + if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { + unsigned long write_mask = +- local_read(&bpage->write) & ~RB_WRITE_MASK; ++ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK; + unsigned long event_length = rb_event_length(event); + /* + * This is on the tail page. It is possible that +@@ -2457,7 +2471,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, + */ + old_index += write_mask; + new_index += write_mask; +- index = local_cmpxchg(&bpage->write, old_index, new_index); ++ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index); + if (index == old_index) { + /* update counters */ + local_sub(event_length, &cpu_buffer->entries_bytes); +@@ -2849,7 +2863,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, + + /* Do the likely case first */ + if (likely(bpage->page == (void *)addr)) { +- local_dec(&bpage->entries); ++ local_dec_unchecked(&bpage->entries); + return; + } + +@@ -2861,7 +2875,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, + start = bpage; + do { + if (bpage->page == (void *)addr) { +- local_dec(&bpage->entries); ++ local_dec_unchecked(&bpage->entries); + return; + } + rb_inc_page(cpu_buffer, &bpage); +@@ -3145,7 +3159,7 @@ static inline unsigned long + rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) + { + return local_read(&cpu_buffer->entries) - +- (local_read(&cpu_buffer->overrun) + cpu_buffer->read); ++ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read); + } + + /** +@@ -3234,7 +3248,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) + return 0; + + cpu_buffer = buffer->buffers[cpu]; +- ret = local_read(&cpu_buffer->overrun); ++ ret = local_read_unchecked(&cpu_buffer->overrun); + + return ret; + } +@@ -3257,7 +3271,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) + return 0; + + cpu_buffer = buffer->buffers[cpu]; +- ret = local_read(&cpu_buffer->commit_overrun); ++ ret = local_read_unchecked(&cpu_buffer->commit_overrun); + + return ret; + } +@@ -3342,7 +3356,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) + /* if you care about this being correct, lock the buffer */ + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; +- overruns += local_read(&cpu_buffer->overrun); ++ 
overruns += local_read_unchecked(&cpu_buffer->overrun); + } + + return overruns; +@@ -3518,8 +3532,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + /* + * Reset the reader page to size zero. + */ +- local_set(&cpu_buffer->reader_page->write, 0); +- local_set(&cpu_buffer->reader_page->entries, 0); ++ local_set_unchecked(&cpu_buffer->reader_page->write, 0); ++ local_set_unchecked(&cpu_buffer->reader_page->entries, 0); + local_set(&cpu_buffer->reader_page->page->commit, 0); + cpu_buffer->reader_page->real_end = 0; + +@@ -3553,7 +3567,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + * want to compare with the last_overrun. + */ + smp_mb(); +- overwrite = local_read(&(cpu_buffer->overrun)); ++ overwrite = local_read_unchecked(&(cpu_buffer->overrun)); + + /* + * Here's the tricky part. +@@ -4123,8 +4137,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) + + cpu_buffer->head_page + = list_entry(cpu_buffer->pages, struct buffer_page, list); +- local_set(&cpu_buffer->head_page->write, 0); +- local_set(&cpu_buffer->head_page->entries, 0); ++ local_set_unchecked(&cpu_buffer->head_page->write, 0); ++ local_set_unchecked(&cpu_buffer->head_page->entries, 0); + local_set(&cpu_buffer->head_page->page->commit, 0); + + cpu_buffer->head_page->read = 0; +@@ -4134,14 +4148,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) + + INIT_LIST_HEAD(&cpu_buffer->reader_page->list); + INIT_LIST_HEAD(&cpu_buffer->new_pages); +- local_set(&cpu_buffer->reader_page->write, 0); +- local_set(&cpu_buffer->reader_page->entries, 0); ++ local_set_unchecked(&cpu_buffer->reader_page->write, 0); ++ local_set_unchecked(&cpu_buffer->reader_page->entries, 0); + local_set(&cpu_buffer->reader_page->page->commit, 0); + cpu_buffer->reader_page->read = 0; + + local_set(&cpu_buffer->entries_bytes, 0); +- local_set(&cpu_buffer->overrun, 0); +- local_set(&cpu_buffer->commit_overrun, 0); ++ local_set_unchecked(&cpu_buffer->overrun, 0); ++ local_set_unchecked(&cpu_buffer->commit_overrun, 0); + local_set(&cpu_buffer->dropped_events, 0); + local_set(&cpu_buffer->entries, 0); + local_set(&cpu_buffer->committing, 0); +@@ -4546,8 +4560,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer, + rb_init_page(bpage); + bpage = reader->page; + reader->page = *data_page; +- local_set(&reader->write, 0); +- local_set(&reader->entries, 0); ++ local_set_unchecked(&reader->write, 0); ++ local_set_unchecked(&reader->entries, 0); + reader->read = 0; + *data_page = bpage; + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 7113672..e8a9c80 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3412,7 +3412,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) + return 0; + } + +-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) ++int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled) + { + /* do nothing if flag is already set */ + if (!!(trace_flags & mask) == !!enabled) +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index c8bd809..33d7539 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -1233,7 +1233,7 @@ extern const char *__stop___tracepoint_str[]; + void trace_printk_init_buffers(void); + void trace_printk_start_comm(void); + int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); +-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); ++int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled); + + /* + * Normal trace_printk() and 
friends allocates special buffers +diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c +index 57b67b1..66082a9 100644 +--- a/kernel/trace/trace_clock.c ++++ b/kernel/trace/trace_clock.c +@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void) + return now; + } + +-static atomic64_t trace_counter; ++static atomic64_unchecked_t trace_counter; + + /* + * trace_clock_counter(): simply an atomic counter. +@@ -133,5 +133,5 @@ static atomic64_t trace_counter; + */ + u64 notrace trace_clock_counter(void) + { +- return atomic64_add_return(1, &trace_counter); ++ return atomic64_inc_return_unchecked(&trace_counter); + } +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index e4c4efc..ef4e975 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -1682,7 +1682,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call, + return 0; + } + +-struct ftrace_module_file_ops; + static void __add_event_to_tracers(struct ftrace_event_call *call); + + /* Add an additional event_call dynamically */ +diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c +index 0abd9b8..6a663a2 100644 +--- a/kernel/trace/trace_mmiotrace.c ++++ b/kernel/trace/trace_mmiotrace.c +@@ -24,7 +24,7 @@ struct header_iter { + static struct trace_array *mmio_trace_array; + static bool overrun_detected; + static unsigned long prev_overruns; +-static atomic_t dropped_count; ++static atomic_unchecked_t dropped_count; + + static void mmio_reset_data(struct trace_array *tr) + { +@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter) + + static unsigned long count_overruns(struct trace_iterator *iter) + { +- unsigned long cnt = atomic_xchg(&dropped_count, 0); ++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0); + unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer); + + if (over > prev_overruns) +@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, + sizeof(*entry), 0, pc); + if (!event) { +- atomic_inc(&dropped_count); ++ atomic_inc_unchecked(&dropped_count); + return; + } + entry = ring_buffer_event_data(event); +@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, + sizeof(*entry), 0, pc); + if (!event) { +- atomic_inc(&dropped_count); ++ atomic_inc_unchecked(&dropped_count); + return; + } + entry = ring_buffer_event_data(event); +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index ed32284..884d6c3 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path) + + p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); + if (!IS_ERR(p)) { +- p = mangle_path(s->buffer + s->len, p, "\n"); ++ p = mangle_path(s->buffer + s->len, p, "\n\\"); + if (p) { + s->len = p - s->buffer; + return 1; +@@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_event *event) + goto out; + } + ++ pax_open_kernel(); + if (event->funcs->trace == NULL) +- event->funcs->trace = trace_nop_print; ++ *(void **)&event->funcs->trace = trace_nop_print; + if (event->funcs->raw == NULL) +- event->funcs->raw = trace_nop_print; ++ *(void **)&event->funcs->raw = trace_nop_print; + if (event->funcs->hex == NULL) +- event->funcs->hex = trace_nop_print; ++ *(void **)&event->funcs->hex = trace_nop_print; + if (event->funcs->binary == NULL) +- 
event->funcs->binary = trace_nop_print; ++ *(void **)&event->funcs->binary = trace_nop_print; ++ pax_close_kernel(); + + key = event->type & (EVENT_HASHSIZE - 1); + +diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c +index e6be585..d73ae5e 100644 +--- a/kernel/trace/trace_stack.c ++++ b/kernel/trace/trace_stack.c +@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack) + return; + + /* we do not handle interrupt stacks yet */ +- if (!object_is_on_stack(stack)) ++ if (!object_starts_on_stack(stack)) + return; + + local_irq_save(flags); +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index 80a57af..7f5a7ff 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new) + !kgid_has_mapping(parent_ns, group)) + return -EPERM; + ++#ifdef CONFIG_GRKERNSEC ++ /* ++ * This doesn't really inspire confidence: ++ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2 ++ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2 ++ * Increases kernel attack surface in areas developers ++ * previously cared little about ("low importance due ++ * to requiring "root" capability") ++ * To be removed when this code receives *proper* review ++ */ ++ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || ++ !capable(CAP_SETGID)) ++ return -EPERM; ++#endif ++ + ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL); + if (!ns) + return -ENOMEM; +@@ -865,7 +880,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns) + if (atomic_read(¤t->mm->mm_users) > 1) + return -EINVAL; + +- if (current->fs->users != 1) ++ if (atomic_read(¤t->fs->users) != 1) + return -EINVAL; + + if (!ns_capable(user_ns, CAP_SYS_ADMIN)) +diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c +index 4f69f9a..7c6f8f8 100644 +--- a/kernel/utsname_sysctl.c ++++ b/kernel/utsname_sysctl.c +@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which) + static int proc_do_uts_string(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- struct ctl_table uts_table; ++ ctl_table_no_const uts_table; + int r; + memcpy(&uts_table, table, sizeof(uts_table)); + uts_table.data = get_uts(table, write); +diff --git a/kernel/watchdog.c b/kernel/watchdog.c +index c9b6f01..37781d9 100644 +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; } + static void watchdog_nmi_disable(unsigned int cpu) { return; } + #endif /* CONFIG_HARDLOCKUP_DETECTOR */ + +-static struct smp_hotplug_thread watchdog_threads = { ++static struct smp_hotplug_thread watchdog_threads __read_only = { + .store = &softlockup_watchdog, + .thread_should_run = watchdog_should_run, + .thread_fn = watchdog, +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index b4defde..f092808 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -4703,7 +4703,7 @@ static void rebind_workers(struct worker_pool *pool) + WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); + worker_flags |= WORKER_REBOUND; + worker_flags &= ~WORKER_UNBOUND; +- ACCESS_ONCE(worker->flags) = worker_flags; ++ ACCESS_ONCE_RW(worker->flags) = worker_flags; + } + + spin_unlock_irq(&pool->lock); +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index a48abea..e108def 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -854,7 +854,7 @@ config DEBUG_MUTEXES + + config DEBUG_WW_MUTEX_SLOWPATH + bool "Wait/wound mutex debugging: Slowpath testing" +- depends on 
DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT ++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN + select DEBUG_LOCK_ALLOC + select DEBUG_SPINLOCK + select DEBUG_MUTEXES +@@ -867,7 +867,7 @@ config DEBUG_WW_MUTEX_SLOWPATH + + config DEBUG_LOCK_ALLOC + bool "Lock debugging: detect incorrect freeing of live locks" +- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT ++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN + select DEBUG_SPINLOCK + select DEBUG_MUTEXES + select LOCKDEP +@@ -881,7 +881,7 @@ config DEBUG_LOCK_ALLOC + + config PROVE_LOCKING + bool "Lock debugging: prove locking correctness" +- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT ++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN + select LOCKDEP + select DEBUG_SPINLOCK + select DEBUG_MUTEXES +@@ -932,7 +932,7 @@ config LOCKDEP + + config LOCK_STAT + bool "Lock usage statistics" +- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT ++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN + select LOCKDEP + select DEBUG_SPINLOCK + select DEBUG_MUTEXES +@@ -1394,6 +1394,7 @@ config LATENCYTOP + depends on DEBUG_KERNEL + depends on STACKTRACE_SUPPORT + depends on PROC_FS ++ depends on !GRKERNSEC_HIDESYM + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC + select KALLSYMS + select KALLSYMS_ALL +@@ -1410,7 +1411,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + config DEBUG_STRICT_USER_COPY_CHECKS + bool "Strict user copy size checks" + depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS +- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING ++ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW + help + Enabling this option turns a certain set of sanity checks for user + copy operations into compile time failures. 
+@@ -1529,7 +1530,7 @@ endmenu # runtime tests + + config PROVIDE_OHCI1394_DMA_INIT + bool "Remote debugging over FireWire early on boot" +- depends on PCI && X86 ++ depends on PCI && X86 && !GRKERNSEC + help + If you want to debug problems which hang or crash the kernel early + on boot and the crashing machine has a FireWire port, you can use +diff --git a/lib/Makefile b/lib/Makefile +index 48140e3..de854e5 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -52,7 +52,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o + obj-$(CONFIG_BTREE) += btree.o + obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o + obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o +-obj-$(CONFIG_DEBUG_LIST) += list_debug.o ++obj-y += list_debug.o + obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o + + ifneq ($(CONFIG_HAVE_DEC_LOCK),y) +diff --git a/lib/assoc_array.c b/lib/assoc_array.c +index c0b1007..ae146f0 100644 +--- a/lib/assoc_array.c ++++ b/lib/assoc_array.c +@@ -1735,7 +1735,7 @@ ascend_old_tree: + gc_complete: + edit->set[0].to = new_root; + assoc_array_apply_edit(edit); +- edit->array->nr_leaves_on_tree = nr_leaves_on_tree; ++ array->nr_leaves_on_tree = nr_leaves_on_tree; + return 0; + + enomem: +diff --git a/lib/average.c b/lib/average.c +index 114d1be..ab0350c 100644 +--- a/lib/average.c ++++ b/lib/average.c +@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val) + { + unsigned long internal = ACCESS_ONCE(avg->internal); + +- ACCESS_ONCE(avg->internal) = internal ? ++ ACCESS_ONCE_RW(avg->internal) = internal ? + (((internal << avg->weight) - internal) + + (val << avg->factor)) >> avg->weight : + (val << avg->factor); +diff --git a/lib/bitmap.c b/lib/bitmap.c +index 06f7e4f..f3cf2b0 100644 +--- a/lib/bitmap.c ++++ b/lib/bitmap.c +@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, + { + int c, old_c, totaldigits, ndigits, nchunks, nbits; + u32 chunk; +- const char __user __force *ubuf = (const char __user __force *)buf; ++ const char __user *ubuf = (const char __force_user *)buf; + + bitmap_zero(maskp, nmaskbits); + +@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf, + { + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; +- return __bitmap_parse((const char __force *)ubuf, ++ return __bitmap_parse((const char __force_kernel *)ubuf, + ulen, 1, maskp, nmaskbits); + + } +@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, + { + unsigned a, b; + int c, old_c, totaldigits; +- const char __user __force *ubuf = (const char __user __force *)buf; ++ const char __user *ubuf = (const char __force_user *)buf; + int exp_digit, in_range; + + totaldigits = c = 0; +@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf, + { + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; +- return __bitmap_parselist((const char __force *)ubuf, ++ return __bitmap_parselist((const char __force_kernel *)ubuf, + ulen, 1, maskp, nmaskbits); + } + EXPORT_SYMBOL(bitmap_parselist_user); +diff --git a/lib/bug.c b/lib/bug.c +index 1686034..a9c00c8 100644 +--- a/lib/bug.c ++++ b/lib/bug.c +@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) + return BUG_TRAP_TYPE_NONE; + + bug = find_bug(bugaddr); ++ if (!bug) ++ return BUG_TRAP_TYPE_NONE; + + file = NULL; + line = 0; +diff --git a/lib/debugobjects.c b/lib/debugobjects.c +index e0731c3..ad66444 100644 +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int 
onstack) + if (limit > 4) + return; + +- is_on_stack = object_is_on_stack(addr); ++ is_on_stack = object_starts_on_stack(addr); + if (is_on_stack == onstack) + return; + +diff --git a/lib/devres.c b/lib/devres.c +index 8235331..5881053 100644 +--- a/lib/devres.c ++++ b/lib/devres.c +@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache); + void devm_iounmap(struct device *dev, void __iomem *addr) + { + WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, +- (void *)addr)); ++ (void __force *)addr)); + iounmap(addr); + } + EXPORT_SYMBOL(devm_iounmap); +@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) + { + ioport_unmap(addr); + WARN_ON(devres_destroy(dev, devm_ioport_map_release, +- devm_ioport_map_match, (void *)addr)); ++ devm_ioport_map_match, (void __force *)addr)); + } + EXPORT_SYMBOL(devm_ioport_unmap); + #endif /* CONFIG_HAS_IOPORT */ +diff --git a/lib/div64.c b/lib/div64.c +index 4382ad7..08aa558 100644 +--- a/lib/div64.c ++++ b/lib/div64.c +@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) + EXPORT_SYMBOL(__div64_32); + + #ifndef div_s64_rem +-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) ++s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) + { + u64 quotient; + +@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem); + * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt' + */ + #ifndef div64_u64 +-u64 div64_u64(u64 dividend, u64 divisor) ++u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor) + { + u32 high = divisor >> 32; + u64 quot; +diff --git a/lib/dma-debug.c b/lib/dma-debug.c +index 98f2d7e..899da5c 100644 +--- a/lib/dma-debug.c ++++ b/lib/dma-debug.c +@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti + + void dma_debug_add_bus(struct bus_type *bus) + { +- struct notifier_block *nb; ++ notifier_block_no_const *nb; + + if (global_disable) + return; +@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref) + + static void check_for_stack(struct device *dev, void *addr) + { +- if (object_is_on_stack(addr)) ++ if (object_starts_on_stack(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from" + "stack [addr=%p]\n", addr); + } +diff --git a/lib/hash.c b/lib/hash.c +index fea973f..386626f 100644 +--- a/lib/hash.c ++++ b/lib/hash.c +@@ -14,7 +14,7 @@ + #include <linux/hash.h> + #include <linux/cache.h> + +-static struct fast_hash_ops arch_hash_ops __read_mostly = { ++static struct fast_hash_ops arch_hash_ops __read_only = { + .hash = jhash, + .hash2 = jhash2, + }; +diff --git a/lib/inflate.c b/lib/inflate.c +index 013a761..c28f3fc 100644 +--- a/lib/inflate.c ++++ b/lib/inflate.c +@@ -269,7 +269,7 @@ static void free(void *where) + malloc_ptr = free_mem_ptr; + } + #else +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + #endif + +diff --git a/lib/ioremap.c b/lib/ioremap.c +index 0c9216c..863bd89 100644 +--- a/lib/ioremap.c ++++ b/lib/ioremap.c +@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, + unsigned long next; + + phys_addr -= addr; +- pmd = pmd_alloc(&init_mm, pud, addr); ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, + unsigned long next; + + phys_addr -= addr; +- pud = pud_alloc(&init_mm, pgd, addr); ++ pud = 
pud_alloc_kernel(&init_mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c +index bd2bea9..6b3c95e 100644 +--- a/lib/is_single_threaded.c ++++ b/lib/is_single_threaded.c +@@ -22,6 +22,9 @@ bool current_is_single_threaded(void) + struct task_struct *p, *t; + bool ret; + ++ if (!mm) ++ return true; ++ + if (atomic_read(&task->signal->live) != 1) + return false; + +diff --git a/lib/kobject.c b/lib/kobject.c +index cb14aea..8c53cdb 100644 +--- a/lib/kobject.c ++++ b/lib/kobject.c +@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add); + + + static DEFINE_SPINLOCK(kobj_ns_type_lock); +-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES]; ++static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only; + +-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops) ++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops) + { + enum kobj_ns_type type = ops->type; + int error; +diff --git a/lib/list_debug.c b/lib/list_debug.c +index c24c2f7..f0296f4 100644 +--- a/lib/list_debug.c ++++ b/lib/list_debug.c +@@ -11,7 +11,9 @@ + #include <linux/bug.h> + #include <linux/kernel.h> + #include <linux/rculist.h> ++#include <linux/mm.h> + ++#ifdef CONFIG_DEBUG_LIST + /* + * Insert a new entry between two known consecutive entries. + * +@@ -19,21 +21,40 @@ + * the prev/next entries already! + */ + ++static bool __list_add_debug(struct list_head *new, ++ struct list_head *prev, ++ struct list_head *next) ++{ ++ if (unlikely(next->prev != prev)) { ++ printk(KERN_ERR "list_add corruption. next->prev should be " ++ "prev (%p), but was %p. (next=%p).\n", ++ prev, next->prev, next); ++ BUG(); ++ return false; ++ } ++ if (unlikely(prev->next != next)) { ++ printk(KERN_ERR "list_add corruption. prev->next should be " ++ "next (%p), but was %p. (prev=%p).\n", ++ next, prev->next, prev); ++ BUG(); ++ return false; ++ } ++ if (unlikely(new == prev || new == next)) { ++ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n", ++ new, prev, next); ++ BUG(); ++ return false; ++ } ++ return true; ++} ++ + void __list_add(struct list_head *new, +- struct list_head *prev, +- struct list_head *next) ++ struct list_head *prev, ++ struct list_head *next) + { +- WARN(next->prev != prev, +- "list_add corruption. next->prev should be " +- "prev (%p), but was %p. (next=%p).\n", +- prev, next->prev, next); +- WARN(prev->next != next, +- "list_add corruption. prev->next should be " +- "next (%p), but was %p. (prev=%p).\n", +- next, prev->next, prev); +- WARN(new == prev || new == next, +- "list_add double add: new=%p, prev=%p, next=%p.\n", +- new, prev, next); ++ if (!__list_add_debug(new, prev, next)) ++ return; ++ + next->prev = new; + new->next = next; + new->prev = prev; +@@ -41,28 +62,46 @@ void __list_add(struct list_head *new, + } + EXPORT_SYMBOL(__list_add); + +-void __list_del_entry(struct list_head *entry) ++static bool __list_del_entry_debug(struct list_head *entry) + { + struct list_head *prev, *next; + + prev = entry->prev; + next = entry->next; + +- if (WARN(next == LIST_POISON1, +- "list_del corruption, %p->next is LIST_POISON1 (%p)\n", +- entry, LIST_POISON1) || +- WARN(prev == LIST_POISON2, +- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", +- entry, LIST_POISON2) || +- WARN(prev->next != entry, +- "list_del corruption. prev->next should be %p, " +- "but was %p\n", entry, prev->next) || +- WARN(next->prev != entry, +- "list_del corruption. 
next->prev should be %p, " +- "but was %p\n", entry, next->prev)) ++ if (unlikely(next == LIST_POISON1)) { ++ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n", ++ entry, LIST_POISON1); ++ BUG(); ++ return false; ++ } ++ if (unlikely(prev == LIST_POISON2)) { ++ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", ++ entry, LIST_POISON2); ++ BUG(); ++ return false; ++ } ++ if (unlikely(entry->prev->next != entry)) { ++ printk(KERN_ERR "list_del corruption. prev->next should be %p, " ++ "but was %p\n", entry, prev->next); ++ BUG(); ++ return false; ++ } ++ if (unlikely(entry->next->prev != entry)) { ++ printk(KERN_ERR "list_del corruption. next->prev should be %p, " ++ "but was %p\n", entry, next->prev); ++ BUG(); ++ return false; ++ } ++ return true; ++} ++ ++void __list_del_entry(struct list_head *entry) ++{ ++ if (!__list_del_entry_debug(entry)) + return; + +- __list_del(prev, next); ++ __list_del(entry->prev, entry->next); + } + EXPORT_SYMBOL(__list_del_entry); + +@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del); + void __list_add_rcu(struct list_head *new, + struct list_head *prev, struct list_head *next) + { +- WARN(next->prev != prev, +- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", +- prev, next->prev, next); +- WARN(prev->next != next, +- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", +- next, prev->next, prev); ++ if (!__list_add_debug(new, prev, next)) ++ return; ++ + new->next = next; + new->prev = prev; + rcu_assign_pointer(list_next_rcu(prev), new); + next->prev = new; + } + EXPORT_SYMBOL(__list_add_rcu); ++#endif ++ ++void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next) ++{ ++#ifdef CONFIG_DEBUG_LIST ++ if (!__list_add_debug(new, prev, next)) ++ return; ++#endif ++ ++ pax_open_kernel(); ++ next->prev = new; ++ new->next = next; ++ new->prev = prev; ++ prev->next = new; ++ pax_close_kernel(); ++} ++EXPORT_SYMBOL(__pax_list_add); ++ ++void pax_list_del(struct list_head *entry) ++{ ++#ifdef CONFIG_DEBUG_LIST ++ if (!__list_del_entry_debug(entry)) ++ return; ++#endif ++ ++ pax_open_kernel(); ++ __list_del(entry->prev, entry->next); ++ entry->next = LIST_POISON1; ++ entry->prev = LIST_POISON2; ++ pax_close_kernel(); ++} ++EXPORT_SYMBOL(pax_list_del); ++ ++void pax_list_del_init(struct list_head *entry) ++{ ++ pax_open_kernel(); ++ __list_del(entry->prev, entry->next); ++ INIT_LIST_HEAD(entry); ++ pax_close_kernel(); ++} ++EXPORT_SYMBOL(pax_list_del_init); ++ ++void __pax_list_add_rcu(struct list_head *new, ++ struct list_head *prev, struct list_head *next) ++{ ++#ifdef CONFIG_DEBUG_LIST ++ if (!__list_add_debug(new, prev, next)) ++ return; ++#endif ++ ++ pax_open_kernel(); ++ new->next = next; ++ new->prev = prev; ++ rcu_assign_pointer(list_next_rcu(prev), new); ++ next->prev = new; ++ pax_close_kernel(); ++} ++EXPORT_SYMBOL(__pax_list_add_rcu); ++ ++void pax_list_del_rcu(struct list_head *entry) ++{ ++#ifdef CONFIG_DEBUG_LIST ++ if (!__list_del_entry_debug(entry)) ++ return; ++#endif ++ ++ pax_open_kernel(); ++ __list_del(entry->prev, entry->next); ++ entry->next = LIST_POISON1; ++ entry->prev = LIST_POISON2; ++ pax_close_kernel(); ++} ++EXPORT_SYMBOL(pax_list_del_rcu); +diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c +index 963b703..438bc51 100644 +--- a/lib/percpu-refcount.c ++++ b/lib/percpu-refcount.c +@@ -29,7 +29,7 @@ + * can't hit 0 before we've added up all the percpu refs. 
+ */ + +-#define PCPU_COUNT_BIAS (1U << 31) ++#define PCPU_COUNT_BIAS (1U << 30) + + /** + * percpu_ref_init - initialize a percpu refcount +diff --git a/lib/radix-tree.c b/lib/radix-tree.c +index bd4a8df..9e4804f 100644 +--- a/lib/radix-tree.c ++++ b/lib/radix-tree.c +@@ -93,7 +93,7 @@ struct radix_tree_preload { + int nr; + struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE]; + }; +-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; ++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); + + static inline void *ptr_to_indirect(void *ptr) + { +diff --git a/lib/random32.c b/lib/random32.c +index 6148967..009bfe8 100644 +--- a/lib/random32.c ++++ b/lib/random32.c +@@ -44,7 +44,7 @@ + static void __init prandom_state_selftest(void); + #endif + +-static DEFINE_PER_CPU(struct rnd_state, net_rand_state); ++static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; + + /** + * prandom_u32_state - seeded pseudo-random number generator. +diff --git a/lib/rbtree.c b/lib/rbtree.c +index 65f4eff..2cfa167 100644 +--- a/lib/rbtree.c ++++ b/lib/rbtree.c +@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} + static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} + + static const struct rb_augment_callbacks dummy_callbacks = { +- dummy_propagate, dummy_copy, dummy_rotate ++ .propagate = dummy_propagate, ++ .copy = dummy_copy, ++ .rotate = dummy_rotate + }; + + void rb_insert_color(struct rb_node *node, struct rb_root *root) +diff --git a/lib/show_mem.c b/lib/show_mem.c +index 0922579..9d7adb9 100644 +--- a/lib/show_mem.c ++++ b/lib/show_mem.c +@@ -44,6 +44,6 @@ void show_mem(unsigned int filter) + quicklist_total_size()); + #endif + #ifdef CONFIG_MEMORY_FAILURE +- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); ++ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages)); + #endif + } +diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c +index bb2b201..46abaf9 100644 +--- a/lib/strncpy_from_user.c ++++ b/lib/strncpy_from_user.c +@@ -21,7 +21,7 @@ + */ + static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) + { +- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; ++ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + long res = 0; + + /* +diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c +index a28df52..3d55877 100644 +--- a/lib/strnlen_user.c ++++ b/lib/strnlen_user.c +@@ -26,7 +26,7 @@ + */ + static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) + { +- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; ++ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + long align, res = 0; + unsigned long c; + +diff --git a/lib/swiotlb.c b/lib/swiotlb.c +index b604b83..c0547f6 100644 +--- a/lib/swiotlb.c ++++ b/lib/swiotlb.c +@@ -674,7 +674,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent); + + void + swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, +- dma_addr_t dev_addr) ++ dma_addr_t dev_addr, struct dma_attrs *attrs) + { + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); + +diff --git a/lib/usercopy.c b/lib/usercopy.c +index 4f5b1dd..7cab418 100644 +--- a/lib/usercopy.c ++++ b/lib/usercopy.c +@@ -7,3 +7,9 @@ void copy_from_user_overflow(void) + WARN(1, "Buffer overflow detected!\n"); + } + EXPORT_SYMBOL(copy_from_user_overflow); ++ ++void 
copy_to_user_overflow(void) ++{ ++ WARN(1, "Buffer overflow detected!\n"); ++} ++EXPORT_SYMBOL(copy_to_user_overflow); +diff --git a/lib/vsprintf.c b/lib/vsprintf.c +index 185b6d3..823c48c 100644 +--- a/lib/vsprintf.c ++++ b/lib/vsprintf.c +@@ -16,6 +16,9 @@ + * - scnprintf and vscnprintf + */ + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <stdarg.h> + #include <linux/module.h> /* for KSYM_SYMBOL_LEN */ + #include <linux/types.h> +@@ -1179,7 +1182,11 @@ char *address_val(char *buf, char *end, const void *addr, + return number(buf, end, num, spec); + } + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++int kptr_restrict __read_mostly = 2; ++#else + int kptr_restrict __read_mostly; ++#endif + + /* + * Show a '%p' thing. A kernel extension is that the '%p' is followed +@@ -1192,6 +1199,7 @@ int kptr_restrict __read_mostly; + * - 'f' For simple symbolic function names without offset + * - 'S' For symbolic direct pointers with offset + * - 's' For symbolic direct pointers without offset ++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM + * - '[FfSs]R' as above with __builtin_extract_return_addr() translation + * - 'B' For backtraced symbolic direct pointers with offset + * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] +@@ -1259,12 +1267,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + + if (!ptr && *fmt != 'K') { + /* +- * Print (null) with the same width as a pointer so it makes ++ * Print (nil) with the same width as a pointer so it makes + * tabular output look nice. + */ + if (spec.field_width == -1) + spec.field_width = default_width; +- return string(buf, end, "(null)", spec); ++ return string(buf, end, "(nil)", spec); + } + + switch (*fmt) { +@@ -1274,6 +1282,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + /* Fallthrough */ + case 'S': + case 's': ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ break; ++#else ++ return symbol_string(buf, end, ptr, spec, fmt); ++#endif ++ case 'A': + case 'B': + return symbol_string(buf, end, ptr, spec, fmt); + case 'R': +@@ -1329,6 +1343,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + va_end(va); + return buf; + } ++ case 'P': ++ break; + case 'K': + /* + * %pK cannot be used in IRQ context because its test +@@ -1386,6 +1402,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, + ((const struct file *)ptr)->f_path.dentry, + spec, fmt); + } ++ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ /* 'P' = approved pointers to copy to userland, ++ as in the /proc/kallsyms case, as we make it display nothing ++ for non-root users, and the real contents for root users ++ Also ignore 'K' pointers, since we force their NULLing for non-root users ++ above ++ */ ++ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) { ++ printk(KERN_ALERT "grsec: kernel infoleak detected! 
Please report this log to spender@grsecurity.net.\n"); ++ dump_stack(); ++ ptr = NULL; ++ } ++#endif ++ + spec.flags |= SMALL; + if (spec.field_width == -1) { + spec.field_width = default_width; +@@ -2107,11 +2138,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) + typeof(type) value; \ + if (sizeof(type) == 8) { \ + args = PTR_ALIGN(args, sizeof(u32)); \ +- *(u32 *)&value = *(u32 *)args; \ +- *((u32 *)&value + 1) = *(u32 *)(args + 4); \ ++ *(u32 *)&value = *(const u32 *)args; \ ++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \ + } else { \ + args = PTR_ALIGN(args, sizeof(type)); \ +- value = *(typeof(type) *)args; \ ++ value = *(const typeof(type) *)args; \ + } \ + args += sizeof(type); \ + value; \ +@@ -2174,7 +2205,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) + case FORMAT_TYPE_STR: { + const char *str_arg = args; + args += strlen(str_arg) + 1; +- str = string(str, end, (char *)str_arg, spec); ++ str = string(str, end, str_arg, spec); + break; + } + +diff --git a/localversion-grsec b/localversion-grsec +new file mode 100644 +index 0000000..7cd6065 +--- /dev/null ++++ b/localversion-grsec +@@ -0,0 +1 @@ ++-grsec +diff --git a/mm/Kconfig b/mm/Kconfig +index 0862816..2e3a043 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -329,10 +329,11 @@ config KSM + root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). + + config DEFAULT_MMAP_MIN_ADDR +- int "Low address space to protect from user allocation" ++ int "Low address space to protect from user allocation" + depends on MMU +- default 4096 +- help ++ default 32768 if ALPHA || ARM || PARISC || SPARC32 ++ default 65536 ++ help + This is the portion of low virtual memory which should be protected + from userspace allocation. Keeping a user from writing to low pages + can help reduce the impact of kernel NULL pointer bugs. 
+@@ -363,7 +364,7 @@ config MEMORY_FAILURE + + config HWPOISON_INJECT + tristate "HWPoison pages injector" +- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS ++ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC + select PROC_PAGE_MONITOR + + config NOMMU_INITIAL_TRIM_EXCESS +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index 09d9591..165bb75 100644 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -12,7 +12,7 @@ + #include <linux/device.h> + #include <trace/events/writeback.h> + +-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); ++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0); + + struct backing_dev_info default_backing_dev_info = { + .name = "default", +@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, + return err; + + err = bdi_register(bdi, NULL, "%.28s-%ld", name, +- atomic_long_inc_return(&bdi_seq)); ++ atomic_long_inc_return_unchecked(&bdi_seq)); + if (err) { + bdi_destroy(bdi); + return err; +diff --git a/mm/filemap.c b/mm/filemap.c +index 7a13f6a..e31738b 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -192,9 +192,11 @@ static int filemap_check_errors(struct address_space *mapping) + { + int ret = 0; + /* Check for outstanding write errors */ +- if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) ++ if (test_bit(AS_ENOSPC, &mapping->flags) && ++ test_and_clear_bit(AS_ENOSPC, &mapping->flags)) + ret = -ENOSPC; +- if (test_and_clear_bit(AS_EIO, &mapping->flags)) ++ if (test_bit(AS_EIO, &mapping->flags) && ++ test_and_clear_bit(AS_EIO, &mapping->flags)) + ret = -EIO; + return ret; + } +@@ -1766,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma) + struct address_space *mapping = file->f_mapping; + + if (!mapping->a_ops->readpage) +- return -ENOEXEC; ++ return -ENODEV; + file_accessed(file); + vma->vm_ops = &generic_file_vm_ops; + return 0; +@@ -1948,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr, + + while (bytes) { + char __user *buf = iov->iov_base + base; +- int copy = min(bytes, iov->iov_len - base); ++ size_t copy = min(bytes, iov->iov_len - base); + + base = 0; + left = __copy_from_user_inatomic(vaddr, buf, copy); +@@ -1977,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, + BUG_ON(!in_atomic()); + kaddr = kmap_atomic(page); + if (likely(i->nr_segs == 1)) { +- int left; ++ size_t left; + char __user *buf = i->iov->iov_base + i->iov_offset; + left = __copy_from_user_inatomic(kaddr + offset, buf, bytes); + copied = bytes - left; +@@ -2005,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page, + + kaddr = kmap(page); + if (likely(i->nr_segs == 1)) { +- int left; ++ size_t left; + char __user *buf = i->iov->iov_base + i->iov_offset; + left = __copy_from_user(kaddr + offset, buf, bytes); + copied = bytes - left; +@@ -2035,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes) + * zero-length segments (without overruning the iovec). 
+ */ + while (bytes || unlikely(i->count && !iov->iov_len)) { +- int copy; ++ size_t copy; + + copy = min(bytes, iov->iov_len - base); + BUG_ON(!i->count || i->count < copy); +@@ -2106,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i + *pos = i_size_read(inode); + + if (limit != RLIM_INFINITY) { ++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0); + if (*pos >= limit) { + send_sig(SIGXFSZ, current, 0); + return -EFBIG; +diff --git a/mm/fremap.c b/mm/fremap.c +index 34feba6..315fe78 100644 +--- a/mm/fremap.c ++++ b/mm/fremap.c +@@ -179,6 +179,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, + retry: + vma = find_vma(mm, start); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) ++ goto out; ++#endif ++ + /* + * Make sure the vma is shared, that it supports prefaulting, + * and that the remapped range is valid and fully within +diff --git a/mm/highmem.c b/mm/highmem.c +index b32b70c..e512eb0 100644 +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void) + * So no dangers, even with speculative execution. + */ + page = pte_page(pkmap_page_table[i]); ++ pax_open_kernel(); + pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); +- ++ pax_close_kernel(); + set_page_address(page, NULL); + need_flush = 1; + } +@@ -198,9 +199,11 @@ start: + } + } + vaddr = PKMAP_ADDR(last_pkmap_nr); ++ ++ pax_open_kernel(); + set_pte_at(&init_mm, vaddr, + &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); +- ++ pax_close_kernel(); + pkmap_count[last_pkmap_nr] = 1; + set_page_address(page, (void *)vaddr); + +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 923f38e..74e159a 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -2070,15 +2070,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, + struct hstate *h = &default_hstate; + unsigned long tmp; + int ret; ++ ctl_table_no_const hugetlb_table; + + tmp = h->max_huge_pages; + + if (write && h->order >= MAX_ORDER) + return -EINVAL; + +- table->data = &tmp; +- table->maxlen = sizeof(unsigned long); +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); ++ hugetlb_table = *table; ++ hugetlb_table.data = &tmp; ++ hugetlb_table.maxlen = sizeof(unsigned long); ++ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos); + if (ret) + goto out; + +@@ -2123,15 +2125,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, + struct hstate *h = &default_hstate; + unsigned long tmp; + int ret; ++ ctl_table_no_const hugetlb_table; + + tmp = h->nr_overcommit_huge_pages; + + if (write && h->order >= MAX_ORDER) + return -EINVAL; + +- table->data = &tmp; +- table->maxlen = sizeof(unsigned long); +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); ++ hugetlb_table = *table; ++ hugetlb_table.data = &tmp; ++ hugetlb_table.maxlen = sizeof(unsigned long); ++ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos); + if (ret) + goto out; + +@@ -2616,6 +2620,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, + return 1; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ struct vm_area_struct *vma_m; ++ unsigned long address_m; ++ pte_t *ptep_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= 
SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK); ++ get_page(page_m); ++ hugepage_add_anon_rmap(page_m, vma_m, address_m); ++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0)); ++} ++#endif ++ + /* + * Hugetlb_cow() should be called with page lock of the original hugepage held. + * Called with hugetlb_instantiation_mutex held and pte_page locked so we +@@ -2732,6 +2757,11 @@ retry_avoidcopy: + make_huge_pte(vma, new_page, 1)); + page_remove_rmap(old_page); + hugepage_add_new_anon_rmap(new_page, vma, address); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, new_page); ++#endif ++ + /* Make the old page be freed below */ + new_page = old_page; + } +@@ -2896,6 +2926,10 @@ retry: + && (vma->vm_flags & VM_SHARED))); + set_huge_pte_at(mm, address, ptep, new_pte); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, page); ++#endif ++ + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { + /* Optimization, do the COW without a second fault */ + ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); +@@ -2926,6 +2960,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + static DEFINE_MUTEX(hugetlb_instantiation_mutex); + struct hstate *h = hstate_vma(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + address &= huge_page_mask(h); + + ptep = huge_pte_offset(mm, address); +@@ -2939,6 +2977,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + VM_FAULT_SET_HINDEX(hstate_index(h)); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ h = hstate_vma(vma); ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h))) ++ return VM_FAULT_OOM; ++ address_m &= HPAGE_MASK; ++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL); ++ } ++#endif ++ + ptep = huge_pte_alloc(mm, address, huge_page_size(h)); + if (!ptep) + return VM_FAULT_OOM; +diff --git a/mm/internal.h b/mm/internal.h +index 3e91000..4741a60 100644 +--- a/mm/internal.h ++++ b/mm/internal.h +@@ -94,6 +94,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); + * in mm/page_alloc.c + */ + extern void __free_pages_bootmem(struct page *page, unsigned int order); ++extern void free_compound_page(struct page *page); + extern void prep_compound_page(struct page *page, unsigned long order); + #ifdef CONFIG_MEMORY_FAILURE + extern bool is_free_buddy_page(struct page *page); +@@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable; + + extern unsigned long vm_mmap_pgoff(struct file *, unsigned long, + unsigned long, unsigned long, +- unsigned long, unsigned long); ++ unsigned long, unsigned long) __intentional_overflow(-1); + + extern void set_pageblock_order(void); + unsigned long reclaim_clean_pages_from_list(struct zone *zone, +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index 31f01c5..7015178 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq, + + for (i = 0; i < object->trace_len; i++) { + void *ptr = (void *)object->trace[i]; +- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr); ++ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr); + } + } + +@@ -1853,7 +1853,7 @@ static int __init 
kmemleak_late_init(void) + return -ENOMEM; + } + +- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, ++ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL, + &kmemleak_fops); + if (!dentry) + pr_warning("Failed to create the debugfs kmemleak file\n"); +diff --git a/mm/maccess.c b/mm/maccess.c +index d53adf9..03a24bf 100644 +--- a/mm/maccess.c ++++ b/mm/maccess.c +@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) + set_fs(KERNEL_DS); + pagefault_disable(); + ret = __copy_from_user_inatomic(dst, +- (__force const void __user *)src, size); ++ (const void __force_user *)src, size); + pagefault_enable(); + set_fs(old_fs); + +@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) + + set_fs(KERNEL_DS); + pagefault_disable(); +- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size); + pagefault_enable(); + set_fs(old_fs); + +diff --git a/mm/madvise.c b/mm/madvise.c +index 539eeb9..e24a987 100644 +--- a/mm/madvise.c ++++ b/mm/madvise.c +@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma, + pgoff_t pgoff; + unsigned long new_flags = vma->vm_flags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + switch (behavior) { + case MADV_NORMAL: + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; +@@ -126,6 +130,13 @@ success: + /* + * vm_flags is protected by the mmap_sem held in write mode. + */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT); ++#endif ++ + vma->vm_flags = new_flags; + + out: +@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, unsigned long end) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + *prev = vma; + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) + return -EINVAL; +@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma, + zap_page_range(vma, start, end - start, &details); + } else + zap_page_range(vma, start, end - start, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) { ++ struct zap_details details = { ++ .nonlinear_vma = vma_m, ++ .last_index = ULONG_MAX, ++ }; ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details); ++ } else ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL); ++ } ++#endif ++ + return 0; + } + +@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) + if (end < start) + return error; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return error; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return error; ++ + error = 0; + if (end == start) + return error; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 33365e9..2234ef9 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0; + + int sysctl_memory_failure_recovery __read_mostly = 1; + +-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); ++atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); + + #if defined(CONFIG_HWPOISON_INJECT) || 
defined(CONFIG_HWPOISON_INJECT_MODULE) + +@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, + pfn, t->comm, t->pid); + si.si_signo = SIGBUS; + si.si_errno = 0; +- si.si_addr = (void *)addr; ++ si.si_addr = (void __user *)addr; + #ifdef __ARCH_SI_TRAPNO + si.si_trapno = trapno; + #endif +@@ -795,7 +795,7 @@ static struct page_state { + unsigned long res; + char *msg; + int (*action)(struct page *p, unsigned long pfn); +-} error_states[] = { ++} __do_const error_states[] = { + { reserved, reserved, "reserved kernel", me_kernel }, + /* + * free pages are specially detected outside this table: +@@ -1095,7 +1095,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + nr_pages = 1 << compound_order(hpage); + else /* normal page or thp */ + nr_pages = 1; +- atomic_long_add(nr_pages, &num_poisoned_pages); ++ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages); + + /* + * We need/can do nothing about count=0 pages. +@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + if (PageHWPoison(hpage)) { + if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) + || (p != hpage && TestSetPageHWPoison(hpage))) { +- atomic_long_sub(nr_pages, &num_poisoned_pages); ++ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages); + unlock_page(hpage); + return 0; + } +@@ -1190,14 +1190,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + */ + if (!PageHWPoison(p)) { + printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); +- atomic_long_sub(nr_pages, &num_poisoned_pages); ++ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages); + put_page(hpage); + res = 0; + goto out; + } + if (hwpoison_filter(p)) { + if (TestClearPageHWPoison(p)) +- atomic_long_sub(nr_pages, &num_poisoned_pages); ++ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages); + unlock_page(hpage); + put_page(hpage); + return 0; +@@ -1419,7 +1419,7 @@ int unpoison_memory(unsigned long pfn) + return 0; + } + if (TestClearPageHWPoison(p)) +- atomic_long_dec(&num_poisoned_pages); ++ atomic_long_dec_unchecked(&num_poisoned_pages); + pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); + return 0; + } +@@ -1433,7 +1433,7 @@ int unpoison_memory(unsigned long pfn) + */ + if (TestClearPageHWPoison(page)) { + pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); +- atomic_long_sub(nr_pages, &num_poisoned_pages); ++ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages); + freeit = 1; + if (PageHuge(page)) + clear_page_hwpoison_huge_page(page); +@@ -1558,11 +1558,11 @@ static int soft_offline_huge_page(struct page *page, int flags) + if (PageHuge(page)) { + set_page_hwpoison_huge_page(hpage); + dequeue_hwpoisoned_huge_page(hpage); +- atomic_long_add(1 << compound_order(hpage), ++ atomic_long_add_unchecked(1 << compound_order(hpage), + &num_poisoned_pages); + } else { + SetPageHWPoison(page); +- atomic_long_inc(&num_poisoned_pages); ++ atomic_long_inc_unchecked(&num_poisoned_pages); + } + } + return ret; +@@ -1601,7 +1601,7 @@ static int __soft_offline_page(struct page *page, int flags) + put_page(page); + pr_info("soft_offline: %#lx: invalidated\n", pfn); + SetPageHWPoison(page); +- atomic_long_inc(&num_poisoned_pages); ++ atomic_long_inc_unchecked(&num_poisoned_pages); + return 0; + } + +@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags) + if (!is_free_buddy_page(page)) + pr_info("soft offline: %#lx: page leaked\n", + pfn); +- atomic_long_inc(&num_poisoned_pages); ++ atomic_long_inc_unchecked(&num_poisoned_pages); 
+ } + } else { + pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", +@@ -1726,11 +1726,11 @@ int soft_offline_page(struct page *page, int flags) + if (PageHuge(page)) { + set_page_hwpoison_huge_page(hpage); + dequeue_hwpoisoned_huge_page(hpage); +- atomic_long_add(1 << compound_order(hpage), ++ atomic_long_add_unchecked(1 << compound_order(hpage), + &num_poisoned_pages); + } else { + SetPageHWPoison(page); +- atomic_long_inc(&num_poisoned_pages); ++ atomic_long_inc_unchecked(&num_poisoned_pages); + } + } + unset_migratetype_isolate(page, MIGRATE_MOVABLE); +diff --git a/mm/memory.c b/mm/memory.c +index 2121d8b8..fa1095a 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + free_pte_range(tlb, pmd, addr); + } while (pmd++, addr = next, addr != end); + ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD) + start &= PUD_MASK; + if (start < floor) + return; +@@ -417,6 +418,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + pmd = pmd_offset(pud, start); + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); ++#endif ++ + } + + static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, +@@ -436,6 +439,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, + free_pmd_range(tlb, pud, addr, next, floor, ceiling); + } while (pud++, addr = next, addr != end); + ++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD) + start &= PGDIR_MASK; + if (start < floor) + return; +@@ -450,6 +454,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, + pud = pud_offset(pgd, start); + pgd_clear(pgd); + pud_free_tlb(tlb, pud, start); ++#endif ++ + } + + /* +@@ -1636,12 +1642,6 @@ no_page_table: + return page; + } + +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +-{ +- return stack_guard_page_start(vma, addr) || +- stack_guard_page_end(vma, addr+PAGE_SIZE); +-} +- + /** + * __get_user_pages() - pin user pages in memory + * @tsk: task_struct of target task +@@ -1728,10 +1728,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + + i = 0; + +- do { ++ while (nr_pages) { + struct vm_area_struct *vma; + +- vma = find_extend_vma(mm, start); ++ vma = find_vma(mm, start); + if (!vma && in_gate_area(mm, start)) { + unsigned long pg = start & PAGE_MASK; + pgd_t *pgd; +@@ -1780,7 +1780,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + goto next_page; + } + +- if (!vma || ++ if (!vma || start < vma->vm_start || + (vma->vm_flags & (VM_IO | VM_PFNMAP)) || + !(vm_flags & vma->vm_flags)) + return i ? : -EFAULT; +@@ -1809,11 +1809,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + int ret; + unsigned int fault_flags = 0; + +- /* For mlock, just skip the stack guard page. 
*/ +- if (foll_flags & FOLL_MLOCK) { +- if (stack_guard_page(vma, start)) +- goto next_page; +- } + if (foll_flags & FOLL_WRITE) + fault_flags |= FAULT_FLAG_WRITE; + if (nonblocking) +@@ -1893,7 +1888,7 @@ next_page: + start += page_increm * PAGE_SIZE; + nr_pages -= page_increm; + } while (nr_pages && start < vma->vm_end); +- } while (nr_pages); ++ } + return i; + } + EXPORT_SYMBOL(__get_user_pages); +@@ -2105,6 +2100,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, + page_add_file_rmap(page); + set_pte_at(mm, addr, pte, mk_pte(page, prot)); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_file_pte(vma, addr, page, ptl); ++#endif ++ + retval = 0; + pte_unmap_unlock(pte, ptl); + return retval; +@@ -2149,9 +2148,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, + if (!page_count(page)) + return -EINVAL; + if (!(vma->vm_flags & VM_MIXEDMAP)) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vma->vm_flags |= VM_MIXEDMAP; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags |= VM_MIXEDMAP; ++#endif ++ + } + return insert_page(vma, addr, page, vma->vm_page_prot); + } +@@ -2234,6 +2245,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn) + { + BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); ++ BUG_ON(vma->vm_mirror); + + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; +@@ -2481,7 +2493,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, + + BUG_ON(pud_huge(*pud)); + +- pmd = pmd_alloc(mm, pud, addr); ++ pmd = (mm == &init_mm) ? ++ pmd_alloc_kernel(mm, pud, addr) : ++ pmd_alloc(mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -2501,7 +2515,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, + unsigned long next; + int err; + +- pud = pud_alloc(mm, pgd, addr); ++ pud = (mm == &init_mm) ? 
++ pud_alloc_kernel(mm, pgd, addr) : ++ pud_alloc(mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +@@ -2591,6 +2607,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo + copy_user_highpage(dst, src, va, vma); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ spinlock_t *ptl; ++ pte_t *pte, entry; ++ ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl); ++ entry = *pte; ++ if (!pte_present(entry)) { ++ if (!pte_none(entry)) { ++ BUG_ON(pte_file(entry)); ++ free_swap_and_cache(pte_to_swp_entry(entry)); ++ pte_clear_not_present_full(mm, address, pte, 0); ++ } ++ } else { ++ struct page *page; ++ ++ flush_cache_page(vma, address, pte_pfn(entry)); ++ entry = ptep_clear_flush(vma, address, pte); ++ BUG_ON(pte_dirty(entry)); ++ page = vm_normal_page(vma, address, entry); ++ if (page) { ++ update_hiwater_rss(mm); ++ if (PageAnon(page)) ++ dec_mm_counter_fast(mm, MM_ANONPAGES); ++ else ++ dec_mm_counter_fast(mm, MM_FILEPAGES); ++ page_remove_rmap(page); ++ page_cache_release(page); ++ } ++ } ++ pte_unmap_unlock(pte, ptl); ++} ++ ++/* PaX: if vma is mirrored, synchronize the mirror's PTE ++ * ++ * the ptl of the lower mapped page is held on entry and is not released on exit ++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) ++ */ ++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || !PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(!PageLocked(page_m)); ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_anon_rmap(page_m, vma_m, address_m); ++ inc_mm_counter_fast(mm, MM_ANONPAGES); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, pte_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap(pte_m); ++ unlock_page(page_m); ++} ++ ++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_file_rmap(page_m); ++ inc_mm_counter_fast(mm, 
MM_FILEPAGES); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, pte_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap(pte_m); ++} ++ ++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap(pte_m); ++} ++ ++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl) ++{ ++ struct page *page_m; ++ pte_t entry; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC)) ++ goto out; ++ ++ entry = *pte; ++ page_m = vm_normal_page(vma, address, entry); ++ if (!page_m) ++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl); ++ else if (PageAnon(page_m)) { ++ if (pax_find_mirror_vma(vma)) { ++ pte_unmap_unlock(pte, ptl); ++ lock_page(page_m); ++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); ++ if (pte_same(entry, *pte)) ++ pax_mirror_anon_pte(vma, address, page_m, ptl); ++ else ++ unlock_page(page_m); ++ } ++ } else ++ pax_mirror_file_pte(vma, address, page_m, ptl); ++ ++out: ++ pte_unmap_unlock(pte, ptl); ++} ++#endif ++ + /* + * This routine handles present pages, when users try to write + * to a shared page. It is done by copying the page to a new address +@@ -2815,6 +3011,12 @@ gotten: + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(new_page)); ++#endif ++ + if (old_page) { + if (!PageAnon(old_page)) { + dec_mm_counter_fast(mm, MM_FILEPAGES); +@@ -2866,6 +3068,10 @@ gotten: + page_remove_rmap(old_page); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, new_page, ptl); ++#endif ++ + /* Free the old page.. */ + new_page = old_page; + ret |= VM_FAULT_WRITE; +@@ -3143,6 +3349,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + swap_free(entry); + if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + try_to_free_swap(page); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma)) ++#endif ++ + unlock_page(page); + if (page != swapcache) { + /* +@@ -3166,6 +3377,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + out: +@@ -3185,40 +3401,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_{down|up}wards()", +- * except we must first make sure that 'address{-|+}PAGE_SIZE' +- * doesn't hit another vma. 
+- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- expand_downwards(vma, address - PAGE_SIZE); +- } +- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { +- struct vm_area_struct *next = vma->vm_next; +- +- /* As VM_GROWSDOWN but s/below/above/ */ +- if (next && next->vm_start == address + PAGE_SIZE) +- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; +- +- expand_upwards(vma, address + PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. +@@ -3227,27 +3409,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags) + { +- struct page *page; ++ struct page *page = NULL; + spinlock_t *ptl; + pte_t entry; + +- pte_unmap(page_table); +- +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; +- +- /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); +- page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ ++ pte_unmap(page_table); ++ + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); +@@ -3271,6 +3449,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, + if (!pte_none(*page_table)) + goto release; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + inc_mm_counter_fast(mm, MM_ANONPAGES); + page_add_new_anon_rmap(page, vma, address); + setpte: +@@ -3278,6 +3461,12 @@ setpte: + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (page) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + return 0; +@@ -3422,6 +3611,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + */ + /* Only go through if we didn't race with anybody else... 
*/ + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon && pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + flush_icache_page(vma, page); + entry = mk_pte(page, vma->vm_page_prot); + if (flags & FAULT_FLAG_WRITE) +@@ -3443,6 +3638,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, + + /* no need to invalidate: a not-present page won't be cached */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++ else ++ pax_mirror_file_pte(vma, address, page, ptl); ++#endif ++ + } else { + if (cow_page) + mem_cgroup_uncharge_page(cow_page); +@@ -3690,6 +3893,12 @@ static int handle_pte_fault(struct mm_struct *mm, + if (flags & FAULT_FLAG_WRITE) + flush_tlb_fix_spurious_fault(vma, address); + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_pte(vma, address, pte, pmd, ptl); ++ return 0; ++#endif ++ + unlock: + pte_unmap_unlock(pte, ptl); + return 0; +@@ -3706,9 +3915,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, + pmd_t *pmd; + pte_t *pte; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + if (unlikely(is_vm_hugetlb_page(vma))) + return hugetlb_fault(mm, vma, address, flags); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ pgd_t *pgd_m; ++ pud_t *pud_m; ++ pmd_t *pmd_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ pgd_m = pgd_offset(mm, address_m); ++ pud_m = pud_alloc(mm, pgd_m, address_m); ++ if (!pud_m) ++ return VM_FAULT_OOM; ++ pmd_m = pmd_alloc(mm, pud_m, address_m); ++ if (!pmd_m) ++ return VM_FAULT_OOM; ++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m)) ++ return VM_FAULT_OOM; ++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m); ++ } ++#endif ++ + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) +@@ -3836,6 +4077,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) + spin_unlock(&mm->page_table_lock); + return 0; + } ++ ++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address) ++{ ++ pud_t *new = pud_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&mm->page_table_lock); ++ if (pgd_present(*pgd)) /* Another has populated it */ ++ pud_free(mm, new); ++ else ++ pgd_populate_kernel(mm, pgd, new); ++ spin_unlock(&mm->page_table_lock); ++ return 0; ++} + #endif /* __PAGETABLE_PUD_FOLDED */ + + #ifndef __PAGETABLE_PMD_FOLDED +@@ -3866,6 +4124,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) + spin_unlock(&mm->page_table_lock); + return 0; + } ++ ++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address) ++{ ++ pmd_t *new = pmd_alloc_one(mm, address); ++ if (!new) ++ return -ENOMEM; ++ ++ smp_wmb(); /* See comment in __pte_alloc */ ++ ++ spin_lock(&mm->page_table_lock); ++#ifndef __ARCH_HAS_4LEVEL_HACK ++ if (pud_present(*pud)) /* Another has populated it */ ++ pmd_free(mm, new); ++ else ++ pud_populate_kernel(mm, pud, new); ++#else ++ if (pgd_present(*pud)) /* Another has populated it */ ++ pmd_free(mm, new); ++ else ++ pgd_populate_kernel(mm, pud, new); ++#endif /* __ARCH_HAS_4LEVEL_HACK */ ++ spin_unlock(&mm->page_table_lock); ++ return 0; 
++} + #endif /* __PAGETABLE_PMD_FOLDED */ + + #if !defined(__HAVE_ARCH_GATE_AREA) +@@ -3879,7 +4161,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + + return 0; + } +@@ -4013,8 +4295,8 @@ out: + return ret; + } + +-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, +- void *buf, int len, int write) ++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr, ++ void *buf, size_t len, int write) + { + resource_size_t phys_addr; + unsigned long prot = 0; +@@ -4040,8 +4322,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys); + * Access another process' address space as given in mm. If non-NULL, use the + * given task for page fault accounting. + */ +-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, +- unsigned long addr, void *buf, int len, int write) ++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, ++ unsigned long addr, void *buf, size_t len, int write) + { + struct vm_area_struct *vma; + void *old_buf = buf; +@@ -4049,7 +4331,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + down_read(&mm->mmap_sem); + /* ignore errors, just check how much was successfully transferred */ + while (len) { +- int bytes, ret, offset; ++ ssize_t bytes, ret, offset; + void *maddr; + struct page *page = NULL; + +@@ -4108,8 +4390,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + * + * The caller must hold a reference on @mm. + */ +-int access_remote_vm(struct mm_struct *mm, unsigned long addr, +- void *buf, int len, int write) ++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr, ++ void *buf, size_t len, int write) + { + return __access_remote_vm(NULL, mm, addr, buf, len, write); + } +@@ -4119,11 +4401,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr, + * Source/target buffer must be kernel space, + * Do not walk the page table directly, use get_user_pages + */ +-int access_process_vm(struct task_struct *tsk, unsigned long addr, +- void *buf, int len, int write) ++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, ++ void *buf, size_t len, int write) + { + struct mm_struct *mm; +- int ret; ++ ssize_t ret; + + mm = get_task_mm(tsk); + if (!mm) +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index 15a8ea0..cb50389 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, + unsigned long vmstart; + unsigned long vmend; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + vma = find_vma(mm, start); + if (!vma || vma->vm_start > start) + return -EFAULT; +@@ -790,6 +794,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, + err = vma_replace_policy(vma, new_pol); + if (err) + goto out; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ err = vma_replace_policy(vma_m, new_pol); ++ if (err) ++ goto out; ++ } ++#endif ++ + } + + out: +@@ -1253,6 +1267,17 @@ static long do_mbind(unsigned long start, unsigned long len, + + if (end < start) + return -EINVAL; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > 
TASK_SIZE) ++ return -EINVAL; ++ + if (end == start) + return 0; + +@@ -1478,8 +1503,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + */ + tcred = __task_cred(task); + if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && +- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && +- !capable(CAP_SYS_NICE)) { ++ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out_put; +@@ -1510,6 +1534,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, + goto out; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ mmput(mm); ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + err = do_migrate_pages(mm, old, new, + capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); + +diff --git a/mm/migrate.c b/mm/migrate.c +index bed4880..a493f67 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1485,8 +1485,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, + */ + tcred = __task_cred(task); + if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && +- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && +- !capable(CAP_SYS_NICE)) { ++ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +diff --git a/mm/mlock.c b/mm/mlock.c +index b1eb536..091d154 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -14,6 +14,7 @@ + #include <linux/pagevec.h> + #include <linux/mempolicy.h> + #include <linux/syscalls.h> ++#include <linux/security.h> + #include <linux/sched.h> + #include <linux/export.h> + #include <linux/rmap.h> +@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on) + { + unsigned long nstart, end, tmp; + struct vm_area_struct * vma, * prev; +- int error; ++ int error = 0; + + VM_BUG_ON(start & ~PAGE_MASK); + VM_BUG_ON(len != PAGE_ALIGN(len)); +@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on) + return -EINVAL; + if (end == start) + return 0; ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + vma = find_vma(current->mm, start); + if (!vma || vma->vm_start > start) + return -ENOMEM; +@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on) + for (nstart = start ; ; ) { + vm_flags_t newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ + /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ + + newflags = vma->vm_flags & ~VM_LOCKED; +@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) + locked += current->mm->locked_vm; + + /* check against resource limits */ ++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1); + if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) + error = do_mlock(start, len, 1); + +@@ -776,6 +786,11 @@ static int do_mlockall(int flags) + for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { + vm_flags_t newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ + newflags = vma->vm_flags & ~VM_LOCKED; + if (flags & MCL_CURRENT) + newflags |= VM_LOCKED; +@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags) + lock_limit >>= PAGE_SHIFT; + + ret = -ENOMEM; ++ ++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1); ++ + down_write(¤t->mm->mmap_sem); +- + if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || + capable(CAP_IPC_LOCK)) + ret = do_mlockall(flags); +diff --git a/mm/mmap.c b/mm/mmap.c +index 20ff0c3..005dc47 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -36,6 +36,7 @@ + #include <linux/sched/sysctl.h> + #include <linux/notifier.h> + #include <linux/memory.h> ++#include <linux/random.h> + + #include <asm/uaccess.h> + #include <asm/cacheflush.h> +@@ -52,6 +53,16 @@ + #define arch_rebalance_pgtables(addr, len) (addr) + #endif + ++static inline void verify_mm_writelocked(struct mm_struct *mm) ++{ ++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX) ++ if (unlikely(down_read_trylock(&mm->mmap_sem))) { ++ up_read(&mm->mmap_sem); ++ BUG(); ++ } ++#endif ++} ++ + static void unmap_region(struct mm_struct *mm, + struct vm_area_struct *vma, struct vm_area_struct *prev, + unsigned long start, unsigned long end); +@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm, + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + */ +-pgprot_t protection_map[16] = { ++pgprot_t protection_map[16] __read_only = { + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 + }; + +-pgprot_t vm_get_page_prot(unsigned long vm_flags) ++pgprot_t vm_get_page_prot(vm_flags_t vm_flags) + { +- return __pgprot(pgprot_val(protection_map[vm_flags & ++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | + pgprot_val(arch_vm_get_page_prot(vm_flags))); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if (!(__supported_pte_mask & _PAGE_NX) && ++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC && ++ (vm_flags & (VM_READ | VM_WRITE))) ++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot))))); ++#endif ++ ++ return prot; + } + EXPORT_SYMBOL(vm_get_page_prot); + +@@ -90,6 +110,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly; + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; + unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ + unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ ++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024; + /* + * Make sure vm_committed_as in one cacheline and not cacheline shared with + * other variables. It can be updated by several CPUs frequently. 
+@@ -246,6 +267,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) + struct vm_area_struct *next = vma->vm_next; + + might_sleep(); ++ BUG_ON(vma->vm_mirror); + if (vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); + if (vma->vm_file) +@@ -290,6 +312,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + * not page aligned -Ram Gupta + */ + rlim = rlimit(RLIMIT_DATA); ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ /* force a minimum 16MB brk heap on setuid/setgid binaries */ ++ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid())) ++ rlim = 4096 * PAGE_SIZE; ++#endif ++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1); + if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + + (mm->end_data - mm->start_data) > rlim) + goto out; +@@ -940,6 +968,12 @@ static int + can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { + if (vma->vm_pgoff == vm_pgoff) +@@ -959,6 +993,12 @@ static int + can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { + pgoff_t vm_pglen; +@@ -1001,13 +1041,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, unsigned long addr, + unsigned long end, unsigned long vm_flags, +- struct anon_vma *anon_vma, struct file *file, ++ struct anon_vma *anon_vma, struct file *file, + pgoff_t pgoff, struct mempolicy *policy) + { + pgoff_t pglen = (end - addr) >> PAGE_SHIFT; + struct vm_area_struct *area, *next; + int err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE; ++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL; ++ ++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end); ++#endif ++ + /* + * We later require that vma->vm_flags == vm_flags, + * so this tests vma->vm_flags & VM_SPECIAL, too. +@@ -1023,6 +1070,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + if (next && next->vm_end == end) /* cases 6, 7, 8 */ + next = next->vm_next; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev) ++ prev_m = pax_find_mirror_vma(prev); ++ if (area) ++ area_m = pax_find_mirror_vma(area); ++ if (next) ++ next_m = pax_find_mirror_vma(next); ++#endif ++ + /* + * Can it merge with the predecessor? 
+ */ +@@ -1042,9 +1098,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + /* cases 1, 6 */ + err = vma_adjust(prev, prev->vm_start, + next->vm_end, prev->vm_pgoff, NULL); +- } else /* cases 2, 5, 7 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ next_m->vm_end, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 2, 5, 7 */ + err = vma_adjust(prev, prev->vm_start, + end, prev->vm_pgoff, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ end_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } + if (err) + return NULL; + khugepaged_enter_vma_merge(prev); +@@ -1058,12 +1129,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, + mpol_equal(policy, vma_policy(next)) && + can_vma_merge_before(next, vm_flags, + anon_vma, file, pgoff+pglen)) { +- if (prev && addr < prev->vm_end) /* case 4 */ ++ if (prev && addr < prev->vm_end) { /* case 4 */ + err = vma_adjust(prev, prev->vm_start, + addr, prev->vm_pgoff, NULL); +- else /* cases 3, 8 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ addr_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 3, 8 */ + err = vma_adjust(area, addr, next->vm_end, + next->vm_pgoff - pglen, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && area_m) ++ err = vma_adjust(area_m, addr_m, next_m->vm_end, ++ next_m->vm_pgoff - pglen, NULL); ++#endif ++ ++ } + if (err) + return NULL; + khugepaged_enter_vma_merge(area); +@@ -1172,8 +1258,10 @@ none: + void vm_stat_account(struct mm_struct *mm, unsigned long flags, + struct file *file, long pages) + { +- const unsigned long stack_flags +- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC))) ++#endif + + mm->total_vm += pages; + +@@ -1181,7 +1269,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags, + mm->shared_vm += pages; + if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) + mm->exec_vm += pages; +- } else if (flags & stack_flags) ++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN)) + mm->stack_vm += pages; + } + #endif /* CONFIG_PROC_FS */ +@@ -1211,6 +1299,7 @@ static inline int mlock_future_check(struct mm_struct *mm, + locked += mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + return -EAGAIN; + } +@@ -1237,7 +1326,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + * (the exception is when the underlying filesystem is noexec + * mounted, in which case we dont add PROT_EXEC.) + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) + prot |= PROT_EXEC; + +@@ -1263,7 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. + */ +- addr = get_unmapped_area(file, addr, len, pgoff, flags); ++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? 
MAP_EXECUTABLE : 0)); + if (addr & ~PAGE_MASK) + return addr; + +@@ -1274,6 +1363,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++ ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG ++ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt && ++ mm->binfmt->handle_mmap) ++ mm->binfmt->handle_mmap(file); ++#endif ++ ++#ifndef CONFIG_PAX_MPROTECT_COMPAT ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) { ++ gr_log_rwxmmap(file); ++ ++#ifdef CONFIG_PAX_EMUPLT ++ vm_flags &= ~VM_EXEC; ++#else ++ return -EPERM; ++#endif ++ ++ } ++ ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++#else ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file) ++ vm_flags &= ~VM_PAGEEXEC; ++#endif ++ + if (flags & MAP_LOCKED) + if (!can_do_mlock()) + return -EPERM; +@@ -1361,6 +1487,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + vm_flags |= VM_NORESERVE; + } + ++ if (!gr_acl_handle_mmap(file, prot)) ++ return -EACCES; ++ + addr = mmap_region(file, addr, len, vm_flags, pgoff); + if (!IS_ERR_VALUE(addr) && + ((vm_flags & VM_LOCKED) || +@@ -1454,7 +1583,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma) + vm_flags_t vm_flags = vma->vm_flags; + + /* If it was private or non-writable, the write bit is already clear */ +- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) ++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED)) + return 0; + + /* The backer wishes to know when pages are first written to? */ +@@ -1500,7 +1629,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + struct rb_node **rb_link, *rb_parent; + unsigned long charged = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + /* Check against address space limit. 
*/ ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC))) ++#endif ++ + if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { + unsigned long nr_pages; + +@@ -1519,11 +1663,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + + /* Clear old maps */ + error = -ENOMEM; +-munmap_back: + if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)); + } + + /* +@@ -1554,6 +1697,16 @@ munmap_back: + goto unacct_error; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto free_vma; ++ } ++ } ++#endif ++ + vma->vm_mm = mm; + vma->vm_start = addr; + vma->vm_end = addr + len; +@@ -1573,6 +1726,13 @@ munmap_back: + if (error) + goto unmap_and_free_vma; + ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) { ++ vma->vm_flags |= VM_PAGEEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ } ++#endif ++ + /* Can addr have changed?? + * + * Answer: Yes, several device drivers can do it in their +@@ -1606,6 +1766,12 @@ munmap_back: + } + + vma_link(mm, vma, prev, rb_link, rb_parent); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ BUG_ON(pax_mirror_vma(vma_m, vma)); ++#endif ++ + /* Once vma denies write, undo our temporary denial count */ + if (vm_flags & VM_DENYWRITE) + allow_write_access(file); +@@ -1614,6 +1780,7 @@ out: + perf_event_mmap(vma); + + vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); ++ track_exec_limit(mm, addr, addr + len, vm_flags); + if (vm_flags & VM_LOCKED) { + if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || + vma == get_gate_vma(current->mm))) +@@ -1646,6 +1813,12 @@ unmap_and_free_vma: + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + charged = 0; + free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ kmem_cache_free(vm_area_cachep, vma_m); ++#endif ++ + kmem_cache_free(vm_area_cachep, vma); + unacct_error: + if (charged) +@@ -1653,7 +1826,63 @@ unacct_error: + return error; + } + +-unsigned long unmapped_area(struct vm_unmapped_area_info *info) ++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK ++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags) ++{ ++ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK)) ++ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT; ++ ++ return 0; ++} ++#endif ++ ++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset) ++{ ++ if (!vma) { ++#ifdef CONFIG_STACK_GROWSUP ++ if (addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ else ++ vma = find_vma(current->mm, 0); ++ if (vma && (vma->vm_flags & VM_GROWSUP)) ++ return false; ++#endif ++ return true; ++ } ++ ++ if (addr + len > vma->vm_start) ++ return false; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) ++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++#ifdef CONFIG_STACK_GROWSUP ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) ++ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap; ++#endif ++ else if (offset) ++ return offset <= vma->vm_start - addr - len; ++ ++ 
return true; ++} ++ ++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset) ++{ ++ if (vma->vm_start < len) ++ return -ENOMEM; ++ ++ if (!(vma->vm_flags & VM_GROWSDOWN)) { ++ if (offset <= vma->vm_start - len) ++ return vma->vm_start - len - offset; ++ else ++ return -ENOMEM; ++ } ++ ++ if (sysctl_heap_stack_gap <= vma->vm_start - len) ++ return vma->vm_start - len - sysctl_heap_stack_gap; ++ return -ENOMEM; ++} ++ ++unsigned long unmapped_area(const struct vm_unmapped_area_info *info) + { + /* + * We implement the search by looking for an rbtree node that +@@ -1701,11 +1930,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) + } + } + +- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; ++ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0; + check_current: + /* Check if current node has a suitable gap */ + if (gap_start > high_limit) + return -ENOMEM; ++ ++ if (gap_end - gap_start > info->threadstack_offset) ++ gap_start += info->threadstack_offset; ++ else ++ gap_start = gap_end; ++ ++ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) { ++ if (gap_end - gap_start > sysctl_heap_stack_gap) ++ gap_start += sysctl_heap_stack_gap; ++ else ++ gap_start = gap_end; ++ } ++ if (vma->vm_flags & VM_GROWSDOWN) { ++ if (gap_end - gap_start > sysctl_heap_stack_gap) ++ gap_end -= sysctl_heap_stack_gap; ++ else ++ gap_end = gap_start; ++ } + if (gap_end >= low_limit && gap_end - gap_start >= length) + goto found; + +@@ -1755,7 +2002,7 @@ found: + return gap_start; + } + +-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) ++unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info) + { + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; +@@ -1809,6 +2056,24 @@ check_current: + gap_end = vma->vm_start; + if (gap_end < low_limit) + return -ENOMEM; ++ ++ if (gap_end - gap_start > info->threadstack_offset) ++ gap_end -= info->threadstack_offset; ++ else ++ gap_end = gap_start; ++ ++ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) { ++ if (gap_end - gap_start > sysctl_heap_stack_gap) ++ gap_start += sysctl_heap_stack_gap; ++ else ++ gap_start = gap_end; ++ } ++ if (vma->vm_flags & VM_GROWSDOWN) { ++ if (gap_end - gap_start > sysctl_heap_stack_gap) ++ gap_end -= sysctl_heap_stack_gap; ++ else ++ gap_end = gap_start; ++ } + if (gap_start <= high_limit && gap_end - gap_start >= length) + goto found; + +@@ -1872,6 +2137,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + + if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; +@@ -1879,11 +2145,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -1892,6 +2162,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = 0; ++ info.threadstack_offset = offset; + return vm_unmapped_area(&info); + } + #endif +@@ -1910,6 +2181,7 @@ 
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + struct vm_unmapped_area_info info; ++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); + + /* requested length too big for entire address space */ + if (len > TASK_SIZE - mmap_min_addr) +@@ -1918,12 +2190,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && +- (!vma || addr + len <= vma->vm_start)) ++ check_heap_stack_gap(vma, addr, len, offset)) + return addr; + } + +@@ -1932,6 +2208,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = mm->mmap_base; + info.align_mask = 0; ++ info.threadstack_offset = offset; + addr = vm_unmapped_area(&info); + + /* +@@ -1944,6 +2221,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ info.low_limit += mm->delta_mmap; ++#endif ++ + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } +@@ -2045,6 +2328,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, + return vma; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *vma_m; ++ ++ BUG_ON(!vma || vma->vm_start >= vma->vm_end); ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { ++ BUG_ON(vma->vm_mirror); ++ return NULL; ++ } ++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end); ++ vma_m = vma->vm_mirror; ++ BUG_ON(!vma_m || vma_m->vm_mirror != vma); ++ BUG_ON(vma->vm_file != vma_m->vm_file); ++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start); ++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff); ++ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root); ++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED)); ++ return vma_m; ++} ++#endif ++ + /* + * Verify that the stack growth is acceptable and + * update accounting. This is shared with both the +@@ -2061,6 +2366,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + return -ENOMEM; + + /* Stack limit test */ ++ gr_learn_resource(current, RLIMIT_STACK, size, 1); + if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + return -ENOMEM; + +@@ -2071,6 +2377,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + locked = mm->locked_vm + grow; + limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); + limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > limit && !capable(CAP_IPC_LOCK)) + return -ENOMEM; + } +@@ -2100,37 +2407,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns + * PA-RISC uses this for its stack; IA64 for its Register Backing Store. + * vma is the last one with address > vma->vm_end. Have to extend vma. 
+ */ ++#ifndef CONFIG_IA64 ++static ++#endif + int expand_upwards(struct vm_area_struct *vma, unsigned long address) + { + int error; ++ bool locknext; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + ++ /* Also guard against wrapping around to address 0. */ ++ if (address < PAGE_ALIGN(address+1)) ++ address = PAGE_ALIGN(address+1); ++ else ++ return -ENOMEM; ++ + /* + * We must make sure the anon_vma is allocated + * so that the anon_vma locking is not a noop. + */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; ++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN); ++ if (locknext && anon_vma_prepare(vma->vm_next)) ++ return -ENOMEM; + vma_lock_anon_vma(vma); ++ if (locknext) ++ vma_lock_anon_vma(vma->vm_next); + + /* + * vma->vm_start/vm_end cannot change under us because the caller + * is required to hold the mmap_sem in read mode. We need the +- * anon_vma lock to serialize against concurrent expand_stacks. +- * Also guard against wrapping around to address 0. ++ * anon_vma locks to serialize against concurrent expand_stacks ++ * and expand_upwards. + */ +- if (address < PAGE_ALIGN(address+4)) +- address = PAGE_ALIGN(address+4); +- else { +- vma_unlock_anon_vma(vma); +- return -ENOMEM; +- } + error = 0; + + /* Somebody else might have raced and expanded it already */ +- if (address > vma->vm_end) { ++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { + unsigned long size, grow; + + size = address - vma->vm_start; +@@ -2165,6 +2483,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) + } + } + } ++ if (locknext) ++ vma_unlock_anon_vma(vma->vm_next); + vma_unlock_anon_vma(vma); + khugepaged_enter_vma_merge(vma); + validate_mm(vma->vm_mm); +@@ -2179,6 +2499,8 @@ int expand_downwards(struct vm_area_struct *vma, + unsigned long address) + { + int error; ++ bool lockprev = false; ++ struct vm_area_struct *prev; + + /* + * We must make sure the anon_vma is allocated +@@ -2192,6 +2514,15 @@ int expand_downwards(struct vm_area_struct *vma, + if (error) + return error; + ++ prev = vma->vm_prev; ++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) ++ lockprev = prev && (prev->vm_flags & VM_GROWSUP); ++#endif ++ if (lockprev && anon_vma_prepare(prev)) ++ return -ENOMEM; ++ if (lockprev) ++ vma_lock_anon_vma(prev); ++ + vma_lock_anon_vma(vma); + + /* +@@ -2201,9 +2532,17 @@ int expand_downwards(struct vm_area_struct *vma, + */ + + /* Somebody else might have raced and expanded it already */ +- if (address < vma->vm_start) { ++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) { + unsigned long size, grow; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + size = vma->vm_end - address; + grow = (vma->vm_start - address) >> PAGE_SHIFT; + +@@ -2228,13 +2567,27 @@ int expand_downwards(struct vm_area_struct *vma, + vma->vm_pgoff -= grow; + anon_vma_interval_tree_post_update_vma(vma); + vma_gap_update(vma); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ anon_vma_interval_tree_pre_update_vma(vma_m); ++ vma_m->vm_start -= grow << PAGE_SHIFT; ++ vma_m->vm_pgoff -= grow; ++ anon_vma_interval_tree_post_update_vma(vma_m); 
++ vma_gap_update(vma_m); ++ } ++#endif ++ + spin_unlock(&vma->vm_mm->page_table_lock); + ++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags); + perf_event_mmap(vma); + } + } + } + vma_unlock_anon_vma(vma); ++ if (lockprev) ++ vma_unlock_anon_vma(prev); + khugepaged_enter_vma_merge(vma); + validate_mm(vma->vm_mm); + return error; +@@ -2332,6 +2685,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) + do { + long nrpages = vma_pages(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) { ++ vma = remove_vma(vma); ++ continue; ++ } ++#endif ++ + if (vma->vm_flags & VM_ACCOUNT) + nr_accounted += nrpages; + vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); +@@ -2376,6 +2736,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, + insertion_point = (prev ? &prev->vm_next : &mm->mmap); + vma->vm_prev = NULL; + do { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mirror) { ++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma); ++ vma->vm_mirror->vm_mirror = NULL; ++ vma->vm_mirror->vm_flags &= ~VM_EXEC; ++ vma->vm_mirror = NULL; ++ } ++#endif ++ + vma_rb_erase(vma, &mm->mm_rb); + mm->map_count--; + tail_vma = vma; +@@ -2401,14 +2771,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + struct vm_area_struct *new; + int err = -ENOMEM; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m, *new_m = NULL; ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (is_vm_hugetlb_page(vma) && (addr & + ~(huge_page_mask(hstate_vma(vma))))) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!new) + goto out_err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!new_m) { ++ kmem_cache_free(vm_area_cachep, new); ++ goto out_err; ++ } ++ } ++#endif ++ + /* most fields are the same, copy all, and then fixup */ + *new = *vma; + +@@ -2421,6 +2810,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ *new_m = *vma_m; ++ INIT_LIST_HEAD(&new_m->anon_vma_chain); ++ new_m->vm_mirror = new; ++ new->vm_mirror = new_m; ++ ++ if (new_below) ++ new_m->vm_end = addr_m; ++ else { ++ new_m->vm_start = addr_m; ++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT); ++ } ++ } ++#endif ++ + err = vma_dup_policy(vma, new); + if (err) + goto out_free_vma; +@@ -2440,6 +2845,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + else + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && vma_m) { ++ struct mempolicy *pol = vma_policy(new); ++ ++ if (anon_vma_clone(new_m, vma_m)) ++ goto out_free_mpol; ++ ++ mpol_get(pol); ++ set_vma_policy(new_m, pol); ++ ++ if (new_m->vm_file) ++ get_file(new_m->vm_file); ++ ++ if (new_m->vm_ops && new_m->vm_ops->open) ++ new_m->vm_ops->open(new_m); ++ ++ if (new_below) ++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff + ++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m); ++ else ++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m); ++ ++ if (err) { ++ if (new_m->vm_ops && new_m->vm_ops->close) ++ new_m->vm_ops->close(new_m); ++ if 
(new_m->vm_file) ++ fput(new_m->vm_file); ++ mpol_put(pol); ++ } ++ } ++#endif ++ + /* Success. */ + if (!err) + return 0; +@@ -2449,10 +2886,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + new->vm_ops->close(new); + if (new->vm_file) + fput(new->vm_file); +- unlink_anon_vmas(new); + out_free_mpol: + mpol_put(vma_policy(new)); + out_free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (new_m) { ++ unlink_anon_vmas(new_m); ++ kmem_cache_free(vm_area_cachep, new_m); ++ } ++#endif ++ ++ unlink_anon_vmas(new); + kmem_cache_free(vm_area_cachep, new); + out_err: + return err; +@@ -2465,6 +2910,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, int new_below) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE); ++ if (mm->map_count >= sysctl_max_map_count-1) ++ return -ENOMEM; ++ } else ++#endif ++ + if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + +@@ -2476,11 +2930,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + * work. This now handles partial unmappings. + * Jeremy Fitzhardinge <jeremy@goop.org> + */ ++#ifdef CONFIG_PAX_SEGMEXEC + int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + { ++ int ret = __do_munmap(mm, start, len); ++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) ++ return ret; ++ ++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len); ++} ++ ++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#else ++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#endif ++{ + unsigned long end; + struct vm_area_struct *vma, *prev, *last; + ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) + return -EINVAL; + +@@ -2555,6 +3028,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) + /* Fix up all other VM information */ + remove_vma_list(mm, vma); + ++ track_exec_limit(mm, start, end, 0UL); ++ + return 0; + } + +@@ -2563,6 +3038,13 @@ int vm_munmap(unsigned long start, size_t len) + int ret; + struct mm_struct *mm = current->mm; + ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ++ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len)) ++ return -EINVAL; ++#endif ++ + down_write(&mm->mmap_sem); + ret = do_munmap(mm, start, len); + up_write(&mm->mmap_sem); +@@ -2576,16 +3058,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) + return vm_munmap(addr, len); + } + +-static inline void verify_mm_writelocked(struct mm_struct *mm) +-{ +-#ifdef CONFIG_DEBUG_VM +- if (unlikely(down_read_trylock(&mm->mmap_sem))) { +- WARN_ON(1); +- up_read(&mm->mmap_sem); +- } +-#endif +-} +- + /* + * this is really a simplified "do_mmap". it only handles + * anonymous maps. 
eventually we may be able to do some +@@ -2599,6 +3071,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + struct rb_node ** rb_link, * rb_parent; + pgoff_t pgoff = addr >> PAGE_SHIFT; + int error; ++ unsigned long charged; + + len = PAGE_ALIGN(len); + if (!len) +@@ -2606,10 +3079,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (error & ~PAGE_MASK) + return error; + ++ charged = len >> PAGE_SHIFT; ++ + error = mlock_future_check(mm, mm->def_flags, len); + if (error) + return error; +@@ -2623,21 +3110,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + /* + * Clear old maps. this also does some error checking for us + */ +- munmap_back: + if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)); + } + + /* Check against address space limits *after* clearing old maps... */ +- if (!may_expand_vm(mm, len >> PAGE_SHIFT)) ++ if (!may_expand_vm(mm, charged)) + return -ENOMEM; + + if (mm->map_count > sysctl_max_map_count) + return -ENOMEM; + +- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) ++ if (security_vm_enough_memory_mm(mm, charged)) + return -ENOMEM; + + /* Can we just expand an old private anonymous mapping? */ +@@ -2651,7 +3137,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + */ + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma) { +- vm_unacct_memory(len >> PAGE_SHIFT); ++ vm_unacct_memory(charged); + return -ENOMEM; + } + +@@ -2665,10 +3151,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + vma_link(mm, vma, prev, rb_link, rb_parent); + out: + perf_event_mmap(vma); +- mm->total_vm += len >> PAGE_SHIFT; ++ mm->total_vm += charged; + if (flags & VM_LOCKED) +- mm->locked_vm += (len >> PAGE_SHIFT); ++ mm->locked_vm += charged; + vma->vm_flags |= VM_SOFTDIRTY; ++ track_exec_limit(mm, addr, addr + len, flags); + return addr; + } + +@@ -2730,6 +3217,7 @@ void exit_mmap(struct mm_struct *mm) + while (vma) { + if (vma->vm_flags & VM_ACCOUNT) + nr_accounted += vma_pages(vma); ++ vma->vm_mirror = NULL; + vma = remove_vma(vma); + } + vm_unacct_memory(nr_accounted); +@@ -2747,6 +3235,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) + struct vm_area_struct *prev; + struct rb_node **rb_link, *rb_parent; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ ++ if (security_mmap_addr(vma->vm_start)) ++ return -EPERM; ++ + /* + * The vm_pgoff of a purely anonymous vma should be irrelevant + * until its first write fault, when page's anon_vma and index +@@ -2770,7 +3265,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) + security_vm_enough_memory_mm(mm, vma_pages(vma))) + return -ENOMEM; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) ++ return -ENOMEM; ++ } ++#endif ++ + vma_link(mm, vma, 
prev, rb_link, rb_parent); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ BUG_ON(pax_mirror_vma(vma_m, vma)); ++#endif ++ + return 0; + } + +@@ -2789,6 +3298,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + struct rb_node **rb_link, *rb_parent; + bool faulted_in_anon_vma = true; + ++ BUG_ON(vma->vm_mirror); ++ + /* + * If anonymous vma has not yet been faulted, update new pgoff + * to match new location, to increase its chance of merging. +@@ -2853,6 +3364,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + return NULL; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *prev_m; ++ struct rb_node **rb_link_m, *rb_parent_m; ++ struct mempolicy *pol_m; ++ ++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)); ++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror); ++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m))); ++ *vma_m = *vma; ++ INIT_LIST_HEAD(&vma_m->anon_vma_chain); ++ if (anon_vma_clone(vma_m, vma)) ++ return -ENOMEM; ++ pol_m = vma_policy(vma_m); ++ mpol_get(pol_m); ++ set_vma_policy(vma_m, pol_m); ++ vma_m->vm_start += SEGMEXEC_TASK_SIZE; ++ vma_m->vm_end += SEGMEXEC_TASK_SIZE; ++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED); ++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags); ++ if (vma_m->vm_file) ++ get_file(vma_m->vm_file); ++ if (vma_m->vm_ops && vma_m->vm_ops->open) ++ vma_m->vm_ops->open(vma_m); ++ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m)); ++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m); ++ vma_m->vm_mirror = vma; ++ vma->vm_mirror = vma_m; ++ return 0; ++} ++#endif ++ + /* + * Return true if the calling process may expand its vm space by the passed + * number of pages +@@ -2864,6 +3408,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) + + lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; + ++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1); + if (cur + npages > lim) + return 0; + return 1; +@@ -2934,6 +3479,22 @@ int install_special_mapping(struct mm_struct *mm, + vma->vm_start = addr; + vma->vm_end = addr + len; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++#ifndef CONFIG_PAX_MPROTECT_COMPAT ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) ++ return -EPERM; ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++#else ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC) ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + +diff --git a/mm/mprotect.c b/mm/mprotect.c +index 769a67a..414d24f 100644 +--- a/mm/mprotect.c ++++ b/mm/mprotect.c +@@ -24,10 +24,18 @@ + #include <linux/migrate.h> + #include <linux/perf_event.h> + #include <linux/ksm.h> ++#include <linux/sched/sysctl.h> ++ ++#ifdef CONFIG_PAX_MPROTECT ++#include <linux/elf.h> ++#include <linux/binfmts.h> ++#endif ++ + #include <asm/uaccess.h> + #include <asm/pgtable.h> + #include <asm/cacheflush.h> + #include <asm/tlbflush.h> ++#include <asm/mmu_context.h> + + #ifndef pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +@@ -214,6 +222,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, + return pages; + } + ++#ifdef 
CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing, except for stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ unsigned long oldlimit, newlimit = 0UL;
++
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
++ return;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++ /* USER_CS limit moved down */
++ newlimit = start;
++
++ if (newlimit) {
++ mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++ wmb();
++ cpus_clear(mm->context.cpu_user_cs_mask);
++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (newlimit == end) {
++ struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
++ if (is_vm_hugetlb_page(vma))
++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++ else
++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
++ }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -226,11 +276,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ int error;
+ int dirty_accountable = 0;
+ 
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++ unsigned long start_m, end_m;
++
++ start_m = start + SEGMEXEC_TASK_SIZE;
++ end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (newflags == oldflags) {
+ *pprev = vma;
+ return 0;
+ }
+ 
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+@@ -247,6 +315,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ }
+ }
+ 
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++ if (start != vma->vm_start) {
++ error = split_vma(mm, vma, start, 1);
++ if (error)
++ goto fail;
++ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++ *pprev = (*pprev)->vm_next;
++ }
++
++ if (end != vma->vm_end) {
++ error = split_vma(mm, vma, end, 0);
++ if (error)
++ goto fail;
++ }
++
++ if (pax_find_mirror_vma(vma)) {
++ error = __do_munmap(mm, start_m, end_m - start_m);
++ if (error)
++ goto fail;
++ } else {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto fail;
++ }
++ vma->vm_flags = newflags;
++ error = pax_mirror_vma(vma_m, vma);
++ if (error) {
++ vma->vm_flags = oldflags;
++ goto fail;
++ }
++ }
++ }
++#endif
++
+ /*
+ * First try to merge with previous and/or next vma.
+ */
+@@ -277,9 +381,21 @@ success:
+ * vm_flags and vm_page_prot are protected by the mmap_sem
+ * held in write mode. 
+ */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ)) ++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ; ++#endif ++ + vma->vm_flags = newflags; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->binfmt && mm->binfmt->handle_mprotect) ++ mm->binfmt->handle_mprotect(vma, newflags); ++#endif ++ + vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, +- vm_get_page_prot(newflags)); ++ vm_get_page_prot(vma->vm_flags)); + + if (vma_wants_writenotify(vma)) { + vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); +@@ -318,6 +434,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + end = start + len; + if (end <= start) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (!arch_validate_prot(prot)) + return -EINVAL; + +@@ -325,7 +452,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + /* + * Does the application expect PROT_READ to imply PROT_EXEC: + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + prot |= PROT_EXEC; + + vm_flags = calc_vm_prot_bits(prot); +@@ -357,6 +484,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + if (start > vma->vm_start) + prev = vma; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect) ++ current->mm->binfmt->handle_mprotect(vma, vm_flags); ++#endif ++ + for (nstart = start ; ; ) { + unsigned long newflags; + +@@ -367,6 +499,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + + /* newflags >> 4 shift VM_MAY% in place of VM_% */ + if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { ++ if (prot & (PROT_WRITE | PROT_EXEC)) ++ gr_log_rwxmprotect(vma); ++ ++ error = -EACCES; ++ goto out; ++ } ++ ++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { + error = -EACCES; + goto out; + } +@@ -381,6 +521,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, + error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); + if (error) + goto out; ++ ++ track_exec_limit(current->mm, nstart, tmp, vm_flags); ++ + nstart = tmp; + + if (nstart < prev->vm_end) +diff --git a/mm/mremap.c b/mm/mremap.c +index 05f1180..c3cde48 100644 +--- a/mm/mremap.c ++++ b/mm/mremap.c +@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, + continue; + pte = ptep_get_and_clear(mm, old_addr, old_pte); + pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC) ++ pte = pte_exprotect(pte); ++#endif ++ + pte = move_soft_dirty_pte(pte); + set_pte_at(mm, new_addr, new_pte, pte); + } +@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, + if (is_vm_hugetlb_page(vma)) + goto Einval; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ goto Einval; ++#endif ++ + /* We can't remap across vm area boundaries */ + if (old_len > vma->vm_end - addr) + goto Efault; +@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, + unsigned long ret = -EINVAL; + unsigned long charged = 0; + unsigned long map_flags; ++ unsigned long 
pax_task_size = TASK_SIZE; + + if (new_addr & ~PAGE_MASK) + goto out; + +- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len) + goto out; + + /* Check if the location we're moving into overlaps the + * old location at all, and fail if it does. + */ +- if ((new_addr <= addr) && (new_addr+new_len) > addr) +- goto out; +- +- if ((addr <= new_addr) && (addr+old_len) > new_addr) ++ if (addr + old_len > new_addr && new_addr + new_len > addr) + goto out; + + ret = do_munmap(mm, new_addr, new_len); +@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + unsigned long ret = -EINVAL; + unsigned long charged = 0; + bool locked = false; ++ unsigned long pax_task_size = TASK_SIZE; + + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) + return ret; +@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + if (!new_len) + return ret; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ pax_task_size -= PAGE_SIZE; ++ ++ if (new_len > pax_task_size || addr > pax_task_size-new_len || ++ old_len > pax_task_size || addr > pax_task_size-old_len) ++ return ret; ++ + down_write(¤t->mm->mmap_sem); + + if (flags & MREMAP_FIXED) { +@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + new_addr = addr; + } + ret = addr; ++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags); + goto out; + } + } +@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + goto out; + } + ++ map_flags = vma->vm_flags; + ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); ++ if (!(ret & ~PAGE_MASK)) { ++ track_exec_limit(current->mm, addr, addr + old_len, 0UL); ++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags); ++ } + } + out: + if (ret & ~PAGE_MASK) +diff --git a/mm/nommu.c b/mm/nommu.c +index 8740213..f87e25b 100644 +--- a/mm/nommu.c ++++ b/mm/nommu.c +@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; + int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; + unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ + unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ +-int heap_stack_gap = 0; + + atomic_long_t mmap_pages_allocated; + +@@ -845,15 +844,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) + EXPORT_SYMBOL(find_vma); + + /* +- * find a VMA +- * - we don't extend stack VMAs under NOMMU conditions +- */ +-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) +-{ +- return find_vma(mm, addr); +-} +- +-/* + * expand a stack to a given address + * - not supported under NOMMU conditions + */ +@@ -1564,6 +1554,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + + /* most fields are the same, copy all, and then fixup */ + *new = *vma; ++ INIT_LIST_HEAD(&new->anon_vma_chain); + *region = *vma->vm_region; + new->vm_region = region; + +@@ -1993,8 +1984,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, + } + EXPORT_SYMBOL(generic_file_remap_pages); + +-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, +- unsigned long addr, void *buf, int len, 
int write) ++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, ++ unsigned long addr, void *buf, size_t len, int write) + { + struct vm_area_struct *vma; + +@@ -2035,8 +2026,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + * + * The caller must hold a reference on @mm. + */ +-int access_remote_vm(struct mm_struct *mm, unsigned long addr, +- void *buf, int len, int write) ++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr, ++ void *buf, size_t len, int write) + { + return __access_remote_vm(NULL, mm, addr, buf, len, write); + } +@@ -2045,7 +2036,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr, + * Access another process' address space. + * - source/target buffer must be kernel space + */ +-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) ++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write) + { + struct mm_struct *mm; + +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 9f45f87..749bfd8 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -685,7 +685,7 @@ static long long pos_ratio_polynom(unsigned long setpoint, + * card's bdi_dirty may rush to many times higher than bdi_setpoint. + * - the bdi dirty thresh drops quickly due to change of JBOD workload + */ +-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, ++static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi, + unsigned long thresh, + unsigned long bg_thresh, + unsigned long dirty, +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 62e400d..2072e4e 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -61,6 +61,7 @@ + #include <linux/page-debug-flags.h> + #include <linux/hugetlb.h> + #include <linux/sched/rt.h> ++#include <linux/random.h> + + #include <asm/sections.h> + #include <asm/tlbflush.h> +@@ -355,7 +356,7 @@ out: + * This usage means that zero-order pages may not be compound. 
+ */ + +-static void free_compound_page(struct page *page) ++void free_compound_page(struct page *page) + { + __free_pages_ok(page, compound_order(page)); + } +@@ -729,6 +730,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order) + int i; + int bad = 0; + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ unsigned long index = 1UL << order; ++#endif ++ + trace_mm_page_free(page, order); + kmemcheck_free_shadow(page, order); + +@@ -745,6 +750,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order) + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ for (; index; --index) ++ sanitize_highpage(page + index - 1); ++#endif ++ + arch_free_page(page, order); + kernel_map_pages(page, 1 << order, 0); + +@@ -767,6 +778,20 @@ static void __free_pages_ok(struct page *page, unsigned int order) + local_irq_restore(flags); + } + ++#ifdef CONFIG_PAX_LATENT_ENTROPY ++bool __meminitdata extra_latent_entropy; ++ ++static int __init setup_pax_extra_latent_entropy(char *str) ++{ ++ extra_latent_entropy = true; ++ return 0; ++} ++early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy); ++ ++volatile u64 latent_entropy __latent_entropy; ++EXPORT_SYMBOL(latent_entropy); ++#endif ++ + void __init __free_pages_bootmem(struct page *page, unsigned int order) + { + unsigned int nr_pages = 1 << order; +@@ -782,6 +807,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order) + __ClearPageReserved(p); + set_page_count(p, 0); + ++#ifdef CONFIG_PAX_LATENT_ENTROPY ++ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) { ++ u64 hash = 0; ++ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash; ++ const u64 *data = lowmem_page_address(page); ++ ++ for (index = 0; index < end; index++) ++ hash ^= hash + data[index]; ++ latent_entropy ^= hash; ++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); ++ } ++#endif ++ + page_zone(page)->managed_pages += nr_pages; + set_page_refcounted(page); + __free_pages(page, order); +@@ -910,8 +948,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) + arch_alloc_page(page, order); + kernel_map_pages(page, 1 << order, 1); + ++#ifndef CONFIG_PAX_MEMORY_SANITIZE + if (gfp_flags & __GFP_ZERO) + prep_zero_page(page, order, gfp_flags); ++#endif + + if (order && (gfp_flags & __GFP_COMP)) + prep_compound_page(page, order); +@@ -2414,7 +2454,7 @@ static void reset_alloc_batches(struct zonelist *zonelist, + continue; + mod_zone_page_state(zone, NR_ALLOC_BATCH, + high_wmark_pages(zone) - low_wmark_pages(zone) - +- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); ++ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH])); + } + } + +@@ -6605,4 +6645,4 @@ void dump_page(struct page *page, char *reason) + { + dump_page_badflags(page, reason, 0); + } +-EXPORT_SYMBOL_GPL(dump_page); ++EXPORT_SYMBOL(dump_page); +diff --git a/mm/page_io.c b/mm/page_io.c +index 7c59ef6..1358905 100644 +--- a/mm/page_io.c ++++ b/mm/page_io.c +@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, + struct file *swap_file = sis->swap_file; + struct address_space *mapping = swap_file->f_mapping; + struct iovec iov = { +- .iov_base = kmap(page), ++ .iov_base = (void __force_user *)kmap(page), + .iov_len = PAGE_SIZE, + }; + +diff --git a/mm/percpu.c b/mm/percpu.c +index a2a54a8..43ecb68 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -122,7 +122,7 @@ static unsigned int 
pcpu_low_unit_cpu __read_mostly; + static unsigned int pcpu_high_unit_cpu __read_mostly; + + /* the address of the first chunk which starts with the kernel static area */ +-void *pcpu_base_addr __read_mostly; ++void *pcpu_base_addr __read_only; + EXPORT_SYMBOL_GPL(pcpu_base_addr); + + static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ +diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c +index a8b9199..dfb79e0 100644 +--- a/mm/pgtable-generic.c ++++ b/mm/pgtable-generic.c +@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t entry = *pmdp; + if (pmd_numa(entry)) + entry = pmd_mknonnuma(entry); +- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp)); ++ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c +index fd26d04..0cea1b0 100644 +--- a/mm/process_vm_access.c ++++ b/mm/process_vm_access.c +@@ -13,6 +13,7 @@ + #include <linux/uio.h> + #include <linux/sched.h> + #include <linux/highmem.h> ++#include <linux/security.h> + #include <linux/ptrace.h> + #include <linux/slab.h> + #include <linux/syscalls.h> +@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, + size_t iov_l_curr_offset = 0; + ssize_t iov_len; + ++ return -ENOSYS; // PaX: until properly audited ++ + /* + * Work out how many pages of struct pages we're going to need + * when eventually calling get_user_pages + */ + for (i = 0; i < riovcnt; i++) { + iov_len = rvec[i].iov_len; +- if (iov_len > 0) { +- nr_pages_iov = ((unsigned long)rvec[i].iov_base +- + iov_len) +- / PAGE_SIZE - (unsigned long)rvec[i].iov_base +- / PAGE_SIZE + 1; +- nr_pages = max(nr_pages, nr_pages_iov); +- } ++ if (iov_len <= 0) ++ continue; ++ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE - ++ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1; ++ nr_pages = max(nr_pages, nr_pages_iov); + } + + if (nr_pages == 0) +@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec, + goto free_proc_pages; + } + ++ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) { ++ rc = -EPERM; ++ goto put_task_struct; ++ } ++ + mm = mm_access(task, PTRACE_MODE_ATTACH); + if (!mm || IS_ERR(mm)) { + rc = IS_ERR(mm) ? 
PTR_ERR(mm) : -ESRCH; +diff --git a/mm/rmap.c b/mm/rmap.c +index cdbd312..2e1e0b9 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma) + struct anon_vma *anon_vma = vma->anon_vma; + struct anon_vma_chain *avc; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct anon_vma_chain *avc_m = NULL; ++#endif ++ + might_sleep(); + if (unlikely(!anon_vma)) { + struct mm_struct *mm = vma->vm_mm; +@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma) + if (!avc) + goto out_enomem; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ avc_m = anon_vma_chain_alloc(GFP_KERNEL); ++ if (!avc_m) ++ goto out_enomem_free_avc; ++#endif ++ + anon_vma = find_mergeable_anon_vma(vma); + allocated = NULL; + if (!anon_vma) { +@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma) + /* page_table_lock to protect against threads */ + spin_lock(&mm->page_table_lock); + if (likely(!vma->anon_vma)) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma); ++ ++ if (vma_m) { ++ BUG_ON(vma_m->anon_vma); ++ vma_m->anon_vma = anon_vma; ++ anon_vma_chain_link(vma_m, avc_m, anon_vma); ++ avc_m = NULL; ++ } ++#endif ++ + vma->anon_vma = anon_vma; + anon_vma_chain_link(vma, avc, anon_vma); + allocated = NULL; +@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma) + + if (unlikely(allocated)) + put_anon_vma(allocated); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (unlikely(avc_m)) ++ anon_vma_chain_free(avc_m); ++#endif ++ + if (unlikely(avc)) + anon_vma_chain_free(avc); + } + return 0; + + out_enomem_free_avc: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (avc_m) ++ anon_vma_chain_free(avc_m); ++#endif ++ + anon_vma_chain_free(avc); + out_enomem: + return -ENOMEM; +@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root) + * Attach the anon_vmas from src to dst. + * Returns 0 on success, -ENOMEM on failure. + */ +-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) ++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src) + { + struct anon_vma_chain *avc, *pavc; + struct anon_vma *root = NULL; +@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) + * the corresponding VMA in the parent process is attached to. + * Returns 0 on success, non-zero on failure. 
+ */ +-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) ++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma) + { + struct anon_vma_chain *avc; + struct anon_vma *anon_vma; +@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data) + void __init anon_vma_init(void) + { + anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), +- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); +- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC); ++ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE, ++ anon_vma_ctor); ++ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, ++ SLAB_PANIC|SLAB_NO_SANITIZE); + } + + /* +diff --git a/mm/shmem.c b/mm/shmem.c +index ff85863..6aa94ab 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -33,7 +33,7 @@ + #include <linux/swap.h> + #include <linux/aio.h> + +-static struct vfsmount *shm_mnt; ++struct vfsmount *shm_mnt; + + #ifdef CONFIG_SHMEM + /* +@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt; + #define BOGO_DIRENT_SIZE 20 + + /* Symlink up to this size is kmalloc'ed instead of using a swappable page */ +-#define SHORT_SYMLINK_LEN 128 ++#define SHORT_SYMLINK_LEN 64 + + /* + * shmem_fallocate communicates with shmem_fault or shmem_writepage via +@@ -2298,6 +2298,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = { + static int shmem_xattr_validate(const char *name) + { + struct { const char *prefix; size_t len; } arr[] = { ++ ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN}, ++#endif ++ + { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, + { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } + }; +@@ -2353,6 +2358,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name, + if (err) + return err; + ++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS ++ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) { ++ if (strcmp(name, XATTR_NAME_PAX_FLAGS)) ++ return -EOPNOTSUPP; ++ if (size > 8) ++ return -EINVAL; ++ } ++#endif ++ + return simple_xattr_set(&info->xattrs, name, value, size, flags); + } + +@@ -2665,8 +2679,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) + int err = -ENOMEM; + + /* Round up to L1_CACHE_BYTES to resist false sharing */ +- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), +- L1_CACHE_BYTES), GFP_KERNEL); ++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + +diff --git a/mm/slab.c b/mm/slab.c +index 6dd8d5f..2482a6d 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) + if ((x)->max_freeable < i) \ + (x)->max_freeable = i; \ + } while (0) +-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) +-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) +-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) +-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) ++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit) ++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss) ++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit) ++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss) ++#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized) ++#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized) + #else + #define STATS_INC_ACTIVE(x) do { } while (0) + #define STATS_DEC_ACTIVE(x) do { } while (0) +@@ -320,6 +322,8 @@ static void 
kmem_cache_node_init(struct kmem_cache_node *parent) + #define STATS_INC_ALLOCMISS(x) do { } while (0) + #define STATS_INC_FREEHIT(x) do { } while (0) + #define STATS_INC_FREEMISS(x) do { } while (0) ++#define STATS_INC_SANITIZED(x) do { } while (0) ++#define STATS_INC_NOT_SANITIZED(x) do { } while (0) + #endif + + #if DEBUG +@@ -436,7 +440,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, + * reciprocal_divide(offset, cache->reciprocal_buffer_size) + */ + static inline unsigned int obj_to_index(const struct kmem_cache *cache, +- const struct page *page, void *obj) ++ const struct page *page, const void *obj) + { + u32 offset = (obj - page->s_mem); + return reciprocal_divide(offset, cache->reciprocal_buffer_size); +@@ -1536,12 +1540,12 @@ void __init kmem_cache_init(void) + */ + + kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", +- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); ++ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS); + + if (INDEX_AC != INDEX_NODE) + kmalloc_caches[INDEX_NODE] = + create_kmalloc_cache("kmalloc-node", +- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); ++ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS); + + slab_early_init = 0; + +@@ -3484,6 +3488,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp, + struct array_cache *ac = cpu_cache_get(cachep); + + check_irq_off(); ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ if (pax_sanitize_slab) { ++ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) { ++ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size); ++ ++ if (cachep->ctor) ++ cachep->ctor(objp); ++ ++ STATS_INC_SANITIZED(cachep); ++ } else ++ STATS_INC_NOT_SANITIZED(cachep); ++ } ++#endif ++ + kmemleak_free_recursive(objp, cachep->flags); + objp = cache_free_debugcheck(cachep, objp, caller); + +@@ -3712,6 +3731,7 @@ void kfree(const void *objp) + + if (unlikely(ZERO_OR_NULL_PTR(objp))) + return; ++ VM_BUG_ON(!virt_addr_valid(objp)); + local_irq_save(flags); + kfree_debugcheck(objp); + c = virt_to_cache(objp); +@@ -4153,14 +4173,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) + } + /* cpu stats */ + { +- unsigned long allochit = atomic_read(&cachep->allochit); +- unsigned long allocmiss = atomic_read(&cachep->allocmiss); +- unsigned long freehit = atomic_read(&cachep->freehit); +- unsigned long freemiss = atomic_read(&cachep->freemiss); ++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit); ++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss); ++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit); ++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss); + + seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", + allochit, allocmiss, freehit, freemiss); + } ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ { ++ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized); ++ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized); ++ ++ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized); ++ } ++#endif + #endif + } + +@@ -4381,13 +4409,69 @@ static const struct file_operations proc_slabstats_operations = { + static int __init slab_proc_init(void) + { + #ifdef CONFIG_DEBUG_SLAB_LEAK +- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); ++ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations); + #endif + return 0; + } + module_init(slab_proc_init); + #endif + ++bool is_usercopy_object(const void *ptr) ++{ ++ struct page 
*page; ++ struct kmem_cache *cachep; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ return false; ++ ++ if (!slab_is_available()) ++ return false; ++ ++ if (!virt_addr_valid(ptr)) ++ return false; ++ ++ page = virt_to_head_page(ptr); ++ ++ if (!PageSlab(page)) ++ return false; ++ ++ cachep = page->slab_cache; ++ return cachep->flags & SLAB_USERCOPY; ++} ++ ++#ifdef CONFIG_PAX_USERCOPY ++const char *check_heap_object(const void *ptr, unsigned long n) ++{ ++ struct page *page; ++ struct kmem_cache *cachep; ++ unsigned int objnr; ++ unsigned long offset; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ return "<null>"; ++ ++ if (!virt_addr_valid(ptr)) ++ return NULL; ++ ++ page = virt_to_head_page(ptr); ++ ++ if (!PageSlab(page)) ++ return NULL; ++ ++ cachep = page->slab_cache; ++ if (!(cachep->flags & SLAB_USERCOPY)) ++ return cachep->name; ++ ++ objnr = obj_to_index(cachep, page, ptr); ++ BUG_ON(objnr >= cachep->num); ++ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); ++ if (offset <= cachep->object_size && n <= cachep->object_size - offset) ++ return NULL; ++ ++ return cachep->name; ++} ++#endif ++ + /** + * ksize - get the actual amount of memory allocated for a given object + * @objp: Pointer to the object +diff --git a/mm/slab.h b/mm/slab.h +index 8184a7c..ab27737 100644 +--- a/mm/slab.h ++++ b/mm/slab.h +@@ -32,6 +32,15 @@ extern struct list_head slab_caches; + /* The slab cache that manages slab cache information */ + extern struct kmem_cache *kmem_cache; + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++#ifdef CONFIG_X86_64 ++#define PAX_MEMORY_SANITIZE_VALUE '\xfe' ++#else ++#define PAX_MEMORY_SANITIZE_VALUE '\xff' ++#endif ++extern bool pax_sanitize_slab; ++#endif ++ + unsigned long calculate_alignment(unsigned long flags, + unsigned long align, unsigned long size); + +@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, + + /* Legal flag mask for kmem_cache_create(), for various configurations */ + #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ +- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS ) ++ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \ ++ SLAB_USERCOPY | SLAB_NO_SANITIZE) + + #if defined(CONFIG_DEBUG_SLAB) + #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) +@@ -257,6 +267,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) + return s; + + page = virt_to_head_page(x); ++ ++ BUG_ON(!PageSlab(page)); ++ + cachep = page->slab_cache; + if (slab_equal_or_root(cachep, s)) + return cachep; +diff --git a/mm/slab_common.c b/mm/slab_common.c +index f149e67..b366f92 100644 +--- a/mm/slab_common.c ++++ b/mm/slab_common.c +@@ -23,11 +23,22 @@ + + #include "slab.h" + +-enum slab_state slab_state; ++enum slab_state slab_state __read_only; + LIST_HEAD(slab_caches); + DEFINE_MUTEX(slab_mutex); + struct kmem_cache *kmem_cache; + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++bool pax_sanitize_slab __read_only = true; ++static int __init pax_sanitize_slab_setup(char *str) ++{ ++ pax_sanitize_slab = !!simple_strtol(str, NULL, 0); ++ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? 
"En" : "Dis"); ++ return 1; ++} ++__setup("pax_sanitize_slab=", pax_sanitize_slab_setup); ++#endif ++ + #ifdef CONFIG_DEBUG_VM + static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name, + size_t size) +@@ -225,7 +236,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, + if (err) + goto out_free_cache; + +- s->refcount = 1; ++ atomic_set(&s->refcount, 1); + list_add(&s->list, &slab_caches); + memcg_register_cache(s); + +@@ -278,8 +289,7 @@ void kmem_cache_destroy(struct kmem_cache *s) + + get_online_cpus(); + mutex_lock(&slab_mutex); +- s->refcount--; +- if (!s->refcount) { ++ if (atomic_dec_and_test(&s->refcount)) { + list_del(&s->list); + + if (!__kmem_cache_shutdown(s)) { +@@ -326,7 +336,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz + panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n", + name, size, err); + +- s->refcount = -1; /* Exempt from merging for now */ ++ atomic_set(&s->refcount, -1); /* Exempt from merging for now */ + } + + struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, +@@ -339,7 +349,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, + + create_boot_cache(s, name, size, flags); + list_add(&s->list, &slab_caches); +- s->refcount = 1; ++ atomic_set(&s->refcount, 1); + return s; + } + +@@ -351,6 +361,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; + EXPORT_SYMBOL(kmalloc_dma_caches); + #endif + ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1]; ++EXPORT_SYMBOL(kmalloc_usercopy_caches); ++#endif ++ + /* + * Conversion table for small slabs sizes / 8 to the index in the + * kmalloc array. This is necessary for slabs < 192 since we have non power +@@ -415,6 +430,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) + return kmalloc_dma_caches[index]; + + #endif ++ ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++ if (unlikely((flags & GFP_USERCOPY))) ++ return kmalloc_usercopy_caches[index]; ++ ++#endif ++ + return kmalloc_caches[index]; + } + +@@ -471,7 +493,7 @@ void __init create_kmalloc_caches(unsigned long flags) + for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { + if (!kmalloc_caches[i]) { + kmalloc_caches[i] = create_kmalloc_cache(NULL, +- 1 << i, flags); ++ 1 << i, SLAB_USERCOPY | flags); + } + + /* +@@ -480,10 +502,10 @@ void __init create_kmalloc_caches(unsigned long flags) + * earlier power of two caches + */ + if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6) +- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags); ++ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags); + + if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7) +- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags); ++ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags); + } + + /* Kmalloc array is now usable */ +@@ -516,6 +538,23 @@ void __init create_kmalloc_caches(unsigned long flags) + } + } + #endif ++ ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) { ++ struct kmem_cache *s = kmalloc_caches[i]; ++ ++ if (s) { ++ int size = kmalloc_size(i); ++ char *n = kasprintf(GFP_NOWAIT, ++ "usercopy-kmalloc-%d", size); ++ ++ BUG_ON(!n); ++ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n, ++ size, SLAB_USERCOPY | flags); ++ } ++ } ++#endif ++ + } + #endif /* !CONFIG_SLOB */ + +@@ -556,6 +595,9 @@ void print_slabinfo_header(struct seq_file *m) + 
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " + "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); + seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ seq_puts(m, " : pax <sanitized> <not_sanitized>"); ++#endif + #endif + seq_putc(m, '\n'); + } +diff --git a/mm/slob.c b/mm/slob.c +index 4bf8809..98a6914 100644 +--- a/mm/slob.c ++++ b/mm/slob.c +@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next) + /* + * Return the size of a slob block. + */ +-static slobidx_t slob_units(slob_t *s) ++static slobidx_t slob_units(const slob_t *s) + { + if (s->units > 0) + return s->units; +@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s) + /* + * Return the next free slob block pointer after this one. + */ +-static slob_t *slob_next(slob_t *s) ++static slob_t *slob_next(const slob_t *s) + { + slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); + slobidx_t next; +@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s) + /* + * Returns true if s is the last free block in its page. + */ +-static int slob_last(slob_t *s) ++static int slob_last(const slob_t *s) + { + return !((unsigned long)slob_next(s) & ~PAGE_MASK); + } + +-static void *slob_new_pages(gfp_t gfp, int order, int node) ++static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node) + { +- void *page; ++ struct page *page; + + #ifdef CONFIG_NUMA + if (node != NUMA_NO_NODE) +@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) + if (!page) + return NULL; + +- return page_address(page); ++ __SetPageSlab(page); ++ return page; + } + +-static void slob_free_pages(void *b, int order) ++static void slob_free_pages(struct page *sp, int order) + { + if (current->reclaim_state) + current->reclaim_state->reclaimed_slab += 1 << order; +- free_pages((unsigned long)b, order); ++ __ClearPageSlab(sp); ++ page_mapcount_reset(sp); ++ sp->private = 0; ++ __free_pages(sp, order); + } + + /* +@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) + + /* Not enough space: must allocate a new page */ + if (!b) { +- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); +- if (!b) ++ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); ++ if (!sp) + return NULL; +- sp = virt_to_page(b); +- __SetPageSlab(sp); ++ b = page_address(sp); + + spin_lock_irqsave(&slob_lock, flags); + sp->units = SLOB_UNITS(PAGE_SIZE); + sp->freelist = b; ++ sp->private = 0; + INIT_LIST_HEAD(&sp->list); + set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); + set_slob_page_free(sp, slob_list); +@@ -359,12 +363,15 @@ static void slob_free(void *block, int size) + if (slob_page_free(sp)) + clear_slob_page_free(sp); + spin_unlock_irqrestore(&slob_lock, flags); +- __ClearPageSlab(sp); +- page_mapcount_reset(sp); +- slob_free_pages(b, 0); ++ slob_free_pages(sp, 0); + return; + } + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ if (pax_sanitize_slab) ++ memset(block, PAX_MEMORY_SANITIZE_VALUE, size); ++#endif ++ + if (!slob_page_free(sp)) { + /* This slob page is about to become partially free. Easy! 
*/ + sp->units = units; +@@ -424,11 +431,10 @@ out: + */ + + static __always_inline void * +-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) ++__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align) + { +- unsigned int *m; +- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- void *ret; ++ slob_t *m; ++ void *ret = NULL; + + gfp &= gfp_allowed_mask; + +@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) + + if (!m) + return NULL; +- *m = size; ++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT); ++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT); ++ m[0].units = size; ++ m[1].units = align; + ret = (void *)m + align; + + trace_kmalloc_node(caller, ret, + size, size + align, gfp, node); + } else { + unsigned int order = get_order(size); ++ struct page *page; + + if (likely(order)) + gfp |= __GFP_COMP; +- ret = slob_new_pages(gfp, order, node); ++ page = slob_new_pages(gfp, order, node); ++ if (page) { ++ ret = page_address(page); ++ page->private = size; ++ } + + trace_kmalloc_node(caller, ret, + size, PAGE_SIZE << order, gfp, node); + } + +- kmemleak_alloc(ret, size, 1, gfp); ++ return ret; ++} ++ ++static __always_inline void * ++__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) ++{ ++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align); ++ ++ if (!ZERO_OR_NULL_PTR(ret)) ++ kmemleak_alloc(ret, size, 1, gfp); + return ret; + } + +@@ -493,34 +517,112 @@ void kfree(const void *block) + return; + kmemleak_free(block); + ++ VM_BUG_ON(!virt_addr_valid(block)); + sp = virt_to_page(block); +- if (PageSlab(sp)) { ++ VM_BUG_ON(!PageSlab(sp)); ++ if (!sp->private) { + int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- slob_free(m, *m + align); +- } else ++ slob_t *m = (slob_t *)(block - align); ++ slob_free(m, m[0].units + align); ++ } else { ++ __ClearPageSlab(sp); ++ page_mapcount_reset(sp); ++ sp->private = 0; + __free_pages(sp, compound_order(sp)); ++ } + } + EXPORT_SYMBOL(kfree); + ++bool is_usercopy_object(const void *ptr) ++{ ++ if (!slab_is_available()) ++ return false; ++ ++ // PAX: TODO ++ ++ return false; ++} ++ ++#ifdef CONFIG_PAX_USERCOPY ++const char *check_heap_object(const void *ptr, unsigned long n) ++{ ++ struct page *page; ++ const slob_t *free; ++ const void *base; ++ unsigned long flags; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ return "<null>"; ++ ++ if (!virt_addr_valid(ptr)) ++ return NULL; ++ ++ page = virt_to_head_page(ptr); ++ if (!PageSlab(page)) ++ return NULL; ++ ++ if (page->private) { ++ base = page; ++ if (base <= ptr && n <= page->private - (ptr - base)) ++ return NULL; ++ return "<slob>"; ++ } ++ ++ /* some tricky double walking to find the chunk */ ++ spin_lock_irqsave(&slob_lock, flags); ++ base = (void *)((unsigned long)ptr & PAGE_MASK); ++ free = page->freelist; ++ ++ while (!slob_last(free) && (void *)free <= ptr) { ++ base = free + slob_units(free); ++ free = slob_next(free); ++ } ++ ++ while (base < (void *)free) { ++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units; ++ int size = SLOB_UNIT * SLOB_UNITS(m + align); ++ int offset; ++ ++ if (ptr < base + align) ++ break; ++ ++ offset = ptr - base - align; ++ if (offset >= m) { ++ base += size; ++ continue; ++ } ++ ++ if (n > m - offset) ++ break; ++ ++ spin_unlock_irqrestore(&slob_lock, 
flags); ++ return NULL; ++ } ++ ++ spin_unlock_irqrestore(&slob_lock, flags); ++ return "<slob>"; ++} ++#endif ++ + /* can't use ksize for kmem_cache_alloc memory, only kmalloc */ + size_t ksize(const void *block) + { + struct page *sp; + int align; +- unsigned int *m; ++ slob_t *m; + + BUG_ON(!block); + if (unlikely(block == ZERO_SIZE_PTR)) + return 0; + + sp = virt_to_page(block); +- if (unlikely(!PageSlab(sp))) +- return PAGE_SIZE << compound_order(sp); ++ VM_BUG_ON(!PageSlab(sp)); ++ if (sp->private) ++ return sp->private; + + align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- m = (unsigned int *)(block - align); +- return SLOB_UNITS(*m) * SLOB_UNIT; ++ m = (slob_t *)(block - align); ++ return SLOB_UNITS(m[0].units) * SLOB_UNIT; + } + EXPORT_SYMBOL(ksize); + +@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) + + void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node) + { +- void *b; ++ void *b = NULL; + + flags &= gfp_allowed_mask; + + lockdep_trace_alloc(flags); + ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align); ++#else + if (c->size < PAGE_SIZE) { + b = slob_alloc(c->size, flags, c->align, node); + trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, + SLOB_UNITS(c->size) * SLOB_UNIT, + flags, node); + } else { +- b = slob_new_pages(flags, get_order(c->size), node); ++ struct page *sp; ++ ++ sp = slob_new_pages(flags, get_order(c->size), node); ++ if (sp) { ++ b = page_address(sp); ++ sp->private = c->size; ++ } + trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, + PAGE_SIZE << get_order(c->size), + flags, node); + } ++#endif + + if (b && c->ctor) + c->ctor(b); +@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); + + static void __kmem_cache_free(void *b, int size) + { +- if (size < PAGE_SIZE) ++ struct page *sp; ++ ++ sp = virt_to_page(b); ++ BUG_ON(!PageSlab(sp)); ++ if (!sp->private) + slob_free(b, size); + else +- slob_free_pages(b, get_order(size)); ++ slob_free_pages(sp, get_order(size)); + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head) + + void kmem_cache_free(struct kmem_cache *c, void *b) + { ++ int size = c->size; ++ ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++ if (size + c->align < PAGE_SIZE) { ++ size += c->align; ++ b -= c->align; ++ } ++#endif ++ + kmemleak_free_recursive(b, c->flags); + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { + struct slob_rcu *slob_rcu; +- slob_rcu = b + (c->size - sizeof(struct slob_rcu)); +- slob_rcu->size = c->size; ++ slob_rcu = b + (size - sizeof(struct slob_rcu)); ++ slob_rcu->size = size; + call_rcu(&slob_rcu->head, kmem_rcu_free); + } else { +- __kmem_cache_free(b, c->size); ++ __kmem_cache_free(b, size); + } + ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++ trace_kfree(_RET_IP_, b); ++#else + trace_kmem_cache_free(_RET_IP_, b); ++#endif ++ + } + EXPORT_SYMBOL(kmem_cache_free); + +diff --git a/mm/slub.c b/mm/slub.c +index 25f14ad..c904f6f 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -207,7 +207,7 @@ struct track { + + enum track_item { TRACK_ALLOC, TRACK_FREE }; + +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_add(struct kmem_cache *); + static int sysfs_slab_alias(struct kmem_cache *, const char *); + static void sysfs_slab_remove(struct kmem_cache *); +@@ -545,7 +545,7 @@ static void print_track(const char *s, struct track *t) + if (!t->addr) + return; + +- 
printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", ++ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n", + s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); + #ifdef CONFIG_STACKTRACE + { +@@ -2666,6 +2666,14 @@ static __always_inline void slab_free(struct kmem_cache *s, + + slab_free_hook(s, x); + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) { ++ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size); ++ if (s->ctor) ++ s->ctor(x); ++ } ++#endif ++ + redo: + /* + * Determine the currently cpus per cpu slab. +@@ -2733,7 +2741,7 @@ static int slub_min_objects; + * Merge control. If this is set then no merging of slab caches will occur. + * (Could be removed. This was introduced to pacify the merge skeptics.) + */ +-static int slub_nomerge; ++static int slub_nomerge = 1; + + /* + * Calculate the order of allocation given an slab object size. +@@ -3014,6 +3022,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) + s->inuse = size; + + if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) || ++#endif + s->ctor)) { + /* + * Relocate free pointer after the object if it is not +@@ -3359,6 +3370,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) + EXPORT_SYMBOL(__kmalloc_node); + #endif + ++bool is_usercopy_object(const void *ptr) ++{ ++ struct page *page; ++ struct kmem_cache *s; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ return false; ++ ++ if (!slab_is_available()) ++ return false; ++ ++ if (!virt_addr_valid(ptr)) ++ return false; ++ ++ page = virt_to_head_page(ptr); ++ ++ if (!PageSlab(page)) ++ return false; ++ ++ s = page->slab_cache; ++ return s->flags & SLAB_USERCOPY; ++} ++ ++#ifdef CONFIG_PAX_USERCOPY ++const char *check_heap_object(const void *ptr, unsigned long n) ++{ ++ struct page *page; ++ struct kmem_cache *s; ++ unsigned long offset; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ return "<null>"; ++ ++ if (!virt_addr_valid(ptr)) ++ return NULL; ++ ++ page = virt_to_head_page(ptr); ++ ++ if (!PageSlab(page)) ++ return NULL; ++ ++ s = page->slab_cache; ++ if (!(s->flags & SLAB_USERCOPY)) ++ return s->name; ++ ++ offset = (ptr - page_address(page)) % s->size; ++ if (offset <= s->object_size && n <= s->object_size - offset) ++ return NULL; ++ ++ return s->name; ++} ++#endif ++ + size_t ksize(const void *object) + { + struct page *page; +@@ -3387,6 +3451,7 @@ void kfree(const void *x) + if (unlikely(ZERO_OR_NULL_PTR(x))) + return; + ++ VM_BUG_ON(!virt_addr_valid(x)); + page = virt_to_head_page(x); + if (unlikely(!PageSlab(page))) { + BUG_ON(!PageCompound(page)); +@@ -3692,7 +3757,7 @@ static int slab_unmergeable(struct kmem_cache *s) + /* + * We may have set a slab to be unmergeable during bootstrap. + */ +- if (s->refcount < 0) ++ if (atomic_read(&s->refcount) < 0) + return 1; + + return 0; +@@ -3750,7 +3815,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, + + s = find_mergeable(memcg, size, align, flags, name, ctor); + if (s) { +- s->refcount++; ++ atomic_inc(&s->refcount); + /* + * Adjust the object sizes so that we clear + * the complete object on kzalloc. 
+@@ -3759,7 +3824,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, + s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); + + if (sysfs_slab_alias(s, name)) { +- s->refcount--; ++ atomic_dec(&s->refcount); + s = NULL; + } + } +@@ -3879,7 +3944,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, + } + #endif + +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int count_inuse(struct page *page) + { + return page->inuse; +@@ -4163,7 +4228,11 @@ static int list_locations(struct kmem_cache *s, char *buf, + len += sprintf(buf + len, "%7ld ", l->count); + + if (l->addr) ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ len += sprintf(buf + len, "%pS", NULL); ++#else + len += sprintf(buf + len, "%pS", (void *)l->addr); ++#endif + else + len += sprintf(buf + len, "<not-available>"); + +@@ -4268,12 +4337,12 @@ static void resiliency_test(void) + validate_slab_cache(kmalloc_caches[9]); + } + #else +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static void resiliency_test(void) {}; + #endif + #endif + +-#ifdef CONFIG_SYSFS ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + enum slab_stat_type { + SL_ALL, /* All slabs */ + SL_PARTIAL, /* Only partially allocated slabs */ +@@ -4513,13 +4582,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf) + { + if (!s->ctor) + return 0; ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ return sprintf(buf, "%pS\n", NULL); ++#else + return sprintf(buf, "%pS\n", s->ctor); ++#endif + } + SLAB_ATTR_RO(ctor); + + static ssize_t aliases_show(struct kmem_cache *s, char *buf) + { +- return sprintf(buf, "%d\n", s->refcount - 1); ++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1); + } + SLAB_ATTR_RO(aliases); + +@@ -4607,6 +4680,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) + SLAB_ATTR_RO(cache_dma); + #endif + ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++static ssize_t usercopy_show(struct kmem_cache *s, char *buf) ++{ ++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY)); ++} ++SLAB_ATTR_RO(usercopy); ++#endif ++ + static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) + { + return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); +@@ -4941,6 +5022,9 @@ static struct attribute *slab_attrs[] = { + #ifdef CONFIG_ZONE_DMA + &cache_dma_attr.attr, + #endif ++#ifdef CONFIG_PAX_USERCOPY_SLABS ++ &usercopy_attr.attr, ++#endif + #ifdef CONFIG_NUMA + &remote_node_defrag_ratio_attr.attr, + #endif +@@ -5173,6 +5257,7 @@ static char *create_unique_id(struct kmem_cache *s) + return name; + } + ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_add(struct kmem_cache *s) + { + int err; +@@ -5230,6 +5315,7 @@ static void sysfs_slab_remove(struct kmem_cache *s) + kobject_del(&s->kobj); + kobject_put(&s->kobj); + } ++#endif + + /* + * Need to buffer aliases during bootup until sysfs becomes +@@ -5243,6 +5329,7 @@ struct saved_alias { + + static struct saved_alias *alias_list; + ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD) + static int sysfs_slab_alias(struct kmem_cache *s, const char *name) + { + struct saved_alias *al; +@@ -5265,6 +5352,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) + alias_list = al; + return 0; + } ++#endif + + static int __init slab_sysfs_init(void) + { +diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c +index 4cba9c2..b4f9fcc 100644 +--- a/mm/sparse-vmemmap.c ++++ 
b/mm/sparse-vmemmap.c +@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) + void *p = vmemmap_alloc_block(PAGE_SIZE, node); + if (!p) + return NULL; +- pud_populate(&init_mm, pud, p); ++ pud_populate_kernel(&init_mm, pud, p); + } + return pud; + } +@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) + void *p = vmemmap_alloc_block(PAGE_SIZE, node); + if (!p) + return NULL; +- pgd_populate(&init_mm, pgd, p); ++ pgd_populate_kernel(&init_mm, pgd, p); + } + return pgd; + } +diff --git a/mm/sparse.c b/mm/sparse.c +index 63c3ea5..95c0858 100644 +--- a/mm/sparse.c ++++ b/mm/sparse.c +@@ -748,7 +748,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) + + for (i = 0; i < PAGES_PER_SECTION; i++) { + if (PageHWPoison(&memmap[i])) { +- atomic_long_sub(1, &num_poisoned_pages); ++ atomic_long_sub_unchecked(1, &num_poisoned_pages); + ClearPageHWPoison(&memmap[i]); + } + } +diff --git a/mm/swap.c b/mm/swap.c +index 0092097..33361ff 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -31,6 +31,7 @@ + #include <linux/memcontrol.h> + #include <linux/gfp.h> + #include <linux/uio.h> ++#include <linux/hugetlb.h> + + #include "internal.h" + +@@ -76,6 +77,8 @@ static void __put_compound_page(struct page *page) + + __page_cache_release(page); + dtor = get_compound_page_dtor(page); ++ if (!PageHuge(page)) ++ BUG_ON(dtor != free_compound_page); + (*dtor)(page); + } + +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 4a7f7e6..22cddf5 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex); + + static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); + /* Activity counter to indicate that a swapon or swapoff has occurred */ +-static atomic_t proc_poll_event = ATOMIC_INIT(0); ++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0); + + static inline unsigned char swap_count(unsigned char ent) + { +@@ -1959,7 +1959,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) + spin_unlock(&swap_lock); + + err = 0; +- atomic_inc(&proc_poll_event); ++ atomic_inc_unchecked(&proc_poll_event); + wake_up_interruptible(&proc_poll_wait); + + out_dput: +@@ -1976,8 +1976,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait) + + poll_wait(file, &proc_poll_wait, wait); + +- if (seq->poll_event != atomic_read(&proc_poll_event)) { +- seq->poll_event = atomic_read(&proc_poll_event); ++ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) { ++ seq->poll_event = atomic_read_unchecked(&proc_poll_event); + return POLLIN | POLLRDNORM | POLLERR | POLLPRI; + } + +@@ -2075,7 +2075,7 @@ static int swaps_open(struct inode *inode, struct file *file) + return ret; + + seq = file->private_data; +- seq->poll_event = atomic_read(&proc_poll_event); ++ seq->poll_event = atomic_read_unchecked(&proc_poll_event); + return 0; + } + +@@ -2534,7 +2534,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) + (frontswap_map) ? 
"FS" : ""); + + mutex_unlock(&swapon_mutex); +- atomic_inc(&proc_poll_event); ++ atomic_inc_unchecked(&proc_poll_event); + wake_up_interruptible(&proc_poll_wait); + + if (S_ISREG(inode->i_mode)) +diff --git a/mm/util.c b/mm/util.c +index c1010cb..91e1a36 100644 +--- a/mm/util.c ++++ b/mm/util.c +@@ -294,6 +294,12 @@ done: + void arch_pick_mmap_layout(struct mm_struct *mm) + { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + } + #endif +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index 0fdf968..991ff6a 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -38,6 +38,21 @@ struct vfree_deferred { + }; + static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++struct stack_deferred_llist { ++ struct llist_head list; ++ void *stack; ++ void *lowmem_stack; ++}; ++ ++struct stack_deferred { ++ struct stack_deferred_llist list; ++ struct work_struct wq; ++}; ++ ++static DEFINE_PER_CPU(struct stack_deferred, stack_deferred); ++#endif ++ + static void __vunmap(const void *, int); + + static void free_work(struct work_struct *w) +@@ -45,12 +60,30 @@ static void free_work(struct work_struct *w) + struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); + struct llist_node *llnode = llist_del_all(&p->list); + while (llnode) { +- void *p = llnode; ++ void *x = llnode; + llnode = llist_next(llnode); +- __vunmap(p, 1); ++ __vunmap(x, 1); + } + } + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++static void unmap_work(struct work_struct *w) ++{ ++ struct stack_deferred *p = container_of(w, struct stack_deferred, wq); ++ struct llist_node *llnode = llist_del_all(&p->list.list); ++ while (llnode) { ++ struct stack_deferred_llist *x = ++ llist_entry((struct llist_head *)llnode, ++ struct stack_deferred_llist, list); ++ void *stack = ACCESS_ONCE(x->stack); ++ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack); ++ llnode = llist_next(llnode); ++ __vunmap(stack, 0); ++ free_memcg_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER); ++ } ++} ++#endif ++ + /*** Page table manipulation functions ***/ + + static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) +@@ -59,8 +92,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) + + pte = pte_offset_kernel(pmd, addr); + do { +- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); +- WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) { ++ BUG_ON(!pte_exec(*pte)); ++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); ++ continue; ++ } ++#endif ++ ++ { ++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); ++ WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ } + } while (pte++, addr += PAGE_SIZE, addr != end); + } + +@@ -120,16 +164,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, + pte = pte_alloc_kernel(pmd, addr); + if (!pte) + return -ENOMEM; ++ ++ pax_open_kernel(); + do { + struct page *page = pages[*nr]; + +- if (WARN_ON(!pte_none(*pte))) ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (pgprot_val(prot) & _PAGE_NX) ++#endif ++ ++ if (!pte_none(*pte)) { ++ pax_close_kernel(); ++ WARN_ON(1); + return -EBUSY; +- if (WARN_ON(!page)) ++ } ++ if (!page) { ++ 
pax_close_kernel(); ++ WARN_ON(1); + return -ENOMEM; ++ } + set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); + (*nr)++; + } while (pte++, addr += PAGE_SIZE, addr != end); ++ pax_close_kernel(); + return 0; + } + +@@ -139,7 +196,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, + pmd_t *pmd; + unsigned long next; + +- pmd = pmd_alloc(&init_mm, pud, addr); ++ pmd = pmd_alloc_kernel(&init_mm, pud, addr); + if (!pmd) + return -ENOMEM; + do { +@@ -156,7 +213,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, + pud_t *pud; + unsigned long next; + +- pud = pud_alloc(&init_mm, pgd, addr); ++ pud = pud_alloc_kernel(&init_mm, pgd, addr); + if (!pud) + return -ENOMEM; + do { +@@ -216,6 +273,12 @@ int is_vmalloc_or_module_addr(const void *x) + if (addr >= MODULES_VADDR && addr < MODULES_END) + return 1; + #endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END) ++ return 1; ++#endif ++ + return is_vmalloc_addr(x); + } + +@@ -236,8 +299,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) + + if (!pgd_none(*pgd)) { + pud_t *pud = pud_offset(pgd, addr); ++#ifdef CONFIG_X86 ++ if (!pud_large(*pud)) ++#endif + if (!pud_none(*pud)) { + pmd_t *pmd = pmd_offset(pud, addr); ++#ifdef CONFIG_X86 ++ if (!pmd_large(*pmd)) ++#endif + if (!pmd_none(*pmd)) { + pte_t *ptep, pte; + +@@ -1175,13 +1244,23 @@ void __init vmalloc_init(void) + for_each_possible_cpu(i) { + struct vmap_block_queue *vbq; + struct vfree_deferred *p; ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ struct stack_deferred *p2; ++#endif + + vbq = &per_cpu(vmap_block_queue, i); + spin_lock_init(&vbq->lock); + INIT_LIST_HEAD(&vbq->free); ++ + p = &per_cpu(vfree_deferred, i); + init_llist_head(&p->list); + INIT_WORK(&p->wq, free_work); ++ ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++ p2 = &per_cpu(stack_deferred, i); ++ init_llist_head(&p2->list.list); ++ INIT_WORK(&p2->wq, unmap_work); ++#endif + } + + /* Import existing vmlist entries. 
*/ +@@ -1309,6 +1388,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, + struct vm_struct *area; + + BUG_ON(in_interrupt()); ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (flags & VM_KERNEXEC) { ++ if (start != VMALLOC_START || end != VMALLOC_END) ++ return NULL; ++ start = (unsigned long)MODULES_EXEC_VADDR; ++ end = (unsigned long)MODULES_EXEC_END; ++ } ++#endif ++ + if (flags & VM_IOREMAP) + align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); + +@@ -1514,6 +1603,23 @@ void vunmap(const void *addr) + } + EXPORT_SYMBOL(vunmap); + ++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW ++void unmap_process_stacks(struct task_struct *task) ++{ ++ if (unlikely(in_interrupt())) { ++ struct stack_deferred *p = &__get_cpu_var(stack_deferred); ++ struct stack_deferred_llist *list = task->stack; ++ list->stack = task->stack; ++ list->lowmem_stack = task->lowmem_stack; ++ if (llist_add((struct llist_node *)&list->list, &p->list.list)) ++ schedule_work(&p->wq); ++ } else { ++ __vunmap(task->stack, 0); ++ free_memcg_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER); ++ } ++} ++#endif ++ + /** + * vmap - map an array of pages into virtually contiguous space + * @pages: array of page pointers +@@ -1534,6 +1640,11 @@ void *vmap(struct page **pages, unsigned int count, + if (count > totalram_pages) + return NULL; + ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ flags |= VM_KERNEXEC; ++#endif ++ + area = get_vm_area_caller((count << PAGE_SHIFT), flags, + __builtin_return_address(0)); + if (!area) +@@ -1634,6 +1745,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, + if (!size || (size >> PAGE_SHIFT) > totalram_pages) + goto fail; + ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC, ++ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller); ++ else ++#endif ++ + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED, + start, end, node, gfp_mask, caller); + if (!area) +@@ -1810,10 +1928,9 @@ EXPORT_SYMBOL(vzalloc_node); + * For tight control over page level allocator and protection flags + * use __vmalloc() instead. 
+ */ +- + void *vmalloc_exec(unsigned long size) + { +- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, ++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC, + NUMA_NO_NODE, __builtin_return_address(0)); + } + +@@ -2120,6 +2237,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, + { + struct vm_struct *area; + ++ BUG_ON(vma->vm_mirror); ++ + size = PAGE_ALIGN(size); + + if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) +@@ -2602,7 +2721,11 @@ static int s_show(struct seq_file *m, void *p) + v->addr, v->addr + v->size, v->size); + + if (v->caller) ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, " %pK", v->caller); ++#else + seq_printf(m, " %pS", v->caller); ++#endif + + if (v->nr_pages) + seq_printf(m, " pages=%d", v->nr_pages); +diff --git a/mm/vmstat.c b/mm/vmstat.c +index def5dd2..4ce55cec 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -20,6 +20,7 @@ + #include <linux/writeback.h> + #include <linux/compaction.h> + #include <linux/mm_inline.h> ++#include <linux/grsecurity.h> + + #include "internal.h" + +@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu) + * + * vm_stat contains the global counters + */ +-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp; ++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp; + EXPORT_SYMBOL(vm_stat); + + #ifdef CONFIG_SMP +@@ -423,7 +424,7 @@ static inline void fold_diff(int *diff) + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + if (diff[i]) +- atomic_long_add(diff[i], &vm_stat[i]); ++ atomic_long_add_unchecked(diff[i], &vm_stat[i]); + } + + /* +@@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void) + v = this_cpu_xchg(p->vm_stat_diff[i], 0); + if (v) { + +- atomic_long_add(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); + global_diff[i] += v; + #ifdef CONFIG_NUMA + /* 3 seconds idle till flush */ +@@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu) + + v = p->vm_stat_diff[i]; + p->vm_stat_diff[i] = 0; +- atomic_long_add(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); + global_diff[i] += v; + } + } +@@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) + if (pset->vm_stat_diff[i]) { + int v = pset->vm_stat_diff[i]; + pset->vm_stat_diff[i] = 0; +- atomic_long_add(v, &zone->vm_stat[i]); +- atomic_long_add(v, &vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &vm_stat[i]); + } + } + #endif +@@ -1150,10 +1151,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos) + stat_items_size += sizeof(struct vm_event_state); + #endif + +- v = kmalloc(stat_items_size, GFP_KERNEL); ++ v = kzalloc(stat_items_size, GFP_KERNEL); + m->private = v; + if (!v) + return ERR_PTR(-ENOMEM); ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ && !in_group_p(grsec_proc_gid) ++#endif ++ ) ++ return (unsigned long *)m->private + *pos; ++#endif ++#endif ++ + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + v[i] = global_page_state(i); + v += NR_VM_ZONE_STAT_ITEMS; +@@ -1302,10 +1315,16 @@ static int __init setup_vmstat(void) + put_online_cpus(); + #endif + #ifdef CONFIG_PROC_FS +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); +- 
proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); +- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); ++ { ++ mode_t gr_mode = S_IRUGO; ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations); ++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops); ++ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); ++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations); ++ } + #endif + return 0; + } +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c +index 44ebd5c..1f732bae 100644 +--- a/net/8021q/vlan.c ++++ b/net/8021q/vlan.c +@@ -475,7 +475,7 @@ out: + return NOTIFY_DONE; + } + +-static struct notifier_block vlan_notifier_block __read_mostly = { ++static struct notifier_block vlan_notifier_block = { + .notifier_call = vlan_device_event, + }; + +@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; +- if ((args.u.name_type >= 0) && +- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { ++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { + struct vlan_net *vn; + + vn = net_generic(net, vlan_net_id); +diff --git a/net/9p/client.c b/net/9p/client.c +index 9186550..e604a2f 100644 +--- a/net/9p/client.c ++++ b/net/9p/client.c +@@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, + len - inline_len); + } else { + err = copy_from_user(ename + inline_len, +- uidata, len - inline_len); ++ (char __force_user *)uidata, len - inline_len); + if (err) { + err = -EFAULT; + goto out_err; +@@ -1560,7 +1560,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, + kernel_buf = 1; + indata = data; + } else +- indata = (__force char *)udata; ++ indata = (__force_kernel char *)udata; + /* + * response header len is 11 + * PDU Header(7) + IO Size (4) +@@ -1635,7 +1635,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, + kernel_buf = 1; + odata = data; + } else +- odata = (char *)udata; ++ odata = (char __force_kernel *)udata; + req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize, + P9_ZC_HDR_SZ, kernel_buf, "dqd", + fid->fid, offset, rsize); +diff --git a/net/9p/mod.c b/net/9p/mod.c +index 6ab36ae..6f1841b 100644 +--- a/net/9p/mod.c ++++ b/net/9p/mod.c +@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list); + void v9fs_register_trans(struct p9_trans_module *m) + { + spin_lock(&v9fs_trans_lock); +- list_add_tail(&m->list, &v9fs_trans_list); ++ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list); + spin_unlock(&v9fs_trans_lock); + } + EXPORT_SYMBOL(v9fs_register_trans); +@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans); + void v9fs_unregister_trans(struct p9_trans_module *m) + { + spin_lock(&v9fs_trans_lock); +- list_del_init(&m->list); ++ pax_list_del_init((struct list_head *)&m->list); + spin_unlock(&v9fs_trans_lock); + } + EXPORT_SYMBOL(v9fs_unregister_trans); +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c +index b7bd7f2..2498bf7 100644 +--- a/net/9p/trans_fd.c ++++ b/net/9p/trans_fd.c +@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len) + oldfs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); ++ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos); + 
set_fs(oldfs); + + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) +diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c +index af46bc4..f9adfcd 100644 +--- a/net/appletalk/atalk_proc.c ++++ b/net/appletalk/atalk_proc.c +@@ -256,7 +256,7 @@ int __init atalk_proc_init(void) + struct proc_dir_entry *p; + int rc = -ENOMEM; + +- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net); ++ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net); + if (!atalk_proc_dir) + goto out; + +diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c +index 876fbe8..8bbea9f 100644 +--- a/net/atm/atm_misc.c ++++ b/net/atm/atm_misc.c +@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize) + if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) + return 1; + atm_return(vcc, truesize); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return 0; + } + EXPORT_SYMBOL(atm_charge); +@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size, + } + } + atm_return(vcc, guess); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return NULL; + } + EXPORT_SYMBOL(atm_alloc_charge); +@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal); + + void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats); + + void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +diff --git a/net/atm/lec.c b/net/atm/lec.c +index 5a2f602..93961433 100644 +--- a/net/atm/lec.c ++++ b/net/atm/lec.c +@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry) + } + + static struct lane2_ops lane2_ops = { +- lane2_resolve, /* resolve, spec 3.1.3 */ +- lane2_associate_req, /* associate_req, spec 3.1.4 */ +- NULL /* associate indicator, spec 3.1.5 */ ++ .resolve = lane2_resolve, ++ .associate_req = lane2_associate_req, ++ .associate_indicator = NULL + }; + + static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +diff --git a/net/atm/lec.h b/net/atm/lec.h +index 4149db1..f2ab682 100644 +--- a/net/atm/lec.h ++++ b/net/atm/lec.h +@@ -48,7 +48,7 @@ struct lane2_ops { + const u8 *tlvs, u32 sizeoftlvs); + void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr, + const u8 *tlvs, u32 sizeoftlvs); +-}; ++} __no_const; + + /* + * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType +diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c +index d1b2d9a..d549f7f 100644 +--- a/net/atm/mpoa_caches.c ++++ b/net/atm/mpoa_caches.c +@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc) + + + static struct in_cache_ops ingress_ops = { +- in_cache_add_entry, /* add_entry */ +- in_cache_get, /* get */ +- in_cache_get_with_mask, /* get_with_mask */ +- in_cache_get_by_vcc, /* get_by_vcc */ +- in_cache_put, /* put */ +- in_cache_remove_entry, /* remove_entry */ +- cache_hit, /* cache_hit */ +- clear_count_and_expired, /* clear_count */ +- check_resolving_entries, /* check_resolving */ +- refresh_entries, /* refresh */ +- in_destroy_cache /* destroy_cache */ ++ .add_entry = in_cache_add_entry, ++ .get = in_cache_get, 
++ .get_with_mask = in_cache_get_with_mask, ++ .get_by_vcc = in_cache_get_by_vcc, ++ .put = in_cache_put, ++ .remove_entry = in_cache_remove_entry, ++ .cache_hit = cache_hit, ++ .clear_count = clear_count_and_expired, ++ .check_resolving = check_resolving_entries, ++ .refresh = refresh_entries, ++ .destroy_cache = in_destroy_cache + }; + + static struct eg_cache_ops egress_ops = { +- eg_cache_add_entry, /* add_entry */ +- eg_cache_get_by_cache_id, /* get_by_cache_id */ +- eg_cache_get_by_tag, /* get_by_tag */ +- eg_cache_get_by_vcc, /* get_by_vcc */ +- eg_cache_get_by_src_ip, /* get_by_src_ip */ +- eg_cache_put, /* put */ +- eg_cache_remove_entry, /* remove_entry */ +- update_eg_cache_entry, /* update */ +- clear_expired, /* clear_expired */ +- eg_destroy_cache /* destroy_cache */ ++ .add_entry = eg_cache_add_entry, ++ .get_by_cache_id = eg_cache_get_by_cache_id, ++ .get_by_tag = eg_cache_get_by_tag, ++ .get_by_vcc = eg_cache_get_by_vcc, ++ .get_by_src_ip = eg_cache_get_by_src_ip, ++ .put = eg_cache_put, ++ .remove_entry = eg_cache_remove_entry, ++ .update = update_eg_cache_entry, ++ .clear_expired = clear_expired, ++ .destroy_cache = eg_destroy_cache + }; + + +diff --git a/net/atm/proc.c b/net/atm/proc.c +index bbb6461..cf04016 100644 +--- a/net/atm/proc.c ++++ b/net/atm/proc.c +@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal, + const struct k_atm_aal_stats *stats) + { + seq_printf(seq, "%s ( %d %d %d %d %d )", aal, +- atomic_read(&stats->tx), atomic_read(&stats->tx_err), +- atomic_read(&stats->rx), atomic_read(&stats->rx_err), +- atomic_read(&stats->rx_drop)); ++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err), ++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err), ++ atomic_read_unchecked(&stats->rx_drop)); + } + + static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) +diff --git a/net/atm/resources.c b/net/atm/resources.c +index 0447d5d..3cf4728 100644 +--- a/net/atm/resources.c ++++ b/net/atm/resources.c +@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister); + static void copy_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from, + static void subtract_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c +index 919a5ce..cc6b444 100644 +--- a/net/ax25/sysctl_net_ax25.c ++++ b/net/ax25/sysctl_net_ax25.c +@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev) + { + char path[sizeof("net/ax25/") + IFNAMSIZ]; + int k; +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL); + if (!table) +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c +index d074d06..ad3cfcf 100644 +--- a/net/batman-adv/bat_iv_ogm.c ++++ b/net/batman-adv/bat_iv_ogm.c +@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) + + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); +- 
atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno); ++ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno); + + hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN; + ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC); +@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) + batadv_ogm_packet->tvlv_len = htons(tvlv_len); + + /* change sequence number to network order */ +- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno); ++ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno); + batadv_ogm_packet->seqno = htonl(seqno); +- atomic_inc(&hard_iface->bat_iv.ogm_seqno); ++ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno); + + batadv_iv_ogm_slide_own_bcast_window(hard_iface); + +@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, + return; + + /* could be changed by schedule_own_packet() */ +- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno); ++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno); + + if (ogm_packet->flags & BATADV_DIRECTLINK) + has_directlink_flag = true; +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c +index c46387a..6ad5ef9 100644 +--- a/net/batman-adv/fragmentation.c ++++ b/net/batman-adv/fragmentation.c +@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb, + frag_header.packet_type = BATADV_UNICAST_FRAG; + frag_header.version = BATADV_COMPAT_VERSION; + frag_header.ttl = BATADV_TTL; +- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno)); ++ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno)); + frag_header.reserved = 0; + frag_header.no = 0; + frag_header.total_size = htons(skb->len); +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c +index f82c267..0e56d32 100644 +--- a/net/batman-adv/soft-interface.c ++++ b/net/batman-adv/soft-interface.c +@@ -283,7 +283,7 @@ send: + primary_if->net_dev->dev_addr, ETH_ALEN); + + /* set broadcast sequence number */ +- seqno = atomic_inc_return(&bat_priv->bcast_seqno); ++ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno); + bcast_packet->seqno = htonl(seqno); + + batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay); +@@ -707,7 +707,7 @@ static int batadv_softif_init_late(struct net_device *dev) + atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN); + + atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); +- atomic_set(&bat_priv->bcast_seqno, 1); ++ atomic_set_unchecked(&bat_priv->bcast_seqno, 1); + atomic_set(&bat_priv->tt.vn, 0); + atomic_set(&bat_priv->tt.local_changes, 0); + atomic_set(&bat_priv->tt.ogm_append_cnt, 0); +@@ -721,7 +721,7 @@ static int batadv_softif_init_late(struct net_device *dev) + + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); +- atomic_set(&bat_priv->frag_seqno, random_seqno); ++ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno); + + bat_priv->primary_if = NULL; + bat_priv->num_ifaces = 0; +diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h +index 78370ab..1cb3614 100644 +--- a/net/batman-adv/types.h ++++ b/net/batman-adv/types.h +@@ -66,7 +66,7 @@ enum batadv_dhcp_recipient { + struct batadv_hard_iface_bat_iv { + unsigned char *ogm_buff; + int ogm_buff_len; +- atomic_t ogm_seqno; ++ atomic_unchecked_t ogm_seqno; + }; + + /** +@@ -714,7 +714,7 @@ struct batadv_priv { + atomic_t bonding; + atomic_t 
fragmentation; + atomic_t packet_size_max; +- atomic_t frag_seqno; ++ atomic_unchecked_t frag_seqno; + #ifdef CONFIG_BATMAN_ADV_BLA + atomic_t bridge_loop_avoidance; + #endif +@@ -730,7 +730,7 @@ struct batadv_priv { + #endif + uint32_t isolation_mark; + uint32_t isolation_mark_mask; +- atomic_t bcast_seqno; ++ atomic_unchecked_t bcast_seqno; + atomic_t bcast_queue_left; + atomic_t batman_queue_left; + char num_ifaces; +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c +index 7552f9e..074ce29 100644 +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + +- len = min_t(unsigned int, len, sizeof(uf)); ++ len = min((size_t)len, sizeof(uf)); + if (copy_from_user(&uf, optval, len)) { + err = -EFAULT; + break; +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 6afa3b4..7a14180 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -3740,8 +3740,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, + break; + + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ ++ memcpy(&rfc, (void *)val, olen); + + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && + rfc.mode != chan->mode) +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c +index 27ae841..e5a8343 100644 +--- a/net/bluetooth/l2cap_sock.c ++++ b/net/bluetooth/l2cap_sock.c +@@ -625,7 +625,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct l2cap_options opts; +- int len, err = 0; ++ int err = 0; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -652,7 +653,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, + opts.max_tx = chan->max_tx; + opts.txwin_size = chan->tx_win; + +- len = min_t(unsigned int, sizeof(opts), optlen); ++ len = min(sizeof(opts), len); + if (copy_from_user((char *) &opts, optval, len)) { + err = -EFAULT; + break; +@@ -734,7 +735,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, + struct bt_security sec; + struct bt_power pwr; + struct l2cap_conn *conn; +- int len, err = 0; ++ int err = 0; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -757,7 +759,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); ++ len = min(sizeof(sec), len); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; +@@ -852,7 +854,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, + + pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; + +- len = min_t(unsigned int, sizeof(pwr), optlen); ++ len = min(sizeof(pwr), len); + if (copy_from_user((char *) &pwr, optval, len)) { + err = -EFAULT; + break; +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c +index 3c2d3e4..884855a 100644 +--- a/net/bluetooth/rfcomm/sock.c ++++ b/net/bluetooth/rfcomm/sock.c +@@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c + struct sock *sk = sock->sk; + struct bt_security sec; + int err = 0; +- size_t len; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -694,7 +694,7 @@ static int 
rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); ++ len = min(sizeof(sec), len); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; +diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c +index f9c0980a..fcbbfeb 100644 +--- a/net/bluetooth/rfcomm/tty.c ++++ b/net/bluetooth/rfcomm/tty.c +@@ -717,7 +717,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) + BT_DBG("tty %p id %d", tty, tty->index); + + BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, +- dev->channel, dev->port.count); ++ dev->channel, atomic_read(&dev->port.count)); + + err = tty_port_open(&dev->port, tty, filp); + if (err) +@@ -740,7 +740,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, +- dev->port.count); ++ atomic_read(&dev->port.count)); + + tty_port_close(&dev->port, tty, filp); + } +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index 1059ed3..d70846a 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + tmp.valid_hooks = t->table->valid_hooks; + } + mutex_unlock(&ebt_mutex); +- if (copy_to_user(user, &tmp, *len) != 0) { ++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) { + BUGPRINT("c2u Didn't work\n"); + ret = -EFAULT; + break; +@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, + goto out; + tmp.valid_hooks = t->valid_hooks; + +- if (copy_to_user(user, &tmp, *len) != 0) { ++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) { + ret = -EFAULT; + break; + } +@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, + tmp.entries_size = t->table->entries_size; + tmp.valid_hooks = t->table->valid_hooks; + +- if (copy_to_user(user, &tmp, *len) != 0) { ++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) { + ret = -EFAULT; + break; + } +diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c +index 0f45522..dab651f 100644 +--- a/net/caif/cfctrl.c ++++ b/net/caif/cfctrl.c +@@ -10,6 +10,7 @@ + #include <linux/spinlock.h> + #include <linux/slab.h> + #include <linux/pkt_sched.h> ++#include <linux/sched.h> + #include <net/caif/caif_layer.h> + #include <net/caif/cfpkt.h> + #include <net/caif/cfctrl.h> +@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void) + memset(&dev_info, 0, sizeof(dev_info)); + dev_info.id = 0xff; + cfsrvl_init(&this->serv, 0, &dev_info, false); +- atomic_set(&this->req_seq_no, 1); +- atomic_set(&this->rsp_seq_no, 1); ++ atomic_set_unchecked(&this->req_seq_no, 1); ++ atomic_set_unchecked(&this->rsp_seq_no, 1); + this->serv.layer.receive = cfctrl_recv; + sprintf(this->serv.layer.name, "ctrl"); + this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; +@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl, + struct cfctrl_request_info *req) + { + spin_lock_bh(&ctrl->info_list_lock); +- atomic_inc(&ctrl->req_seq_no); +- req->sequence_no = atomic_read(&ctrl->req_seq_no); ++ atomic_inc_unchecked(&ctrl->req_seq_no); ++ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no); + list_add_tail(&req->list, &ctrl->list); + spin_unlock_bh(&ctrl->info_list_lock); + } +@@ -149,7 +150,7 @@ static struct 
cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, + if (p != first) + pr_warn("Requests are not received in order\n"); + +- atomic_set(&ctrl->rsp_seq_no, ++ atomic_set_unchecked(&ctrl->rsp_seq_no, + p->sequence_no); + list_del(&p->list); + goto out; +diff --git a/net/can/af_can.c b/net/can/af_can.c +index a27f8aa..67174a3 100644 +--- a/net/can/af_can.c ++++ b/net/can/af_can.c +@@ -863,7 +863,7 @@ static const struct net_proto_family can_family_ops = { + }; + + /* notifier block for netdevice event */ +-static struct notifier_block can_netdev_notifier __read_mostly = { ++static struct notifier_block can_netdev_notifier = { + .notifier_call = can_notifier, + }; + +diff --git a/net/can/bcm.c b/net/can/bcm.c +index dcb75c0..24b1b43 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void) + } + + /* create /proc/net/can-bcm directory */ +- proc_dir = proc_mkdir("can-bcm", init_net.proc_net); ++ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net); + return 0; + } + +diff --git a/net/can/gw.c b/net/can/gw.c +index 050a211..bb9fe33 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops, + "default: " __stringify(CGW_DEFAULT_HOPS) ")"); + + static HLIST_HEAD(cgw_list); +-static struct notifier_block notifier; + + static struct kmem_cache *cgw_cache __read_mostly; + +@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) + return err; + } + ++static struct notifier_block notifier = { ++ .notifier_call = cgw_notifier ++}; ++ + static __init int cgw_module_init(void) + { + /* sanitize given module parameter */ +@@ -962,7 +965,6 @@ static __init int cgw_module_init(void) + return -ENOMEM; + + /* set notifier */ +- notifier.notifier_call = cgw_notifier; + register_netdevice_notifier(¬ifier); + + if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) { +diff --git a/net/can/proc.c b/net/can/proc.c +index b543470..d2ddae2 100644 +--- a/net/can/proc.c ++++ b/net/can/proc.c +@@ -468,7 +468,7 @@ static void can_remove_proc_readentry(const char *name) + void can_init_proc(void) + { + /* create /proc/net/can directory */ +- can_dir = proc_mkdir("can", init_net.proc_net); ++ can_dir = proc_mkdir_restrict("can", init_net.proc_net); + + if (!can_dir) { + printk(KERN_INFO "can: failed to create /proc/net/can . 
" +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 988721a..947846d 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con); + #define MAX_ADDR_STR_LEN 64 /* 54 is enough */ + + static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN]; +-static atomic_t addr_str_seq = ATOMIC_INIT(0); ++static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0); + + static struct page *zero_page; /* used in certain error cases */ + +@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss) + struct sockaddr_in *in4 = (struct sockaddr_in *) ss; + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; + +- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK; ++ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK; + s = addr_str[i]; + + switch (ss->ss_family) { +diff --git a/net/compat.c b/net/compat.c +index cbc1a2a..ab7644e 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) + return -EFAULT; + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) + kmsg->msg_namelen = sizeof(struct sockaddr_storage); +- kmsg->msg_name = compat_ptr(tmp1); +- kmsg->msg_iov = compat_ptr(tmp2); +- kmsg->msg_control = compat_ptr(tmp3); ++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1); ++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2); ++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3); + return 0; + } + +@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + + if (kern_msg->msg_name && kern_msg->msg_namelen) { + if (mode == VERIFY_READ) { +- int err = move_addr_to_kernel(kern_msg->msg_name, ++ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name, + kern_msg->msg_namelen, + kern_address); + if (err < 0) +@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + } + + tot_len = iov_from_user_compat_to_kern(kern_iov, +- (struct compat_iovec __user *)kern_msg->msg_iov, ++ (struct compat_iovec __force_user *)kern_msg->msg_iov, + kern_msg->msg_iovlen); + if (tot_len >= 0) + kern_msg->msg_iov = kern_iov; +@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + + #define CMSG_COMPAT_FIRSTHDR(msg) \ + (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? 
\ +- (struct compat_cmsghdr __user *)((msg)->msg_control) : \ ++ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \ + (struct compat_cmsghdr __user *)NULL) + + #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \ + ((ucmlen) >= sizeof(struct compat_cmsghdr) && \ + (ucmlen) <= (unsigned long) \ + ((mhdr)->msg_controllen - \ +- ((char *)(ucmsg) - (char *)(mhdr)->msg_control))) ++ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control))) + + static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg, + struct compat_cmsghdr __user *cmsg, int cmsg_len) + { + char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len); +- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) > ++ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) > + msg->msg_controllen) + return NULL; + return (struct compat_cmsghdr __user *)ptr; +@@ -223,7 +223,7 @@ Efault: + + int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data) + { +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; + struct compat_cmsghdr cmhdr; + struct compat_timeval ctv; + struct compat_timespec cts[3]; +@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat + + void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) + { +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control; + int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); + int fdnum = scm->fp->count; + struct file **fp = scm->fp->fp; +@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level, + return -EFAULT; + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); ++ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime)); + set_fs(old_fs); + + return err; +@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, + len = sizeof(ktime); + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len); ++ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len); + set_fs(old_fs); + + if (!err) { +@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + { +- struct compat_group_req __user *gr32 = (void *)optval; ++ struct compat_group_req __user *gr32 = (void __user *)optval; + struct group_req __user *kgr = + compat_alloc_user_space(sizeof(struct group_req)); + u32 interface; +@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + case MCAST_BLOCK_SOURCE: + case MCAST_UNBLOCK_SOURCE: + { +- struct compat_group_source_req __user *gsr32 = (void *)optval; ++ struct compat_group_source_req __user *gsr32 = (void __user *)optval; + struct group_source_req __user *kgsr = compat_alloc_user_space( + sizeof(struct group_source_req)); + u32 interface; +@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, + } + case MCAST_MSFILTER: + { +- struct compat_group_filter __user *gf32 = (void *)optval; ++ struct compat_group_filter __user *gf32 = (void __user *)optval; + 
struct group_filter __user *kgf; + u32 interface, fmode, numsrc; + +@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, + char __user *optval, int __user *optlen, + int (*getsockopt)(struct sock *, int, int, char __user *, int __user *)) + { +- struct compat_group_filter __user *gf32 = (void *)optval; ++ struct compat_group_filter __user *gf32 = (void __user *)optval; + struct group_filter __user *kgf; + int __user *koptlen; + u32 interface, fmode, numsrc; +@@ -804,7 +804,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) + + if (call < SYS_SOCKET || call > SYS_SENDMMSG) + return -EINVAL; +- if (copy_from_user(a, args, nas[call])) ++ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call])) + return -EFAULT; + a0 = a[0]; + a1 = a[1]; +diff --git a/net/core/datagram.c b/net/core/datagram.c +index a16ed7b..eb44d17 100644 +--- a/net/core/datagram.c ++++ b/net/core/datagram.c +@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags) + } + + kfree_skb(skb); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + sk_mem_reclaim_partial(sk); + + return err; +diff --git a/net/core/dev.c b/net/core/dev.c +index 37bddf7..c78c480 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1695,14 +1695,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { +- atomic_long_inc(&dev->rx_dropped); ++ atomic_long_inc_unchecked(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + } + + if (unlikely(!is_skb_forwardable(dev, skb))) { +- atomic_long_inc(&dev->rx_dropped); ++ atomic_long_inc_unchecked(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -2460,7 +2460,7 @@ static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) + + struct dev_gso_cb { + void (*destructor)(struct sk_buff *skb); +-}; ++} __no_const; + + #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) + +@@ -3234,7 +3234,7 @@ enqueue: + + local_irq_restore(flags); + +- atomic_long_inc(&skb->dev->rx_dropped); ++ atomic_long_inc_unchecked(&skb->dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -3315,7 +3315,7 @@ int netif_rx_ni(struct sk_buff *skb) + } + EXPORT_SYMBOL(netif_rx_ni); + +-static void net_tx_action(struct softirq_action *h) ++static __latent_entropy void net_tx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + +@@ -3652,7 +3652,7 @@ ncls: + ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); + } else { + drop: +- atomic_long_inc(&skb->dev->rx_dropped); ++ atomic_long_inc_unchecked(&skb->dev->rx_dropped); + kfree_skb(skb); + /* Jamal, now you will not able to escape explaining + * me how you were going to use this. 
:-) +@@ -4342,7 +4342,7 @@ void netif_napi_del(struct napi_struct *napi) + } + EXPORT_SYMBOL(netif_napi_del); + +-static void net_rx_action(struct softirq_action *h) ++static __latent_entropy void net_rx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + unsigned long time_limit = jiffies + 2; +@@ -6311,7 +6311,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, + } else { + netdev_stats_to_stats64(storage, &dev->stats); + } +- storage->rx_dropped += atomic_long_read(&dev->rx_dropped); ++ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped); + return storage; + } + EXPORT_SYMBOL(dev_get_stats); +diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c +index cf999e0..c59a975 100644 +--- a/net/core/dev_ioctl.c ++++ b/net/core/dev_ioctl.c +@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name) + if (no_module && capable(CAP_NET_ADMIN)) + no_module = request_module("netdev-%s", name); + if (no_module && capable(CAP_SYS_MODULE)) { ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ ___request_module(true, "grsec_modharden_netdev", "%s", name); ++#else + if (!request_module("%s", name)) + pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n", + name); ++#endif + } + } + EXPORT_SYMBOL(dev_load); +diff --git a/net/core/filter.c b/net/core/filter.c +index ebce437..9fed9d0 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -126,7 +126,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, + void *ptr; + u32 A = 0; /* Accumulator */ + u32 X = 0; /* Index Register */ +- u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ ++ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */ + u32 tmp; + int k; + +@@ -292,10 +292,10 @@ load_b: + X = K; + continue; + case BPF_S_LD_MEM: +- A = mem[K]; ++ A = mem[K&15]; + continue; + case BPF_S_LDX_MEM: +- X = mem[K]; ++ X = mem[K&15]; + continue; + case BPF_S_MISC_TAX: + X = A; +@@ -308,10 +308,10 @@ load_b: + case BPF_S_RET_A: + return A; + case BPF_S_ST: +- mem[K] = A; ++ mem[K&15] = A; + continue; + case BPF_S_STX: +- mem[K] = X; ++ mem[K&15] = X; + continue; + case BPF_S_ANC_PROTOCOL: + A = ntohs(skb->protocol); +@@ -395,9 +395,10 @@ load_b: + continue; + #endif + default: +- WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", ++ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n", + fentry->code, fentry->jt, + fentry->jf, fentry->k); ++ BUG(); + return 0; + } + } +@@ -420,7 +421,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen) + u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ + int pc, ret = 0; + +- BUILD_BUG_ON(BPF_MEMWORDS > 16); ++ BUILD_BUG_ON(BPF_MEMWORDS != 16); + masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); + if (!masks) + return -ENOMEM; +@@ -683,7 +684,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp, + fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL); + if (!fp) + return -ENOMEM; +- memcpy(fp->insns, fprog->filter, fsize); ++ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize); + + atomic_set(&fp->refcnt, 1); + fp->len = fprog->len; +diff --git a/net/core/flow.c b/net/core/flow.c +index dfa602c..3103d88 100644 +--- a/net/core/flow.c ++++ b/net/core/flow.c +@@ -61,7 +61,7 @@ struct flow_cache { + struct timer_list rnd_timer; + }; + +-atomic_t flow_cache_genid = ATOMIC_INIT(0); ++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0); + EXPORT_SYMBOL(flow_cache_genid); + static struct flow_cache flow_cache_global; 
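/*
 * The mem[K & 15] rewrites in sk_run_filter above are defence in depth:
 * check_load_and_stores() already rejects filters referencing scratch
 * cells >= BPF_MEMWORDS at attach time, and the BUILD_BUG_ON change pins
 * BPF_MEMWORDS to exactly 16 so the literal mask stays in sync with it.
 * Masking at the point of use bounds the access even if a verifier bug
 * slips through. The pattern in isolation (names are illustrative):
 */
#include <stdint.h>

#define SCRATCH_WORDS 16u		/* must remain a power of two */
#define SCRATCH_MASK  (SCRATCH_WORDS - 1u)

static uint32_t scratch_load(const uint32_t *mem, uint32_t k)
{
	/* Even a hostile k can only ever reach mem[0..15]. */
	return mem[k & SCRATCH_MASK];
}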
+ static struct kmem_cache *flow_cachep __read_mostly; +@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg) + + static int flow_entry_valid(struct flow_cache_entry *fle) + { +- if (atomic_read(&flow_cache_genid) != fle->genid) ++ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid) + return 0; + if (fle->object && !fle->object->ops->check(fle->object)) + return 0; +@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, + hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); + fcp->hash_count++; + } +- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) { ++ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) { + flo = fle->object; + if (!flo) + goto ret_object; +@@ -279,7 +279,7 @@ nocache: + } + flo = resolver(net, key, family, dir, flo, ctx); + if (fle) { +- fle->genid = atomic_read(&flow_cache_genid); ++ fle->genid = atomic_read_unchecked(&flow_cache_genid); + if (!IS_ERR(flo)) + fle->object = flo; + else +diff --git a/net/core/iovec.c b/net/core/iovec.c +index 26dc006..89e838e 100644 +--- a/net/core/iovec.c ++++ b/net/core/iovec.c +@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a + if (m->msg_name && m->msg_namelen) { + if (mode == VERIFY_READ) { + void __user *namep; +- namep = (void __user __force *) m->msg_name; ++ namep = (void __force_user *) m->msg_name; + err = move_addr_to_kernel(namep, m->msg_namelen, + address); + if (err < 0) +@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a + } + + size = m->msg_iovlen * sizeof(struct iovec); +- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) ++ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size)) + return -EFAULT; + + m->msg_iov = iov; +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 7d95f69..a6065de 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -2824,7 +2824,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { + int size, ret; +- struct ctl_table tmp = *ctl; ++ ctl_table_no_const tmp = *ctl; + + tmp.extra1 = &zero; + tmp.extra2 = &unres_qlen_max; +@@ -2886,7 +2886,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) + { +- struct ctl_table tmp = *ctl; ++ ctl_table_no_const tmp = *ctl; + int ret; + + tmp.extra1 = &zero; +@@ -3058,11 +3058,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, + memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, + sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); + } else { ++ struct neigh_table *ntable = container_of(p, struct neigh_table, parms); + dev_name_source = "default"; +- t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); +- t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; +- t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; +- t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; ++ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &ntable->gc_interval; ++ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &ntable->gc_thresh1; ++ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &ntable->gc_thresh2; ++ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &ntable->gc_thresh3; + } + + if (handler) { +diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c +index 2bf8329..2eb1423 100644 +--- a/net/core/net-procfs.c ++++ b/net/core/net-procfs.c 
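/*
 * On the ctl_table_no_const substitutions above and below: with the PaX
 * constify plugin the registered sysctl tables end up in read-only
 * memory, so a handler that wants one-shot bounds copies the entry into
 * a writable local and operates on that. Stand-alone sketch of the idiom
 * (the demo struct and all names here are invented for illustration):
 */
struct demo_ctl {
	const char *procname;
	void *extra1;
	void *extra2;
};

static int demo_handler(const struct demo_ctl *ctl)
{
	static int zero, demo_max = 100;
	struct demo_ctl tmp = *ctl;	/* *ctl itself may live in RO memory */

	tmp.extra1 = &zero;
	tmp.extra2 = &demo_max;
	return 0;	/* real code would hand &tmp to the generic handler */
}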
+@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); + +- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " ++ if (gr_proc_is_restricted()) ++ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " ++ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", ++ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, ++ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL); ++ else ++ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " + "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", + dev->name, stats->rx_bytes, stats->rx_packets, + stats->rx_errors, +@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v) + return 0; + } + +-static const struct seq_operations dev_seq_ops = { ++const struct seq_operations dev_seq_ops = { + .start = dev_seq_start, + .next = dev_seq_next, + .stop = dev_seq_stop, +@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = { + + static int softnet_seq_open(struct inode *inode, struct file *file) + { +- return seq_open(file, &softnet_seq_ops); ++ return seq_open_restrict(file, &softnet_seq_ops); + } + + static const struct file_operations softnet_seq_fops = { +@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v) + else + seq_printf(seq, "%04x", ntohs(pt->type)); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(seq, " %-8s %pf\n", ++ pt->dev ? pt->dev->name : "", NULL); ++#else + seq_printf(seq, " %-8s %pf\n", + pt->dev ? pt->dev->name : "", pt->func); ++#endif + } + + return 0; +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c +index 7c8ffd9..0cb3687 100644 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list, + int error; + LIST_HEAD(net_exit_list); + +- list_add_tail(&ops->list, list); ++ pax_list_add_tail((struct list_head *)&ops->list, list); + if (ops->init || (ops->id && ops->size)) { + for_each_net(net) { + error = ops_init(ops, net); +@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list, + + out_undo: + /* If I have an error cleanup all namespaces I initialized */ +- list_del(&ops->list); ++ pax_list_del((struct list_head *)&ops->list); + ops_exit_list(ops, &net_exit_list); + ops_free_list(ops, &net_exit_list); + return error; +@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) + struct net *net; + LIST_HEAD(net_exit_list); + +- list_del(&ops->list); ++ pax_list_del((struct list_head *)&ops->list); + for_each_net(net) + list_add_tail(&net->exit_list, &net_exit_list); + ops_exit_list(ops, &net_exit_list); +@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops) + mutex_lock(&net_mutex); + error = register_pernet_operations(&pernet_list, ops); + if (!error && (first_device == &pernet_list)) +- first_device = &ops->list; ++ first_device = (struct list_head *)&ops->list; + mutex_unlock(&net_mutex); + return error; + } +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index df9e6b1..6e68e4e 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) + struct udphdr *udph; + struct iphdr *iph; + struct ethhdr *eth; +- static atomic_t ip_ident; ++ static atomic_unchecked_t ip_ident; + struct ipv6hdr 
*ip6h; + + udp_len = len + sizeof(*udph); +@@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) + put_unaligned(0x45, (unsigned char *)iph); + iph->tos = 0; + put_unaligned(htons(ip_len), &(iph->tot_len)); +- iph->id = htons(atomic_inc_return(&ip_ident)); ++ iph->id = htons(atomic_inc_return_unchecked(&ip_ident)); + iph->frag_off = 0; + iph->ttl = 64; + iph->protocol = IPPROTO_UDP; +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index fdac61c..e5e5b46 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -3719,7 +3719,7 @@ static int __net_init pg_net_init(struct net *net) + pn->net = net; + INIT_LIST_HEAD(&pn->pktgen_threads); + pn->pktgen_exiting = false; +- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net); ++ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net); + if (!pn->proc_dir) { + pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR); + return -ENODEV; +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index aef1500..4b61acd 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -58,7 +58,7 @@ struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + rtnl_calcit_func calcit; +-}; ++} __no_const; + + static DEFINE_MUTEX(rtnl_mutex); + +@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops) + if (rtnl_link_ops_get(ops->kind)) + return -EEXIST; + +- if (!ops->dellink) +- ops->dellink = unregister_netdevice_queue; ++ if (!ops->dellink) { ++ pax_open_kernel(); ++ *(void **)&ops->dellink = unregister_netdevice_queue; ++ pax_close_kernel(); ++ } + +- list_add_tail(&ops->list, &link_ops); ++ pax_list_add_tail((struct list_head *)&ops->list, &link_ops); + return 0; + } + EXPORT_SYMBOL_GPL(__rtnl_link_register); +@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) + for_each_net(net) { + __rtnl_kill_links(net, ops); + } +- list_del(&ops->list); ++ pax_list_del((struct list_head *)&ops->list); + } + EXPORT_SYMBOL_GPL(__rtnl_link_unregister); + +diff --git a/net/core/scm.c b/net/core/scm.c +index b442e7e..6f5b5a2 100644 +--- a/net/core/scm.c ++++ b/net/core/scm.c +@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send); + int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) + { + struct cmsghdr __user *cm +- = (__force struct cmsghdr __user *)msg->msg_control; ++ = (struct cmsghdr __force_user *)msg->msg_control; + struct cmsghdr cmhdr; + int cmlen = CMSG_LEN(len); + int err; +@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) + err = -EFAULT; + if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) + goto out; +- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr))) ++ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr))) + goto out; + cmlen = CMSG_SPACE(len); + if (msg->msg_controllen < cmlen) +@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg); + void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) + { + struct cmsghdr __user *cm +- = (__force struct cmsghdr __user*)msg->msg_control; ++ = (struct cmsghdr __force_user *)msg->msg_control; + + int fdmax = 0; + int fdnum = scm->fp->count; +@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) + if (fdnum < fdmax) + fdmax = fdnum; + +- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; ++ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax; + i++, cmfptr++) + { + struct socket *sock; +diff --git 
a/net/core/skbuff.c b/net/core/skbuff.c +index 8f6391b..40bc442 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -2003,7 +2003,7 @@ EXPORT_SYMBOL(__skb_checksum); + __wsum skb_checksum(const struct sk_buff *skb, int offset, + int len, __wsum csum) + { +- const struct skb_checksum_ops ops = { ++ static const struct skb_checksum_ops ops = { + .update = csum_partial_ext, + .combine = csum_block_add_ext, + }; +@@ -3221,13 +3221,15 @@ void __init skb_init(void) + skbuff_head_cache = kmem_cache_create("skbuff_head_cache", + sizeof(struct sk_buff), + 0, +- SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC| ++ SLAB_NO_SANITIZE, + NULL); + skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", + (2*sizeof(struct sk_buff)) + + sizeof(atomic_t), + 0, +- SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC| ++ SLAB_NO_SANITIZE, + NULL); + } + +diff --git a/net/core/sock.c b/net/core/sock.c +index c806956..e5599ea 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + struct sk_buff_head *list = &sk->sk_receive_queue; + + if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + trace_sock_rcvqueue_full(sk, skb); + return -ENOMEM; + } +@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + return err; + + if (!sk_rmem_schedule(sk, skb, skb->truesize)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + return -ENOBUFS; + } + +@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + skb_dst_force(skb); + + spin_lock_irqsave(&list->lock, flags); +- skb->dropcount = atomic_read(&sk->sk_drops); ++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops); + __skb_queue_tail(list, skb); + spin_unlock_irqrestore(&list->lock, flags); + +@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) + skb->dev = NULL; + + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + goto discard_and_relse; + } + if (nested) +@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) + mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); + } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { + bh_unlock_sock(sk); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + goto discard_and_relse; + } + +@@ -998,12 +998,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + struct timeval tm; + } v; + +- int lv = sizeof(int); +- int len; ++ unsigned int lv = sizeof(int); ++ unsigned int len; + + if (get_user(len, optlen)) + return -EFAULT; +- if (len < 0) ++ if (len > INT_MAX) + return -EINVAL; + + memset(&v, 0, sizeof(v)); +@@ -1155,11 +1155,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + + case SO_PEERNAME: + { +- char address[128]; ++ char address[_K_SS_MAXSIZE]; + + if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2)) + return -ENOTCONN; +- if (lv < len) ++ if (lv < len || sizeof address < len) + return -EINVAL; + if (copy_to_user(optval, address, len)) + return -EFAULT; +@@ -1240,7 +1240,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, + + if (len > lv) + len = lv; +- if (copy_to_user(optval, &v, len)) ++ if (len > sizeof(v) || copy_to_user(optval, &v, len)) + return -EFAULT; + lenout: + if (put_user(len, optlen)) +@@ -2375,7 +2375,7 
@@ void sock_init_data(struct socket *sock, struct sock *sk) + */ + smp_wmb(); + atomic_set(&sk->sk_refcnt, 1); +- atomic_set(&sk->sk_drops, 0); ++ atomic_set_unchecked(&sk->sk_drops, 0); + } + EXPORT_SYMBOL(sock_init_data); + +@@ -2503,6 +2503,7 @@ void sock_enable_timestamp(struct sock *sk, int flag) + int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, + int level, int type) + { ++ struct sock_extended_err ee; + struct sock_exterr_skb *serr; + struct sk_buff *skb, *skb2; + int copied, err; +@@ -2524,7 +2525,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, + sock_recv_timestamp(msg, sk, skb); + + serr = SKB_EXT_ERR(skb); +- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); ++ ee = serr->ee; ++ put_cmsg(msg, level, type, sizeof ee, &ee); + + msg->msg_flags |= MSG_ERRQUEUE; + err = copied; +diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c +index c38e7a2..773e3d7 100644 +--- a/net/core/sock_diag.c ++++ b/net/core/sock_diag.c +@@ -9,26 +9,33 @@ + #include <linux/inet_diag.h> + #include <linux/sock_diag.h> + +-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; ++static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only; + static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); + static DEFINE_MUTEX(sock_diag_table_mutex); + + int sock_diag_check_cookie(void *sk, __u32 *cookie) + { ++#ifndef CONFIG_GRKERNSEC_HIDESYM + if ((cookie[0] != INET_DIAG_NOCOOKIE || + cookie[1] != INET_DIAG_NOCOOKIE) && + ((u32)(unsigned long)sk != cookie[0] || + (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1])) + return -ESTALE; + else ++#endif + return 0; + } + EXPORT_SYMBOL_GPL(sock_diag_check_cookie); + + void sock_diag_save_cookie(void *sk, __u32 *cookie) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ cookie[0] = 0; ++ cookie[1] = 0; ++#else + cookie[0] = (u32)(unsigned long)sk; + cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); ++#endif + } + EXPORT_SYMBOL_GPL(sock_diag_save_cookie); + +@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl) + mutex_lock(&sock_diag_table_mutex); + if (sock_diag_handlers[hndl->family]) + err = -EBUSY; +- else ++ else { ++ pax_open_kernel(); + sock_diag_handlers[hndl->family] = hndl; ++ pax_close_kernel(); ++ } + mutex_unlock(&sock_diag_table_mutex); + + return err; +@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld) + + mutex_lock(&sock_diag_table_mutex); + BUG_ON(sock_diag_handlers[family] != hnld); ++ pax_open_kernel(); + sock_diag_handlers[family] = NULL; ++ pax_close_kernel(); + mutex_unlock(&sock_diag_table_mutex); + } + EXPORT_SYMBOL_GPL(sock_diag_unregister); +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c +index cf9cd13..8b56af3 100644 +--- a/net/core/sysctl_net_core.c ++++ b/net/core/sysctl_net_core.c +@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write, + { + unsigned int orig_size, size; + int ret, i; +- struct ctl_table tmp = { ++ ctl_table_no_const tmp = { + .data = &size, + .maxlen = sizeof(size), + .mode = table->mode +@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { + char id[IFNAMSIZ]; +- struct ctl_table tbl = { ++ ctl_table_no_const tbl = { + .data = id, + .maxlen = IFNAMSIZ, + }; +@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = { + + static __net_init int sysctl_core_net_init(struct net *net) + { +- struct ctl_table *tbl; ++ 
ctl_table_no_const *tbl = NULL; + + net->core.sysctl_somaxconn = SOMAXCONN; + +- tbl = netns_core_table; + if (!net_eq(net, &init_net)) { +- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); ++ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL); + if (tbl == NULL) + goto err_dup; + +@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net) + if (net->user_ns != &init_user_ns) { + tbl[0].procname = NULL; + } +- } +- +- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); ++ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); ++ } else ++ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table); + if (net->core.sysctl_hdr == NULL) + goto err_reg; + + return 0; + + err_reg: +- if (tbl != netns_core_table) +- kfree(tbl); ++ kfree(tbl); + err_dup: + return -ENOMEM; + } +@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net) + kfree(tbl); + } + +-static __net_initdata struct pernet_operations sysctl_core_ops = { ++static __net_initconst struct pernet_operations sysctl_core_ops = { + .init = sysctl_core_net_init, + .exit = sysctl_core_net_exit, + }; +diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c +index 4c04848..f575934 100644 +--- a/net/decnet/af_decnet.c ++++ b/net/decnet/af_decnet.c +@@ -465,6 +465,7 @@ static struct proto dn_proto = { + .sysctl_rmem = sysctl_decnet_rmem, + .max_header = DN_MAX_NSP_DATA_HEADER + 64, + .obj_size = sizeof(struct dn_sock), ++ .slab_flags = SLAB_USERCOPY, + }; + + static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp) +diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c +index 3b726f3..1af6368 100644 +--- a/net/decnet/dn_dev.c ++++ b/net/decnet/dn_dev.c +@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table { + .extra1 = &min_t3, + .extra2 = &max_t3 + }, +- {0} ++ { } + }, + }; + +diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c +index 5325b54..a0d4d69 100644 +--- a/net/decnet/sysctl_net_decnet.c ++++ b/net/decnet/sysctl_net_decnet.c +@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write, + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, addr, len)) ++ if (len > sizeof addr || copy_to_user(buffer, addr, len)) + return -EFAULT; + + *lenp = len; +@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write, + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, devname, len)) ++ if (len > sizeof devname || copy_to_user(buffer, devname, len)) + return -EFAULT; + + *lenp = len; +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c +index 07bd8ed..c574801 100644 +--- a/net/ipv4/af_inet.c ++++ b/net/ipv4/af_inet.c +@@ -1706,13 +1706,9 @@ static int __init inet_init(void) + + BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb)); + +- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); +- if (!sysctl_local_reserved_ports) +- goto out; +- + rc = proto_register(&tcp_prot, 1); + if (rc) +- goto out_free_reserved_ports; ++ goto out; + + rc = proto_register(&udp_prot, 1); + if (rc) +@@ -1819,8 +1815,6 @@ out_unregister_udp_proto: + proto_unregister(&udp_prot); + out_unregister_tcp_proto: + proto_unregister(&tcp_prot); +-out_free_reserved_ports: +- kfree(sysctl_local_reserved_ports); + goto out; + } + +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index bdbf68b..deb4759 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -1543,7 +1543,7 @@ static 
int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); +- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^ ++ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^ + net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) +@@ -1861,7 +1861,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb, + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); +- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^ ++ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^ + net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) +@@ -2096,7 +2096,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write, + #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \ + DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush) + +-static struct devinet_sysctl_table { ++static const struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX]; + } devinet_sysctl = { +@@ -2218,7 +2218,7 @@ static __net_init int devinet_init_net(struct net *net) + int err; + struct ipv4_devconf *all, *dflt; + #ifdef CONFIG_SYSCTL +- struct ctl_table *tbl = ctl_forward_entry; ++ ctl_table_no_const *tbl = NULL; + struct ctl_table_header *forw_hdr; + #endif + +@@ -2236,7 +2236,7 @@ static __net_init int devinet_init_net(struct net *net) + goto err_alloc_dflt; + + #ifdef CONFIG_SYSCTL +- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL); ++ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL); + if (tbl == NULL) + goto err_alloc_ctl; + +@@ -2256,7 +2256,10 @@ static __net_init int devinet_init_net(struct net *net) + goto err_reg_dflt; + + err = -ENOMEM; +- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); ++ if (!net_eq(net, &init_net)) ++ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); ++ else ++ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry); + if (forw_hdr == NULL) + goto err_reg_ctl; + net->ipv4.forw_hdr = forw_hdr; +@@ -2272,8 +2275,7 @@ err_reg_ctl: + err_reg_dflt: + __devinet_sysctl_unregister(all); + err_reg_all: +- if (tbl != ctl_forward_entry) +- kfree(tbl); ++ kfree(tbl); + err_alloc_ctl: + #endif + if (dflt != &ipv4_devconf_dflt) +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index c7539e2..b455e51 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, + #ifdef CONFIG_IP_ROUTE_MULTIPATH + fib_sync_up(dev); + #endif +- atomic_inc(&net->ipv4.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); + rt_cache_flush(dev_net(dev)); + break; + case NETDEV_DOWN: + fib_del_ifaddr(ifa, NULL); +- atomic_inc(&net->ipv4.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); + if (ifa->ifa_dev->ifa_list == NULL) { + /* Last address was deleted from this interface. + * Disable IP. 
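/*
 * Common thread in the *_unchecked conversions through this region: the
 * PaX REFCOUNT feature instruments plain atomic_t so that an overflow
 * traps, closing reference-count wraps. Counters that are allowed to
 * wrap, such as drop statistics and generation ids like dev_addr_genid,
 * are moved to an opt-out type instead. Illustrative definitions only;
 * the real ones live in the patched arch atomic headers:
 */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* No overflow trap: wrap-around here is expected and harmless. */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}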
+@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo + #ifdef CONFIG_IP_ROUTE_MULTIPATH + fib_sync_up(dev); + #endif +- atomic_inc(&net->ipv4.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid); + rt_cache_flush(net); + break; + case NETDEV_DOWN: +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 9d43468..ffa28cc 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) + nh->nh_saddr = inet_select_addr(nh->nh_dev, + nh->nh_gw, + nh->nh_parent->fib_scope); +- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); ++ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid); + + return nh->nh_saddr; + } +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 0d1e2cb..4501a2c 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; + EXPORT_SYMBOL(inet_csk_timer_bug_msg); + #endif + +-unsigned long *sysctl_local_reserved_ports; ++unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)]; + EXPORT_SYMBOL(sysctl_local_reserved_ports); + + void inet_get_local_port_range(struct net *net, int *low, int *high) +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c +index 8b9cf27..0d8d592 100644 +--- a/net/ipv4/inet_hashtables.c ++++ b/net/ipv4/inet_hashtables.c +@@ -18,6 +18,7 @@ + #include <linux/sched.h> + #include <linux/slab.h> + #include <linux/wait.h> ++#include <linux/security.h> + + #include <net/inet_connection_sock.h> + #include <net/inet_hashtables.h> +@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk) + return inet_ehashfn(net, laddr, lport, faddr, fport); + } + ++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); ++ + /* + * Allocate and initialize a new local port bind bucket. + * The bindhash mutex for snum's hash chain must be held here. 
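/*
 * On the inet_connection_sock.c hunk above: replacing the boot-time
 * kzalloc(65536 / 8) with a fixed-size array gives the reserved-ports
 * bitmap a compile-time size, which size-overflow and usercopy checking
 * can then see, and it removes an allocation-failure path from
 * inet_init() (the matching kfree and sysctl wiring are dropped in the
 * af_inet.c and sysctl_net_ipv4.c hunks). Self-contained sizing sketch,
 * with illustrative names:
 */
#include <limits.h>

#define PORT_COUNT 65536u

/* One bit per port: 65536 / 8 = 8192 bytes, as in the patched declaration. */
static unsigned long reserved_ports[PORT_COUNT / (sizeof(unsigned long) * CHAR_BIT)];

static int port_is_reserved(unsigned int port)
{
	const unsigned int bits = sizeof(unsigned long) * CHAR_BIT;

	return (int)((reserved_ports[port / bits] >> (port % bits)) & 1u);
}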
+@@ -554,6 +557,8 @@ ok: + twrefcnt += inet_twsk_bind_unhash(tw, hinfo); + spin_unlock(&head->lock); + ++ gr_update_task_in_ip_table(current, inet_sk(sk)); ++ + if (tw) { + inet_twsk_deschedule(tw, death_row); + while (twrefcnt) { +diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c +index bf2cb4a..d83ba8a 100644 +--- a/net/ipv4/inetpeer.c ++++ b/net/ipv4/inetpeer.c +@@ -482,7 +482,7 @@ relookup: + if (p) { + p->daddr = *daddr; + atomic_set(&p->refcnt, 1); +- atomic_set(&p->rid, 0); ++ atomic_set_unchecked(&p->rid, 0); + p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; + p->rate_tokens = 0; + /* 60*HZ is arbitrary, but chosen enough high so that the first +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index c10a3ce..dd71f84 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct ipq *qp) + return 0; + + start = qp->rid; +- end = atomic_inc_return(&peer->rid); ++ end = atomic_inc_return_unchecked(&peer->rid); + qp->rid = end; + + rc = qp->q.fragments && (end - start) > max; +@@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_table[] = { + + static int __net_init ip4_frags_ns_ctl_register(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table = NULL; + struct ctl_table_header *hdr; + +- table = ip4_frags_ns_ctl_table; + if (!net_eq(net, &init_net)) { +- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); ++ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); + if (table == NULL) + goto err_alloc; + +@@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) + /* Don't export sysctls to unprivileged users */ + if (net->user_ns != &init_user_ns) + table[0].procname = NULL; +- } ++ hdr = register_net_sysctl(net, "net/ipv4", table); ++ } else ++ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table); + +- hdr = register_net_sysctl(net, "net/ipv4", table); + if (hdr == NULL) + goto err_reg; + +@@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) + return 0; + + err_reg: +- if (!net_eq(net, &init_net)) +- kfree(table); ++ kfree(table); + err_alloc: + return -ENOMEM; + } +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 94213c8..8bdb342 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -115,7 +115,7 @@ static bool log_ecn_error = true; + module_param(log_ecn_error, bool, 0644); + MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); + +-static struct rtnl_link_ops ipgre_link_ops __read_mostly; ++static struct rtnl_link_ops ipgre_link_ops; + static int ipgre_tunnel_init(struct net_device *dev); + + static int ipgre_net_id __read_mostly; +@@ -732,7 +732,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { + [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 }, + }; + +-static struct rtnl_link_ops ipgre_link_ops __read_mostly = { ++static struct rtnl_link_ops ipgre_link_ops = { + .kind = "gre", + .maxtype = IFLA_GRE_MAX, + .policy = ipgre_policy, +@@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = { + .fill_info = ipgre_fill_info, + }; + +-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = { ++static struct rtnl_link_ops ipgre_tap_ops = { + .kind = "gretap", + .maxtype = IFLA_GRE_MAX, + .policy = ipgre_policy, +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index 580dd96..9fcef7e 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -1171,7 +1171,8 
@@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, + len = min_t(unsigned int, len, opt->optlen); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, opt->__data, len)) ++ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) || ++ copy_to_user(optval, opt->__data, len)) + return -EFAULT; + return 0; + } +@@ -1302,7 +1303,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, + if (sk->sk_type != SOCK_STREAM) + return -ENOPROTOOPT; + +- msg.msg_control = optval; ++ msg.msg_control = (void __force_kernel *)optval; + msg.msg_controllen = len; + msg.msg_flags = flags; + +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index e4a8f76..dd8ad72 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -44,7 +44,7 @@ + #include <net/net_namespace.h> + #include <net/netns/generic.h> + +-static struct rtnl_link_ops vti_link_ops __read_mostly; ++static struct rtnl_link_ops vti_link_ops; + + static int vti_net_id __read_mostly; + static int vti_tunnel_init(struct net_device *dev); +@@ -360,7 +360,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = { + [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + }; + +-static struct rtnl_link_ops vti_link_ops __read_mostly = { ++static struct rtnl_link_ops vti_link_ops = { + .kind = "vti", + .maxtype = IFLA_VTI_MAX, + .policy = vti_policy, +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c +index b3e86ea..18ce98c 100644 +--- a/net/ipv4/ipconfig.c ++++ b/net/ipv4/ipconfig.c +@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg); ++ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); + set_fs(oldfs); + return res; + } +@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg); ++ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg); + set_fs(oldfs); + return res; + } +@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg) + + mm_segment_t oldfs = get_fs(); + set_fs(get_ds()); +- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg); ++ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg); + set_fs(oldfs); + return res; + } +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c +index 62eaa00..29b2dc2 100644 +--- a/net/ipv4/ipip.c ++++ b/net/ipv4/ipip.c +@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); + static int ipip_net_id __read_mostly; + + static int ipip_tunnel_init(struct net_device *dev); +-static struct rtnl_link_ops ipip_link_ops __read_mostly; ++static struct rtnl_link_ops ipip_link_ops; + + static int ipip_err(struct sk_buff *skb, u32 info) + { +@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = { + [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 }, + }; + +-static struct rtnl_link_ops ipip_link_ops __read_mostly = { ++static struct rtnl_link_ops ipip_link_ops = { + .kind = "ipip", + .maxtype = IFLA_IPTUN_MAX, + .policy = ipip_policy, +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index f95b6f9..2ee2097 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -885,14 +885,14 @@ static int compat_table_info(const struct 
xt_table_info *info, + #endif + + static int get_info(struct net *net, void __user *user, +- const int *len, int compat) ++ int len, int compat) + { + char name[XT_TABLE_MAXNAMELEN]; + struct xt_table *t; + int ret; + +- if (*len != sizeof(struct arpt_getinfo)) { +- duprintf("length %u != %Zu\n", *len, ++ if (len != sizeof(struct arpt_getinfo)) { ++ duprintf("length %u != %Zu\n", len, + sizeof(struct arpt_getinfo)); + return -EINVAL; + } +@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user, + info.size = private->size; + strcpy(info.name, name); + +- if (copy_to_user(user, &info, *len) != 0) ++ if (copy_to_user(user, &info, len) != 0) + ret = -EFAULT; + else + ret = 0; +@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, + + switch (cmd) { + case ARPT_SO_GET_INFO: +- ret = get_info(sock_net(sk), user, len, 1); ++ ret = get_info(sock_net(sk), user, *len, 1); + break; + case ARPT_SO_GET_ENTRIES: + ret = compat_get_entries(sock_net(sk), user, len); +@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len + + switch (cmd) { + case ARPT_SO_GET_INFO: +- ret = get_info(sock_net(sk), user, len, 0); ++ ret = get_info(sock_net(sk), user, *len, 0); + break; + + case ARPT_SO_GET_ENTRIES: +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 99e810f..3711b81 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info, + #endif + + static int get_info(struct net *net, void __user *user, +- const int *len, int compat) ++ int len, int compat) + { + char name[XT_TABLE_MAXNAMELEN]; + struct xt_table *t; + int ret; + +- if (*len != sizeof(struct ipt_getinfo)) { +- duprintf("length %u != %zu\n", *len, ++ if (len != sizeof(struct ipt_getinfo)) { ++ duprintf("length %u != %zu\n", len, + sizeof(struct ipt_getinfo)); + return -EINVAL; + } +@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user, + info.size = private->size; + strcpy(info.name, name); + +- if (copy_to_user(user, &info, *len) != 0) ++ if (copy_to_user(user, &info, len) != 0) + ret = -EFAULT; + else + ret = 0; +@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + + switch (cmd) { + case IPT_SO_GET_INFO: +- ret = get_info(sock_net(sk), user, len, 1); ++ ret = get_info(sock_net(sk), user, *len, 1); + break; + case IPT_SO_GET_ENTRIES: + ret = compat_get_entries(sock_net(sk), user, len); +@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + + switch (cmd) { + case IPT_SO_GET_INFO: +- ret = get_info(sock_net(sk), user, len, 0); ++ ret = get_info(sock_net(sk), user, *len, 0); + break; + + case IPT_SO_GET_ENTRIES: +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c +index 2510c02..cfb34fa 100644 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c +@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net) + spin_lock_init(&cn->lock); + + #ifdef CONFIG_PROC_FS +- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net); ++ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net); + if (!cn->procdir) { + pr_err("Unable to proc dir entry\n"); + return -ENOMEM; +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index e21934b..4e7cb58 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -59,7 +59,7 @@ struct 
ping_table { + }; + + static struct ping_table ping_table; +-struct pingv6_ops pingv6_ops; ++struct pingv6_ops *pingv6_ops; + EXPORT_SYMBOL_GPL(pingv6_ops); + + static u16 ping_port_rover; +@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, + return -ENODEV; + } + } +- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, ++ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev, + scoped); + rcu_read_unlock(); + +@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info) + } + #if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { +- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err); ++ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err); + #endif + } + +@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info) + info, (u8 *)icmph); + #if IS_ENABLED(CONFIG_IPV6) + } else if (family == AF_INET6) { +- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, ++ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0, + info, (u8 *)icmph); + #endif + } +@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + return ip_recv_error(sk, msg, len, addr_len); + #if IS_ENABLED(CONFIG_IPV6) + } else if (family == AF_INET6) { +- return pingv6_ops.ipv6_recv_error(sk, msg, len, ++ return pingv6_ops->ipv6_recv_error(sk, msg, len, + addr_len); + #endif + } +@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + } + + if (inet6_sk(sk)->rxopt.all) +- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb); ++ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb); + if (skb->protocol == htons(ETH_P_IPV6) && + inet6_sk(sk)->rxopt.all) +- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb); ++ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb); + else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags) + ip_cmsg_recv(msg, skb); + #endif +@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, + from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), + 0, sock_i_ino(sp), + atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops)); ++ atomic_read_unchecked(&sp->sk_drops)); + } + + static int ping_v4_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c +index 11c8d81..d67116b 100644 +--- a/net/ipv4/raw.c ++++ b/net/ipv4/raw.c +@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) + int raw_rcv(struct sock *sk, struct sk_buff *skb) + { + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -748,16 +748,20 @@ static int raw_init(struct sock *sk) + + static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) + { ++ struct icmp_filter filter; ++ + if (optlen > sizeof(struct icmp_filter)) + optlen = sizeof(struct icmp_filter); +- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) ++ if (copy_from_user(&filter, optval, optlen)) + return -EFAULT; ++ raw_sk(sk)->filter = filter; + return 0; + } + + static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen) + { + int len, ret = -EFAULT; ++ struct icmp_filter filter; + + if (get_user(len, optlen)) + goto out; +@@ -767,8 +771,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o + if (len > sizeof(struct icmp_filter)) + len = sizeof(struct icmp_filter); + ret = -EFAULT; +- if 
(put_user(len, optlen) || +- copy_to_user(optval, &raw_sk(sk)->filter, len)) ++ filter = raw_sk(sk)->filter; ++ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len)) + goto out; + ret = 0; + out: return ret; +@@ -997,7 +1001,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) + 0, 0L, 0, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), + 0, sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); ++ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops)); + } + + static int raw_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index ca5a01e..8c5cdb4 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -234,7 +234,7 @@ static const struct seq_operations rt_cache_seq_ops = { + + static int rt_cache_seq_open(struct inode *inode, struct file *file) + { +- return seq_open(file, &rt_cache_seq_ops); ++ return seq_open_restrict(file, &rt_cache_seq_ops); + } + + static const struct file_operations rt_cache_seq_fops = { +@@ -325,7 +325,7 @@ static const struct seq_operations rt_cpu_seq_ops = { + + static int rt_cpu_seq_open(struct inode *inode, struct file *file) + { +- return seq_open(file, &rt_cpu_seq_ops); ++ return seq_open_restrict(file, &rt_cpu_seq_ops); + } + + static const struct file_operations rt_cpu_seq_fops = { +@@ -363,7 +363,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v) + + static int rt_acct_proc_open(struct inode *inode, struct file *file) + { +- return single_open(file, rt_acct_proc_show, NULL); ++ return single_open_restrict(file, rt_acct_proc_show, NULL); + } + + static const struct file_operations rt_acct_proc_fops = { +@@ -465,11 +465,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, + + #define IP_IDENTS_SZ 2048u + struct ip_ident_bucket { +- atomic_t id; ++ atomic_unchecked_t id; + u32 stamp32; + }; + +-static struct ip_ident_bucket *ip_idents __read_mostly; ++static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly; + + /* In order to protect privacy, we add a perturbation to identifiers + * if one generator is seldom used. 
This makes hard for an attacker +@@ -485,7 +485,7 @@ u32 ip_idents_reserve(u32 hash, int segs) + if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) + delta = prandom_u32_max(now - old); + +- return atomic_add_return(segs + delta, &bucket->id) - segs; ++ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs; + } + EXPORT_SYMBOL(ip_idents_reserve); + +@@ -2631,34 +2631,34 @@ static struct ctl_table ipv4_route_flush_table[] = { + .maxlen = sizeof(int), + .mode = 0200, + .proc_handler = ipv4_sysctl_rtcache_flush, ++ .extra1 = &init_net, + }, + { }, + }; + + static __net_init int sysctl_route_net_init(struct net *net) + { +- struct ctl_table *tbl; ++ ctl_table_no_const *tbl = NULL; + +- tbl = ipv4_route_flush_table; + if (!net_eq(net, &init_net)) { +- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); ++ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL); + if (tbl == NULL) + goto err_dup; + + /* Don't export sysctls to unprivileged users */ + if (net->user_ns != &init_user_ns) + tbl[0].procname = NULL; +- } +- tbl[0].extra1 = net; ++ tbl[0].extra1 = net; ++ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); ++ } else ++ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table); + +- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); + if (net->ipv4.route_hdr == NULL) + goto err_reg; + return 0; + + err_reg: +- if (tbl != ipv4_route_flush_table) +- kfree(tbl); ++ kfree(tbl); + err_dup: + return -ENOMEM; + } +@@ -2681,8 +2681,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = { + + static __net_init int rt_genid_init(struct net *net) + { +- atomic_set(&net->ipv4.rt_genid, 0); +- atomic_set(&net->fnhe_genid, 0); ++ atomic_set_unchecked(&net->ipv4.rt_genid, 0); ++ atomic_set_unchecked(&net->fnhe_genid, 0); + get_random_bytes(&net->ipv4.dev_addr_genid, + sizeof(net->ipv4.dev_addr_genid)); + return 0; +@@ -2725,11 +2725,7 @@ int __init ip_rt_init(void) + { + int rc = 0; + +- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL); +- if (!ip_idents) +- panic("IP: failed to allocate ip_idents\n"); +- +- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); ++ prandom_bytes(ip_idents, sizeof(ip_idents)); + + #ifdef CONFIG_IP_ROUTE_CLASSID + ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index 44eba05..b36864b 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write, + container_of(table->data, struct net, ipv4.sysctl_local_ports.range); + int ret; + int range[2]; +- struct ctl_table tmp = { ++ ctl_table_no_const tmp = { + .data = &range, + .maxlen = sizeof(range), + .mode = table->mode, +@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, + int ret; + gid_t urange[2]; + kgid_t low, high; +- struct ctl_table tmp = { ++ ctl_table_no_const tmp = { + .data = &urange, + .maxlen = sizeof(urange), + .mode = table->mode, +@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { + char val[TCP_CA_NAME_MAX]; +- struct ctl_table tbl = { ++ ctl_table_no_const tbl = { + .data = val, + .maxlen = TCP_CA_NAME_MAX, + }; +@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct 
ctl_table *ctl, + void __user *buffer, size_t *lenp, + loff_t *ppos) + { +- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, }; ++ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, }; + int ret; + + tbl.data = kmalloc(tbl.maxlen, GFP_USER); +@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl, + void __user *buffer, size_t *lenp, + loff_t *ppos) + { +- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; ++ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX }; + int ret; + + tbl.data = kmalloc(tbl.maxlen, GFP_USER); +@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) + { +- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; ++ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; + struct tcp_fastopen_context *ctxt; + int ret; + u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ +@@ -438,7 +438,7 @@ static struct ctl_table ipv4_table[] = { + }, + { + .procname = "ip_local_reserved_ports", +- .data = NULL, /* initialized in sysctl_ipv4_init */ ++ .data = sysctl_local_reserved_ports, + .maxlen = 65536, + .mode = 0644, + .proc_handler = proc_do_large_bitmap, +@@ -843,13 +843,12 @@ static struct ctl_table ipv4_net_table[] = { + + static __net_init int ipv4_sysctl_init_net(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table = NULL; + +- table = ipv4_net_table; + if (!net_eq(net, &init_net)) { + int i; + +- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); ++ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL); + if (table == NULL) + goto err_alloc; + +@@ -872,15 +871,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) + net->ipv4.sysctl_local_ports.range[0] = 32768; + net->ipv4.sysctl_local_ports.range[1] = 61000; + +- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); ++ if (!net_eq(net, &init_net)) ++ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); ++ else ++ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table); + if (net->ipv4.ipv4_hdr == NULL) + goto err_reg; + + return 0; + + err_reg: +- if (!net_eq(net, &init_net)) +- kfree(table); ++ kfree(table); + err_alloc: + return -ENOMEM; + } +@@ -902,16 +903,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = { + static __init int sysctl_ipv4_init(void) + { + struct ctl_table_header *hdr; +- struct ctl_table *i; +- +- for (i = ipv4_table; i->procname; i++) { +- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) { +- i->data = sysctl_local_reserved_ports; +- break; +- } +- } +- if (!i->procname) +- return -EINVAL; + + hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); + if (hdr == NULL) +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 3898694..9bd1a03 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -761,7 +761,7 @@ static void tcp_update_pacing_rate(struct sock *sk) + * without any lock. We want to make sure compiler wont store + * intermediate values in this location. 
+ */ +- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, ++ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate, + sk->sk_max_pacing_rate); + } + +@@ -4484,7 +4484,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, + * simplifies code) + */ + static void +-tcp_collapse(struct sock *sk, struct sk_buff_head *list, ++__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list, + struct sk_buff *head, struct sk_buff *tail, + u32 start, u32 end) + { +@@ -5561,6 +5561,7 @@ discard: + tcp_paws_reject(&tp->rx_opt, 0)) + goto discard_and_undo; + ++#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT + if (th->syn) { + /* We see SYN without ACK. It is attempt of + * simultaneous connect with crossed SYNs. +@@ -5611,6 +5612,7 @@ discard: + goto discard; + #endif + } ++#endif + /* "fifth, if neither of the SYN or RST bits is set then + * drop the segment and return." + */ +@@ -5657,7 +5659,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, + goto discard; + + if (th->syn) { +- if (th->fin) ++ if (th->fin || th->urg || th->psh) + goto discard; + if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) + return 1; +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 1e4eac7..a66fa4a 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly; + EXPORT_SYMBOL(sysctl_tcp_low_latency); + + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + #ifdef CONFIG_TCP_MD5SIG + static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, + __be32 daddr, __be32 saddr, const struct tcphdr *th); +@@ -1829,6 +1833,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v4_send_reset(rsk, skb); + discard: + kfree_skb(skb); +@@ -1974,12 +1981,19 @@ int tcp_v4_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; +- ++ } + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); +@@ -2033,6 +2047,10 @@ csum_error: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v4_send_reset(NULL, skb); + } + +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index 7a436c5..1b05c59 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -27,6 +27,10 @@ + #include <net/inet_common.h> + #include <net/xfrm.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int sysctl_tcp_syncookies __read_mostly = 1; + EXPORT_SYMBOL(sysctl_tcp_syncookies); + +@@ -709,7 +713,10 @@ embryonic_reset: + * avoid becoming vulnerable to outside attack aiming at + * resetting legit local connections. 
+ */ +- req->rsk_ops->send_reset(sk, skb); ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif ++ req->rsk_ops->send_reset(sk, skb); + } else if (fastopen) { /* received a valid RST pkt */ + reqsk_fastopen_remove(sk, req, true); + tcp_reset(sk); +diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c +index 1f2d376..01d18c4 100644 +--- a/net/ipv4/tcp_probe.c ++++ b/net/ipv4/tcp_probe.c +@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, + if (cnt + width >= len) + break; + +- if (copy_to_user(buf + cnt, tbuf, width)) ++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width)) + return -EFAULT; + cnt += width; + } +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c +index 64f0354..a81b39d 100644 +--- a/net/ipv4/tcp_timer.c ++++ b/net/ipv4/tcp_timer.c +@@ -22,6 +22,10 @@ + #include <linux/gfp.h> + #include <net/tcp.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_lastack_retries; ++#endif ++ + int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; + int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; + int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; +@@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock *sk) + } + } + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if ((sk->sk_state == TCP_LAST_ACK) && ++ (grsec_lastack_retries > 0) && ++ (grsec_lastack_retries < retry_until)) ++ retry_until = grsec_lastack_retries; ++#endif ++ + if (retransmits_timed_out(sk, retry_until, + syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) { + /* Has it gone just too far? */ +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index b25e852..cdc3258 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -87,6 +87,7 @@ + #include <linux/types.h> + #include <linux/fcntl.h> + #include <linux/module.h> ++#include <linux/security.h> + #include <linux/socket.h> + #include <linux/sockios.h> + #include <linux/igmp.h> +@@ -113,6 +114,10 @@ + #include <net/busy_poll.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + struct udp_table udp_table __read_mostly; + EXPORT_SYMBOL(udp_table); + +@@ -615,6 +620,9 @@ found: + return s; + } + ++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr); ++ + /* + * This routine is called by the ICMP module when it gets some + * sort of error condition. If err < 0 then the socket should +@@ -914,9 +922,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + dport = usin->sin_port; + if (dport == 0) + return -EINVAL; ++ ++ err = gr_search_udp_sendmsg(sk, usin); ++ if (err) ++ return err; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; ++ ++ err = gr_search_udp_sendmsg(sk, NULL); ++ if (err) ++ return err; ++ + daddr = inet->inet_daddr; + dport = inet->inet_dport; + /* Open fast path for connected socket. 
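/*
 * The CONFIG_GRKERNSEC_BLACKHOLE blocks scattered through the TCP/UDP
 * hunks above share one shape: when the sysctl is enabled, the stack
 * stays silent (no RST, no port-unreachable) toward segments that match
 * no socket, starving port scanners of feedback, while loopback traffic
 * stays exempt so local tooling keeps working. The shape in isolation
 * (the callback indirection below is invented for the demo):
 */
extern int grsec_enable_blackhole;	/* grsec sysctl, declared per file */

static void demo_reject_segment(void (*send_reset)(void *ctx), void *ctx)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	if (grsec_enable_blackhole)
		return;		/* drop silently: a RST would reveal the port */
#endif
	send_reset(ctx);
}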
+@@ -1163,7 +1180,7 @@ static unsigned int first_packet_length(struct sock *sk) + IS_UDPLITE(sk)); + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, + IS_UDPLITE(sk)); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + __skb_unlink(skb, rcvq); + __skb_queue_tail(&list_kill, skb); + } +@@ -1243,6 +1260,10 @@ try_again: + if (!skb) + goto out; + ++ err = gr_search_udp_recvmsg(sk, skb); ++ if (err) ++ goto out_free; ++ + ulen = skb->len - sizeof(struct udphdr); + copied = len; + if (copied > ulen) +@@ -1276,7 +1297,7 @@ try_again: + if (unlikely(err)) { + trace_kfree_skb(skb, udp_recvmsg); + if (!peeked) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + UDP_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); + } +@@ -1566,7 +1587,7 @@ csum_error: + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); + drop: + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return -1; + } +@@ -1585,7 +1606,7 @@ static void flush_stack(struct sock **stack, unsigned int count, + skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); + + if (!skb1) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, +@@ -1786,6 +1807,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + goto csum_error; + + UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); + + /* +@@ -2354,7 +2378,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, + from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), + 0, sock_i_ino(sp), + atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops)); ++ atomic_read_unchecked(&sp->sk_drops)); + } + + int udp4_seq_show(struct seq_file *seq, void *v) +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c +index e1a6393..f634ce5 100644 +--- a/net/ipv4/xfrm4_policy.c ++++ b/net/ipv4/xfrm4_policy.c +@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) + fl4->flowi4_tos = iph->tos; + } + +-static inline int xfrm4_garbage_collect(struct dst_ops *ops) ++static int xfrm4_garbage_collect(struct dst_ops *ops) + { + struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); + +- xfrm4_policy_afinfo.garbage_collect(net); ++ xfrm_garbage_collect_deferred(net); + return (dst_entries_get_slow(ops) > ops->gc_thresh * 2); + } + +@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = { + + static int __net_init xfrm4_net_init(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table = NULL; + struct ctl_table_header *hdr; + +- table = xfrm4_policy_table; + if (!net_eq(net, &init_net)) { +- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL); ++ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL); + if (!table) + goto err_alloc; + + table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh; +- } +- +- hdr = register_net_sysctl(net, "net/ipv4", table); ++ hdr = register_net_sysctl(net, "net/ipv4", table); ++ } else ++ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table); + if (!hdr) + goto err_reg; + +@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net 
*net) + return 0; + + err_reg: +- if (!net_eq(net, &init_net)) +- kfree(table); ++ kfree(table); + err_alloc: + return -ENOMEM; + } +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 6c7fa08..7c5abd70 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -598,7 +598,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb, + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); +- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ ++ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ + net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) +@@ -2395,7 +2395,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg) + p.iph.ihl = 5; + p.iph.protocol = IPPROTO_IPV6; + p.iph.ttl = 64; +- ifr.ifr_ifru.ifru_data = (__force void __user *)&p; ++ ifr.ifr_ifru.ifru_data = (void __force_user *)&p; + + if (ops->ndo_do_ioctl) { + mm_segment_t oldfs = get_fs(); +@@ -3528,16 +3528,23 @@ static const struct file_operations if6_fops = { + .release = seq_release_net, + }; + ++extern void register_ipv6_seq_ops_addr(struct seq_operations *addr); ++extern void unregister_ipv6_seq_ops_addr(void); ++ + static int __net_init if6_proc_net_init(struct net *net) + { +- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) ++ register_ipv6_seq_ops_addr(&if6_seq_ops); ++ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) { ++ unregister_ipv6_seq_ops_addr(); + return -ENOMEM; ++ } + return 0; + } + + static void __net_exit if6_proc_net_exit(struct net *net) + { + remove_proc_entry("if_inet6", net->proc_net); ++ unregister_ipv6_seq_ops_addr(); + } + + static struct pernet_operations if6_proc_net_ops = { +@@ -4146,7 +4153,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, + s_ip_idx = ip_idx = cb->args[2]; + + rcu_read_lock(); +- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq; ++ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq; + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; +@@ -4746,11 +4753,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) + + rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL, + dev->ifindex, 1); +- if (rt) { +- dst_hold(&rt->dst); +- if (ip6_del_rt(rt)) +- dst_free(&rt->dst); +- } ++ if (rt && ip6_del_rt(rt)) ++ dst_free(&rt->dst); + } + dst_hold(&ifp->rt->dst); + +@@ -4758,7 +4762,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) + dst_free(&ifp->rt->dst); + break; + } +- atomic_inc(&net->ipv6.dev_addr_genid); ++ atomic_inc_unchecked(&net->ipv6.dev_addr_genid); + rt_genid_bump_ipv6(net); + } + +@@ -4779,7 +4783,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write, + int *valp = ctl->data; + int val = *valp; + loff_t pos = *ppos; +- struct ctl_table lctl; ++ ctl_table_no_const lctl; + int ret; + + /* +@@ -4864,7 +4868,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write, + int *valp = ctl->data; + int val = *valp; + loff_t pos = *ppos; +- struct ctl_table lctl; ++ ctl_table_no_const lctl; + int ret; + + /* +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c +index d935889..2f64330 100644 +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -776,7 +776,7 @@ static int __net_init inet6_net_init(struct net *net) + net->ipv6.sysctl.bindv6only = 0; + net->ipv6.sysctl.icmpv6_time = 1*HZ; + net->ipv6.sysctl.flowlabel_consistency = 1; +- atomic_set(&net->ipv6.rt_genid, 0); ++ 
atomic_set_unchecked(&net->ipv6.rt_genid, 0); + + err = ipv6_init_mibs(net); + if (err) +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index c3bf2d2..1f00573 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -938,5 +938,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, + 0, + sock_i_ino(sp), + atomic_read(&sp->sk_refcnt), sp, +- atomic_read(&sp->sk_drops)); ++ atomic_read_unchecked(&sp->sk_drops)); + } +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c +index 7b32652..0bc348b 100644 +--- a/net/ipv6/icmp.c ++++ b/net/ipv6/icmp.c +@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = { + + struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(ipv6_icmp_table_template, + sizeof(ipv6_icmp_table_template), +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 2465d18..bc5bf7f 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -71,7 +71,7 @@ struct ip6gre_net { + struct net_device *fb_tunnel_dev; + }; + +-static struct rtnl_link_ops ip6gre_link_ops __read_mostly; ++static struct rtnl_link_ops ip6gre_link_ops; + static int ip6gre_tunnel_init(struct net_device *dev); + static void ip6gre_tunnel_setup(struct net_device *dev); + static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); +@@ -1291,7 +1291,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) + } + + +-static struct inet6_protocol ip6gre_protocol __read_mostly = { ++static struct inet6_protocol ip6gre_protocol = { + .handler = ip6gre_rcv, + .err_handler = ip6gre_err, + .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, +@@ -1643,7 +1643,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = { + [IFLA_GRE_FLAGS] = { .type = NLA_U32 }, + }; + +-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { ++static struct rtnl_link_ops ip6gre_link_ops = { + .kind = "ip6gre", + .maxtype = IFLA_GRE_MAX, + .policy = ip6gre_policy, +@@ -1657,7 +1657,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { + .fill_info = ip6gre_fill_info, + }; + +-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = { ++static struct rtnl_link_ops ip6gre_tap_ops = { + .kind = "ip6gretap", + .maxtype = IFLA_GRE_MAX, + .policy = ip6gre_policy, +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 9120339..cfdd84f 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) + + static int ip6_tnl_dev_init(struct net_device *dev); + static void ip6_tnl_dev_setup(struct net_device *dev); +-static struct rtnl_link_ops ip6_link_ops __read_mostly; ++static struct rtnl_link_ops ip6_link_ops; + + static int ip6_tnl_net_id __read_mostly; + struct ip6_tnl_net { +@@ -1715,7 +1715,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = { + [IFLA_IPTUN_PROTO] = { .type = NLA_U8 }, + }; + +-static struct rtnl_link_ops ip6_link_ops __read_mostly = { ++static struct rtnl_link_ops ip6_link_ops = { + .kind = "ip6tnl", + .maxtype = IFLA_IPTUN_MAX, + .policy = ip6_tnl_policy, +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index 2d19272..3a46322 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) + + static int vti6_dev_init(struct net_device *dev); + static void vti6_dev_setup(struct net_device *dev); +-static 
struct rtnl_link_ops vti6_link_ops __read_mostly; ++static struct rtnl_link_ops vti6_link_ops; + + static int vti6_net_id __read_mostly; + struct vti6_net { +@@ -901,7 +901,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = { + [IFLA_VTI_OKEY] = { .type = NLA_U32 }, + }; + +-static struct rtnl_link_ops vti6_link_ops __read_mostly = { ++static struct rtnl_link_ops vti6_link_ops = { + .kind = "vti6", + .maxtype = IFLA_VTI_MAX, + .policy = vti6_policy, +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 0a00f44..bec42b2 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, + if (sk->sk_type != SOCK_STREAM) + return -ENOPROTOOPT; + +- msg.msg_control = optval; ++ msg.msg_control = (void __force_kernel *)optval; + msg.msg_controllen = len; + msg.msg_flags = flags; + +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index e080fbb..412b3cf 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info, + #endif + + static int get_info(struct net *net, void __user *user, +- const int *len, int compat) ++ int len, int compat) + { + char name[XT_TABLE_MAXNAMELEN]; + struct xt_table *t; + int ret; + +- if (*len != sizeof(struct ip6t_getinfo)) { +- duprintf("length %u != %zu\n", *len, ++ if (len != sizeof(struct ip6t_getinfo)) { ++ duprintf("length %u != %zu\n", len, + sizeof(struct ip6t_getinfo)); + return -EINVAL; + } +@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user, + info.size = private->size; + strcpy(info.name, name); + +- if (copy_to_user(user, &info, *len) != 0) ++ if (copy_to_user(user, &info, len) != 0) + ret = -EFAULT; + else + ret = 0; +@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + + switch (cmd) { + case IP6T_SO_GET_INFO: +- ret = get_info(sock_net(sk), user, len, 1); ++ ret = get_info(sock_net(sk), user, *len, 1); + break; + case IP6T_SO_GET_ENTRIES: + ret = compat_get_entries(sock_net(sk), user, len); +@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + + switch (cmd) { + case IP6T_SO_GET_INFO: +- ret = get_info(sock_net(sk), user, len, 0); ++ ret = get_info(sock_net(sk), user, *len, 0); + break; + + case IP6T_SO_GET_ENTRIES: +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c +index 767ab8d..c5ec70a 100644 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c +@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = { + + static int nf_ct_frag6_sysctl_register(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table = NULL; + struct ctl_table_header *hdr; + +- table = nf_ct_frag6_sysctl_table; + if (!net_eq(net, &init_net)) { +- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table), ++ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table), + GFP_KERNEL); + if (table == NULL) + goto err_alloc; +@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net) + table[0].data = &net->nf_frag.frags.timeout; + table[1].data = &net->nf_frag.frags.low_thresh; + table[2].data = &net->nf_frag.frags.high_thresh; +- } +- +- hdr = register_net_sysctl(net, "net/netfilter", table); ++ hdr = register_net_sysctl(net, "net/netfilter", table); 
++ } else ++ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table); + if (hdr == NULL) + goto err_reg; + +@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) + return 0; + + err_reg: +- if (!net_eq(net, &init_net)) +- kfree(table); ++ kfree(table); + err_alloc: + return -ENOMEM; + } +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c +index bda7429..469b26b 100644 +--- a/net/ipv6/ping.c ++++ b/net/ipv6/ping.c +@@ -246,6 +246,24 @@ static struct pernet_operations ping_v6_net_ops = { + }; + #endif + ++static struct pingv6_ops real_pingv6_ops = { ++ .ipv6_recv_error = ipv6_recv_error, ++ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl, ++ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl, ++ .icmpv6_err_convert = icmpv6_err_convert, ++ .ipv6_icmp_error = ipv6_icmp_error, ++ .ipv6_chk_addr = ipv6_chk_addr, ++}; ++ ++static struct pingv6_ops dummy_pingv6_ops = { ++ .ipv6_recv_error = dummy_ipv6_recv_error, ++ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl, ++ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl, ++ .icmpv6_err_convert = dummy_icmpv6_err_convert, ++ .ipv6_icmp_error = dummy_ipv6_icmp_error, ++ .ipv6_chk_addr = dummy_ipv6_chk_addr, ++}; ++ + int __init pingv6_init(void) + { + #ifdef CONFIG_PROC_FS +@@ -253,13 +271,7 @@ int __init pingv6_init(void) + if (ret) + return ret; + #endif +- pingv6_ops.ipv6_recv_error = ipv6_recv_error; +- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl; +- pingv6_ops.ip6_datagram_recv_specific_ctl = +- ip6_datagram_recv_specific_ctl; +- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert; +- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error; +- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr; ++ pingv6_ops = &real_pingv6_ops; + return inet6_register_protosw(&pingv6_protosw); + } + +@@ -268,14 +280,9 @@ int __init pingv6_init(void) + */ + void pingv6_exit(void) + { +- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error; +- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl; +- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl; +- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert; +- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error; +- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr; + #ifdef CONFIG_PROC_FS + unregister_pernet_subsys(&ping_v6_net_ops); + #endif ++ pingv6_ops = &dummy_pingv6_ops; + inet6_unregister_protosw(&pingv6_protosw); + } +diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c +index 091d066..139d410 100644 +--- a/net/ipv6/proc.c ++++ b/net/ipv6/proc.c +@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net) + if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops)) + goto proc_snmp6_fail; + +- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net); ++ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net); + if (!net->mib.proc_net_devsnmp6) + goto proc_dev_snmp6_fail; + return 0; +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index 1f29996..7418779 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) && + skb_checksum_complete(skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) + struct raw6_sock *rp = raw6_sk(sk); + + if 
(!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) + + if (inet->hdrincl) { + if (skb_checksum_complete(skb)) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return NET_RX_DROP; + } +@@ -610,7 +610,7 @@ out: + return err; + } + +-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, ++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length, + struct flowi6 *fl6, struct dst_entry **dstp, + unsigned int flags) + { +@@ -922,12 +922,15 @@ do_confirm: + static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, + char __user *optval, int optlen) + { ++ struct icmp6_filter filter; ++ + switch (optname) { + case ICMPV6_FILTER: + if (optlen > sizeof(struct icmp6_filter)) + optlen = sizeof(struct icmp6_filter); +- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) ++ if (copy_from_user(&filter, optval, optlen)) + return -EFAULT; ++ raw6_sk(sk)->filter = filter; + return 0; + default: + return -ENOPROTOOPT; +@@ -940,6 +943,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen) + { + int len; ++ struct icmp6_filter filter; + + switch (optname) { + case ICMPV6_FILTER: +@@ -951,7 +955,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, + len = sizeof(struct icmp6_filter); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) ++ filter = raw6_sk(sk)->filter; ++ if (len > sizeof filter || copy_to_user(optval, &filter, len)) + return -EFAULT; + return 0; + default: +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c +index cc85a9b..526a133 100644 +--- a/net/ipv6/reassembly.c ++++ b/net/ipv6/reassembly.c +@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = { + + static int __net_init ip6_frags_ns_sysctl_register(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table = NULL; + struct ctl_table_header *hdr; + +- table = ip6_frags_ns_ctl_table; + if (!net_eq(net, &init_net)) { +- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL); ++ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL); + if (table == NULL) + goto err_alloc; + +@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) + /* Don't export sysctls to unprivileged users */ + if (net->user_ns != &init_user_ns) + table[0].procname = NULL; +- } ++ hdr = register_net_sysctl(net, "net/ipv6", table); ++ } else ++ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table); + +- hdr = register_net_sysctl(net, "net/ipv6", table); + if (hdr == NULL) + goto err_reg; + +@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) + return 0; + + err_reg: +- if (!net_eq(net, &init_net)) +- kfree(table); ++ kfree(table); + err_alloc: + return -ENOMEM; + } +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 7cc1102..7785931 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2973,7 +2973,7 @@ struct ctl_table ipv6_route_table_template[] = { + + struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(ipv6_route_table_template, + sizeof(ipv6_route_table_template), +diff --git 
a/net/ipv6/sit.c b/net/ipv6/sit.c +index fe548ba..0dfa744 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev); + static void ipip6_dev_free(struct net_device *dev); + static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst, + __be32 *v4dst); +-static struct rtnl_link_ops sit_link_ops __read_mostly; ++static struct rtnl_link_ops sit_link_ops; + + static int sit_net_id __read_mostly; + struct sit_net { +@@ -1683,7 +1683,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head) + unregister_netdevice_queue(dev, head); + } + +-static struct rtnl_link_ops sit_link_ops __read_mostly = { ++static struct rtnl_link_ops sit_link_ops = { + .kind = "sit", + .maxtype = IFLA_IPTUN_MAX, + .policy = ipip6_policy, +diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c +index 7f405a1..eabef92 100644 +--- a/net/ipv6/sysctl_net_ipv6.c ++++ b/net/ipv6/sysctl_net_ipv6.c +@@ -54,7 +54,7 @@ static struct ctl_table ipv6_rotable[] = { + + static int __net_init ipv6_sysctl_net_init(struct net *net) + { +- struct ctl_table *ipv6_table; ++ ctl_table_no_const *ipv6_table; + struct ctl_table *ipv6_route_table; + struct ctl_table *ipv6_icmp_table; + int err; +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 889079b..a04512c 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) + inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; + } + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + static void tcp_v6_hash(struct sock *sk) + { + if (sk->sk_state != TCP_CLOSE) { +@@ -1412,6 +1416,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v6_send_reset(sk, skb); + discard: + if (opt_skb) +@@ -1496,12 +1503,20 @@ static int tcp_v6_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; ++ } + + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); +@@ -1553,6 +1568,10 @@ csum_error: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v6_send_reset(NULL, skb); + } + +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 20b63d2..31a777d 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net, + udp_ipv6_hash_secret + net_hash_mix(net)); + } + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) + { + const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); +@@ -435,7 +439,7 @@ try_again: + if (unlikely(err)) { + trace_kfree_skb(skb, udpv6_recvmsg); + if (!peeked) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + if (is_udp4) + UDP_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, +@@ -690,7 +694,7 
@@ csum_error: + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); + drop: + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + kfree_skb(skb); + return -1; + } +@@ -747,7 +751,7 @@ static void flush_stack(struct sock **stack, unsigned int count, + if (likely(skb1 == NULL)) + skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); + if (!skb1) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + IS_UDPLITE(sk)); + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, +@@ -886,6 +890,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, + goto csum_error; + + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + + kfree_skb(skb); +diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c +index 5f8e128..865d38e 100644 +--- a/net/ipv6/xfrm6_policy.c ++++ b/net/ipv6/xfrm6_policy.c +@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) + } + } + +-static inline int xfrm6_garbage_collect(struct dst_ops *ops) ++static int xfrm6_garbage_collect(struct dst_ops *ops) + { + struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); + +- xfrm6_policy_afinfo.garbage_collect(net); ++ xfrm_garbage_collect_deferred(net); + return dst_entries_get_fast(ops) > ops->gc_thresh * 2; + } + +@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = { + + static int __net_init xfrm6_net_init(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table = NULL; + struct ctl_table_header *hdr; + +- table = xfrm6_policy_table; + if (!net_eq(net, &init_net)) { +- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL); ++ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL); + if (!table) + goto err_alloc; + + table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh; +- } ++ hdr = register_net_sysctl(net, "net/ipv6", table); ++ } else ++ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table); + +- hdr = register_net_sysctl(net, "net/ipv6", table); + if (!hdr) + goto err_reg; + +@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net) + return 0; + + err_reg: +- if (!net_eq(net, &init_net)) +- kfree(table); ++ kfree(table); + err_alloc: + return -ENOMEM; + } +diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c +index e15c16a..7cf07aa 100644 +--- a/net/ipx/ipx_proc.c ++++ b/net/ipx/ipx_proc.c +@@ -289,7 +289,7 @@ int __init ipx_proc_init(void) + struct proc_dir_entry *p; + int rc = -ENOMEM; + +- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net); ++ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net); + + if (!ipx_proc_dir) + goto out; +diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c +index 2ba8b97..6d33010 100644 +--- a/net/irda/ircomm/ircomm_tty.c ++++ b/net/irda/ircomm/ircomm_tty.c +@@ -317,11 +317,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + add_wait_queue(&port->open_wait, &wait); + + IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", +- __FILE__, __LINE__, tty->driver->name, port->count); ++ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count)); + + spin_lock_irqsave(&port->lock, flags); + if 
(!tty_hung_up_p(filp)) +- port->count--; ++ atomic_dec(&port->count); + port->blocked_open++; + spin_unlock_irqrestore(&port->lock, flags); + +@@ -356,7 +356,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + } + + IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", +- __FILE__, __LINE__, tty->driver->name, port->count); ++ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count)); + + schedule(); + } +@@ -366,12 +366,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, + + spin_lock_irqsave(&port->lock, flags); + if (!tty_hung_up_p(filp)) +- port->count++; ++ atomic_inc(&port->count); + port->blocked_open--; + spin_unlock_irqrestore(&port->lock, flags); + + IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", +- __FILE__, __LINE__, tty->driver->name, port->count); ++ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count)); + + if (!retval) + port->flags |= ASYNC_NORMAL_ACTIVE; +@@ -445,12 +445,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) + + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->port.lock, flags); +- self->port.count++; ++ atomic_inc(&self->port.count); + spin_unlock_irqrestore(&self->port.lock, flags); + tty_port_tty_set(&self->port, tty); + + IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, +- self->line, self->port.count); ++ self->line, atomic_read(&self->port.count)); + + /* Not really used by us, but lets do it anyway */ + self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; +@@ -987,7 +987,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) + tty_kref_put(port->tty); + } + port->tty = NULL; +- port->count = 0; ++ atomic_set(&port->count, 0); + spin_unlock_irqrestore(&port->lock, flags); + + wake_up_interruptible(&port->open_wait); +@@ -1344,7 +1344,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m) + seq_putc(m, '\n'); + + seq_printf(m, "Role: %s\n", self->client ? 
"client" : "server"); +- seq_printf(m, "Open count: %d\n", self->port.count); ++ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count)); + seq_printf(m, "Max data size: %d\n", self->max_data_size); + seq_printf(m, "Max header size: %d\n", self->max_header_size); + +diff --git a/net/irda/irproc.c b/net/irda/irproc.c +index b9ac598..f88cc56 100644 +--- a/net/irda/irproc.c ++++ b/net/irda/irproc.c +@@ -66,7 +66,7 @@ void __init irda_proc_register(void) + { + int i; + +- proc_irda = proc_mkdir("irda", init_net.proc_net); ++ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net); + if (proc_irda == NULL) + return; + +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c +index 1465363..c7e9f14 100644 +--- a/net/iucv/af_iucv.c ++++ b/net/iucv/af_iucv.c +@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk) + + write_lock_bh(&iucv_sk_list.lock); + +- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); ++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); + while (__iucv_get_sock_by_name(name)) { + sprintf(name, "%08x", +- atomic_inc_return(&iucv_sk_list.autobind_name)); ++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); + } + + write_unlock_bh(&iucv_sk_list.lock); +diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c +index cd5b8ec..f205e6b 100644 +--- a/net/iucv/iucv.c ++++ b/net/iucv/iucv.c +@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self, + return NOTIFY_OK; + } + +-static struct notifier_block __refdata iucv_cpu_notifier = { ++static struct notifier_block iucv_cpu_notifier = { + .notifier_call = iucv_cpu_notify, + }; + +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 7932697..a13d158 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -3052,10 +3052,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc + static u32 get_acqseq(void) + { + u32 res; +- static atomic_t acqseq; ++ static atomic_unchecked_t acqseq; + + do { +- res = atomic_inc_return(&acqseq); ++ res = atomic_inc_return_unchecked(&acqseq); + } while (!res); + return res; + } +diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c +index 1a3c7e0..80f8b0c 100644 +--- a/net/llc/llc_proc.c ++++ b/net/llc/llc_proc.c +@@ -247,7 +247,7 @@ int __init llc_proc_init(void) + int rc = -ENOMEM; + struct proc_dir_entry *p; + +- llc_proc_dir = proc_mkdir("llc", init_net.proc_net); ++ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net); + if (!llc_proc_dir) + goto out; + +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 453e974..b3a43a5 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -839,7 +839,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy, + ret = ieee80211_vif_use_channel(sdata, chandef, + IEEE80211_CHANCTX_EXCLUSIVE); + } +- } else if (local->open_count == local->monitors) { ++ } else if (local_read(&local->open_count) == local->monitors) { + local->_oper_chandef = *chandef; + ieee80211_hw_config(local, 0); + } +@@ -3356,7 +3356,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, + else + local->probe_req_reg--; + +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + break; + + ieee80211_queue_work(&local->hw, &local->reconfig_filter); +@@ -3819,8 +3819,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, + if (chanctx_conf) { + *chandef = chanctx_conf->def; + ret = 0; +- } else if (local->open_count > 0 && +- local->open_count == local->monitors && ++ } else if (local_read(&local->open_count) > 
0 && ++ local_read(&local->open_count) == local->monitors && + sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (local->use_chanctx) + *chandef = local->monitor_chandef; +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index b127902..9dc4947 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -28,6 +28,7 @@ + #include <net/ieee80211_radiotap.h> + #include <net/cfg80211.h> + #include <net/mac80211.h> ++#include <asm/local.h> + #include "key.h" + #include "sta_info.h" + #include "debug.h" +@@ -995,7 +996,7 @@ struct ieee80211_local { + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + +- int open_count; ++ local_t open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll, +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index 8f7fabc..e400523 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -529,7 +529,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) + break; + } + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; +@@ -576,7 +576,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) + res = drv_add_interface(local, sdata); + if (res) + goto err_stop; +- } else if (local->monitors == 0 && local->open_count == 0) { ++ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) { + res = ieee80211_add_virtual_monitor(local); + if (res) + goto err_stop; +@@ -685,7 +685,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) + atomic_inc(&local->iff_promiscs); + + if (coming_up) +- local->open_count++; ++ local_inc(&local->open_count); + + if (hw_reconf_flags) + ieee80211_hw_config(local, hw_reconf_flags); +@@ -723,7 +723,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) + err_del_interface: + drv_remove_interface(local, sdata); + err_stop: +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; +@@ -874,7 +874,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + } + + if (going_down) +- local->open_count--; ++ local_dec(&local->open_count); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: +@@ -933,7 +933,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + +- if (local->open_count == 0) ++ if (local_read(&local->open_count) == 0) + ieee80211_clear_tx_pending(local); + + /* +@@ -973,7 +973,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + + ieee80211_recalc_ps(local, -1); + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + ieee80211_stop_device(local); + + /* no reconfiguring after stop! 
*/ +@@ -984,7 +984,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + ieee80211_configure_filter(local); + ieee80211_hw_config(local, hw_reconf_flags); + +- if (local->monitors == local->open_count) ++ if (local->monitors == local_read(&local->open_count)) + ieee80211_add_virtual_monitor(local); + } + +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index c7a7a86..a74f57b 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) + changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL | + IEEE80211_CONF_CHANGE_POWER); + +- if (changed && local->open_count) { ++ if (changed && local_read(&local->open_count)) { + ret = drv_config(local, changed); + /* + * Goal: +diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c +index d478b88..8c8d157 100644 +--- a/net/mac80211/pm.c ++++ b/net/mac80211/pm.c +@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto suspend; + + ieee80211_scan_cancel(local); +@@ -58,7 +58,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + cancel_work_sync(&local->dynamic_ps_enable_work); + del_timer_sync(&local->dynamic_ps_timer); + +- local->wowlan = wowlan && local->open_count; ++ local->wowlan = wowlan && local_read(&local->open_count); + if (local->wowlan) { + int err = drv_suspend(local, wowlan); + if (err < 0) { +@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) + WARN_ON(!list_empty(&local->chanctx_list)); + + /* stop hardware - this must stop RX */ +- if (local->open_count) ++ if (local_read(&local->open_count)) + ieee80211_stop_device(local); + + suspend: +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index 22b223f..ab70070 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + + ASSERT_RTNL(); + +- if (local->open_count) ++ if (local_read(&local->open_count)) + return -EBUSY; + + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { +diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c +index 6ff1346..936ca9a 100644 +--- a/net/mac80211/rc80211_pid_debugfs.c ++++ b/net/mac80211/rc80211_pid_debugfs.c +@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, + + spin_unlock_irqrestore(&events->lock, status); + +- if (copy_to_user(buf, pb, p)) ++ if (p > sizeof(pb) || copy_to_user(buf, pb, p)) + return -EFAULT; + + return p; +diff --git a/net/mac80211/util.c b/net/mac80211/util.c +index 6427625..afa5a5a 100644 +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -1483,7 +1483,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) + } + #endif + /* everything else happens only if HW was up & running */ +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto wake_up; + + /* +@@ -1708,7 +1708,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) + local->in_reconfig = false; + barrier(); + +- if (local->monitors == local->open_count && local->monitors > 0) ++ if (local->monitors == local_read(&local->open_count) && local->monitors > 0) + ieee80211_add_virtual_monitor(local); + + /* +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig +index e9410d1..77b6378 100644 +--- 
a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -1081,6 +1081,16 @@ config NETFILTER_XT_MATCH_ESP + + To compile it as a module, choose M here. If unsure, say N. + ++config NETFILTER_XT_MATCH_GRADM ++ tristate '"gradm" match support' ++ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED ++ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC ++ ---help--- ++ The gradm match allows to match on grsecurity RBAC being enabled. ++ It is useful when iptables rules are applied early on bootup to ++ prevent connections to the machine (except from a trusted host) ++ while the RBAC system is disabled. ++ + config NETFILTER_XT_MATCH_HASHLIMIT + tristate '"hashlimit" match support' + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) +diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile +index bffdad7..f9317d1 100644 +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -133,6 +133,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o + obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o + obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o + obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o ++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o + obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c +index de770ec..3fc49d2 100644 +--- a/net/netfilter/ipset/ip_set_core.c ++++ b/net/netfilter/ipset/ip_set_core.c +@@ -1922,7 +1922,7 @@ done: + return ret; + } + +-static struct nf_sockopt_ops so_set __read_mostly = { ++static struct nf_sockopt_ops so_set = { + .pf = PF_INET, + .get_optmin = SO_IP_SET, + .get_optmax = SO_IP_SET + 1, +diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c +index a8eb0a8..86f2de4 100644 +--- a/net/netfilter/ipvs/ip_vs_conn.c ++++ b/net/netfilter/ipvs/ip_vs_conn.c +@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) + /* Increase the refcnt counter of the dest */ + ip_vs_dest_hold(dest); + +- conn_flags = atomic_read(&dest->conn_flags); ++ conn_flags = atomic_read_unchecked(&dest->conn_flags); + if (cp->protocol != IPPROTO_UDP) + conn_flags &= ~IP_VS_CONN_F_ONE_PACKET; + flags = cp->flags; +@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, + + cp->control = NULL; + atomic_set(&cp->n_control, 0); +- atomic_set(&cp->in_pkts, 0); ++ atomic_set_unchecked(&cp->in_pkts, 0); + + cp->packet_xmit = NULL; + cp->app = NULL; +@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp) + + /* Don't drop the entry if its number of incoming packets is not + located in [0, 8] */ +- i = atomic_read(&cp->in_pkts); ++ i = atomic_read_unchecked(&cp->in_pkts); + if (i > 8 || i < 0) return 0; + + if (!todrop_rate[i]) return 0; +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index 3d2d2c8..c87e4d3 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, + ret = cp->packet_xmit(skb, cp, pd->pp, iph); + /* do not touch skb anymore */ + +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + ip_vs_conn_put(cp); + return ret; + } +@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) + if (cp->flags & IP_VS_CONN_F_ONE_PACKET) + pkts = sysctl_sync_threshold(ipvs); + else +- pkts = atomic_add_return(1, 
&cp->in_pkts); ++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts); + + if (ipvs->sync_state & IP_VS_STATE_MASTER) + ip_vs_sync_conn(net, cp, pkts); +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index 35be035..50f8834 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, + */ + ip_vs_rs_hash(ipvs, dest); + } +- atomic_set(&dest->conn_flags, conn_flags); ++ atomic_set_unchecked(&dest->conn_flags, conn_flags); + + /* bind the service */ + old_svc = rcu_dereference_protected(dest->svc, 1); +@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write, + * align with netns init in ip_vs_control_net_init() + */ + +-static struct ctl_table vs_vars[] = { ++static ctl_table_no_const vs_vars[] __read_only = { + { + .procname = "amemthresh", + .maxlen = sizeof(int), +@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + " %-7s %-6d %-10d %-10d\n", + &dest->addr.in6, + ntohs(dest->port), +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), + atomic_read(&dest->weight), + atomic_read(&dest->activeconns), + atomic_read(&dest->inactconns)); +@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + "%-7s %-6d %-10d %-10d\n", + ntohl(dest->addr.ip), + ntohs(dest->port), +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)), ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), + atomic_read(&dest->weight), + atomic_read(&dest->activeconns), + atomic_read(&dest->inactconns)); +@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, + + entry.addr = dest->addr.ip; + entry.port = dest->port; +- entry.conn_flags = atomic_read(&dest->conn_flags); ++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags); + entry.weight = atomic_read(&dest->weight); + entry.u_threshold = dest->u_threshold; + entry.l_threshold = dest->l_threshold; +@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) + if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) || + nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) || + nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD, +- (atomic_read(&dest->conn_flags) & ++ (atomic_read_unchecked(&dest->conn_flags) & + IP_VS_CONN_F_FWD_MASK)) || + nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT, + atomic_read(&dest->weight)) || +@@ -3580,7 +3580,7 @@ out: + } + + +-static const struct genl_ops ip_vs_genl_ops[] __read_mostly = { ++static const struct genl_ops ip_vs_genl_ops[] = { + { + .cmd = IPVS_CMD_NEW_SERVICE, + .flags = GENL_ADMIN_PERM, +@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) + { + int idx; + struct netns_ipvs *ipvs = net_ipvs(net); +- struct ctl_table *tbl; ++ ctl_table_no_const *tbl; + + atomic_set(&ipvs->dropentry, 0); + spin_lock_init(&ipvs->dropentry_lock); +diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c +index ca056a3..9cf01ef 100644 +--- a/net/netfilter/ipvs/ip_vs_lblc.c ++++ b/net/netfilter/ipvs/ip_vs_lblc.c +@@ -118,7 +118,7 @@ struct ip_vs_lblc_table { + * IPVS LBLC sysctl table + */ + #ifdef CONFIG_SYSCTL +-static struct ctl_table vs_vars_table[] = { ++static ctl_table_no_const vs_vars_table[] __read_only = { + { + .procname = "lblc_expiration", + .data = NULL, +diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c 
b/net/netfilter/ipvs/ip_vs_lblcr.c +index 3f21a2f..a112e85 100644 +--- a/net/netfilter/ipvs/ip_vs_lblcr.c ++++ b/net/netfilter/ipvs/ip_vs_lblcr.c +@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table { + * IPVS LBLCR sysctl table + */ + +-static struct ctl_table vs_vars_table[] = { ++static ctl_table_no_const vs_vars_table[] __read_only = { + { + .procname = "lblcr_expiration", + .data = NULL, +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c +index db80126..ef7110e 100644 +--- a/net/netfilter/ipvs/ip_vs_sync.c ++++ b/net/netfilter/ipvs/ip_vs_sync.c +@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, + cp = cp->control; + if (cp) { + if (cp->flags & IP_VS_CONN_F_TEMPLATE) +- pkts = atomic_add_return(1, &cp->in_pkts); ++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); + ip_vs_sync_conn(net, cp->control, pkts); +@@ -771,7 +771,7 @@ control: + if (!cp) + return; + if (cp->flags & IP_VS_CONN_F_TEMPLATE) +- pkts = atomic_add_return(1, &cp->in_pkts); ++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts); + else + pkts = sysctl_sync_threshold(ipvs); + goto sloop; +@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, + + if (opt) + memcpy(&cp->in_seq, opt, sizeof(*opt)); +- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); ++ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs)); + cp->state = state; + cp->old_state = cp->state; + /* +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 7f0e1cf..e9a86e6 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + else + rc = NF_ACCEPT; + /* do not touch skb anymore */ +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + goto out; + } + +@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + else + rc = NF_ACCEPT; + /* do not touch skb anymore */ +- atomic_inc(&cp->in_pkts); ++ atomic_inc_unchecked(&cp->in_pkts); + goto out; + } + +diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c +index a4b5e2a..13b1de3 100644 +--- a/net/netfilter/nf_conntrack_acct.c ++++ b/net/netfilter/nf_conntrack_acct.c +@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = { + #ifdef CONFIG_SYSCTL + static int nf_conntrack_acct_init_sysctl(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table), + GFP_KERNEL); +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 356bef5..163b56a 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -1627,6 +1627,10 @@ void nf_conntrack_init_end(void) + #define DYING_NULLS_VAL ((1<<30)+1) + #define TEMPLATE_NULLS_VAL ((1<<30)+2) + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0); ++#endif ++ + int nf_conntrack_init_net(struct net *net) + { + int ret; +@@ -1641,7 +1645,11 @@ int nf_conntrack_init_net(struct net *net) + goto err_stat; + } + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id)); ++#else + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); ++#endif + if (!net->ct.slabname) { + ret = -ENOMEM; + goto 
err_slabname; +diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c +index 1df1761..ce8b88a 100644 +--- a/net/netfilter/nf_conntrack_ecache.c ++++ b/net/netfilter/nf_conntrack_ecache.c +@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = { + #ifdef CONFIG_SYSCTL + static int nf_conntrack_event_init_sysctl(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table), + GFP_KERNEL); +diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c +index 974a2a4..52cc6ff 100644 +--- a/net/netfilter/nf_conntrack_helper.c ++++ b/net/netfilter/nf_conntrack_helper.c +@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = { + + static int nf_conntrack_helper_init_sysctl(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table), + GFP_KERNEL); +diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c +index b65d586..beec902 100644 +--- a/net/netfilter/nf_conntrack_proto.c ++++ b/net/netfilter/nf_conntrack_proto.c +@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net, + + static void + nf_ct_unregister_sysctl(struct ctl_table_header **header, +- struct ctl_table **table, ++ ctl_table_no_const **table, + unsigned int users) + { + if (users > 0) +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c +index f641751..d3c5b51 100644 +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = { + + static int nf_conntrack_standalone_init_sysctl(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), + GFP_KERNEL); +diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c +index 7a394df..bd91a8a 100644 +--- a/net/netfilter/nf_conntrack_timestamp.c ++++ b/net/netfilter/nf_conntrack_timestamp.c +@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = { + #ifdef CONFIG_SYSCTL + static int nf_conntrack_tstamp_init_sysctl(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table), + GFP_KERNEL); +diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c +index 85296d4..8becdec 100644 +--- a/net/netfilter/nf_log.c ++++ b/net/netfilter/nf_log.c +@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = { + + #ifdef CONFIG_SYSCTL + static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; +-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; ++static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only; + + static int nf_log_proc_dostring(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, + rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); + mutex_unlock(&nf_log_mutex); + } else { ++ ctl_table_no_const nf_log_table = *table; ++ + mutex_lock(&nf_log_mutex); + logger = rcu_dereference_protected(net->nf.nf_loggers[tindex], + lockdep_is_held(&nf_log_mutex)); + if (!logger) +- table->data = "NONE"; ++ nf_log_table.data = "NONE"; + else +- table->data = 
logger->name; +- r = proc_dostring(table, write, buffer, lenp, ppos); ++ nf_log_table.data = logger->name; ++ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos); + mutex_unlock(&nf_log_mutex); + } + +diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c +index f042ae5..30ea486 100644 +--- a/net/netfilter/nf_sockopt.c ++++ b/net/netfilter/nf_sockopt.c +@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg) + } + } + +- list_add(®->list, &nf_sockopts); ++ pax_list_add((struct list_head *)®->list, &nf_sockopts); + out: + mutex_unlock(&nf_sockopt_mutex); + return ret; +@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt); + void nf_unregister_sockopt(struct nf_sockopt_ops *reg) + { + mutex_lock(&nf_sockopt_mutex); +- list_del(®->list); ++ pax_list_del((struct list_head *)®->list); + mutex_unlock(&nf_sockopt_mutex); + } + EXPORT_SYMBOL(nf_unregister_sockopt); +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index c68e5e0..8d52d50 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -152,8 +152,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi, + #ifdef CONFIG_MODULES + if (autoload) { + nfnl_unlock(NFNL_SUBSYS_NFTABLES); +- request_module("nft-chain-%u-%*.s", afi->family, +- nla_len(nla)-1, (const char *)nla_data(nla)); ++ request_module("nft-chain-%u-%.*s", afi->family, ++ nla_len(nla), (const char *)nla_data(nla)); + nfnl_lock(NFNL_SUBSYS_NFTABLES); + type = __nf_tables_chain_type_lookup(afi->family, nla); + if (type != NULL) +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c +index a155d19..726b0f2 100644 +--- a/net/netfilter/nfnetlink_log.c ++++ b/net/netfilter/nfnetlink_log.c +@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly; + struct nfnl_log_net { + spinlock_t instances_lock; + struct hlist_head instance_table[INSTANCE_BUCKETS]; +- atomic_t global_seq; ++ atomic_unchecked_t global_seq; + }; + + static struct nfnl_log_net *nfnl_log_pernet(struct net *net) +@@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_net *log, + /* global sequence number */ + if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) && + nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL, +- htonl(atomic_inc_return(&log->global_seq)))) ++ htonl(atomic_inc_return_unchecked(&log->global_seq)))) + goto nla_put_failure; + + if (data_len) { +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c +index 82cb823..5685dd5 100644 +--- a/net/netfilter/nft_compat.c ++++ b/net/netfilter/nft_compat.c +@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in) + /* We want to reuse existing compat_to_user */ + old_fs = get_fs(); + set_fs(KERNEL_DS); +- t->compat_to_user(out, in); ++ t->compat_to_user((void __force_user *)out, in); + set_fs(old_fs); + ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out); + kfree(out); +@@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in) + /* We want to reuse existing compat_to_user */ + old_fs = get_fs(); + set_fs(KERNEL_DS); +- m->compat_to_user(out, in); ++ m->compat_to_user((void __force_user *)out, in); + set_fs(old_fs); + ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out); + kfree(out); +diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c +new file mode 100644 +index 0000000..c566332 +--- /dev/null ++++ b/net/netfilter/xt_gradm.c +@@ -0,0 +1,51 @@ ++/* ++ * gradm match for netfilter ++ * Copyright © Zbigniew Krzystolik, 
2010 ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License; either version ++ * 2 or 3 as published by the Free Software Foundation. ++ */ ++#include <linux/module.h> ++#include <linux/moduleparam.h> ++#include <linux/skbuff.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/grsecurity.h> ++#include <linux/netfilter/xt_gradm.h> ++ ++static bool ++gradm_mt(const struct sk_buff *skb, struct xt_action_param *par) ++{ ++ const struct xt_gradm_mtinfo *info = par->matchinfo; ++ bool retval = false; ++ if (gr_acl_is_enabled()) ++ retval = true; ++ return retval ^ info->invflags; ++} ++ ++static struct xt_match gradm_mt_reg __read_mostly = { ++ .name = "gradm", ++ .revision = 0, ++ .family = NFPROTO_UNSPEC, ++ .match = gradm_mt, ++ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)), ++ .me = THIS_MODULE, ++}; ++ ++static int __init gradm_mt_init(void) ++{ ++ return xt_register_match(&gradm_mt_reg); ++} ++ ++static void __exit gradm_mt_exit(void) ++{ ++ xt_unregister_match(&gradm_mt_reg); ++} ++ ++module_init(gradm_mt_init); ++module_exit(gradm_mt_exit); ++MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>"); ++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("ipt_gradm"); ++MODULE_ALIAS("ip6t_gradm"); +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c +index a3910fc..2d2ba14 100644 +--- a/net/netfilter/xt_hashlimit.c ++++ b/net/netfilter/xt_hashlimit.c +@@ -870,11 +870,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net) + { + struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); + +- hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net); ++ hashlimit_net->ipt_hashlimit = proc_mkdir_restrict("ipt_hashlimit", net->proc_net); + if (!hashlimit_net->ipt_hashlimit) + return -ENOMEM; + #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) +- hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); ++ hashlimit_net->ip6t_hashlimit = proc_mkdir_restrict("ip6t_hashlimit", net->proc_net); + if (!hashlimit_net->ip6t_hashlimit) { + remove_proc_entry("ipt_hashlimit", net->proc_net); + return -ENOMEM; +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c +index 1e657cf..1eb1c34 100644 +--- a/net/netfilter/xt_recent.c ++++ b/net/netfilter/xt_recent.c +@@ -618,7 +618,7 @@ static int __net_init recent_proc_net_init(struct net *net) + { + struct recent_net *recent_net = recent_pernet(net); + +- recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net); ++ recent_net->xt_recent = proc_mkdir_restrict("xt_recent", net->proc_net); + if (!recent_net->xt_recent) + return -ENOMEM; + return 0; +diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c +index 11de55e..f25e448 100644 +--- a/net/netfilter/xt_statistic.c ++++ b/net/netfilter/xt_statistic.c +@@ -19,7 +19,7 @@ + #include <linux/module.h> + + struct xt_statistic_priv { +- atomic_t count; ++ atomic_unchecked_t count; + } ____cacheline_aligned_in_smp; + + MODULE_LICENSE("GPL"); +@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) + break; + case XT_STATISTIC_MODE_NTH: + do { +- oval = atomic_read(&info->master->count); ++ oval = atomic_read_unchecked(&info->master->count); + nval = (oval == info->u.nth.every) ? 
0 : oval + 1; +- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval); ++ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval); + if (nval == 0) + ret = !ret; + break; +@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par) + info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); + if (info->master == NULL) + return -ENOMEM; +- atomic_set(&info->master->count, info->u.nth.count); ++ atomic_set_unchecked(&info->master->count, info->u.nth.count); + + return 0; + } +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 0dfe894..7702a84 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk) + sk->sk_error_report(sk); + } + } +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + } + + static void netlink_rcv_wake(struct sock *sk) +@@ -3003,7 +3003,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v) + sk_wmem_alloc_get(s), + nlk->cb_running, + atomic_read(&s->sk_refcnt), +- atomic_read(&s->sk_drops), ++ atomic_read_unchecked(&s->sk_drops), + sock_i_ino(s) + ); + +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c +index b74aa07..d41926e 100644 +--- a/net/netrom/af_netrom.c ++++ b/net/netrom/af_netrom.c +@@ -850,7 +850,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr, + *uaddr_len = sizeof(struct full_sockaddr_ax25); + } else { + sax->fsa_ax25.sax25_family = AF_NETROM; +- sax->fsa_ax25.sax25_ndigis = 0; + sax->fsa_ax25.sax25_call = nr->source_addr; + *uaddr_len = sizeof(struct sockaddr_ax25); + } +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 48a6a93..d2c096b 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -635,6 +635,7 @@ static void init_prb_bdqc(struct packet_sock *po, + p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); + p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; + ++ p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); + prb_init_ft_ops(p1, req_u); + prb_setup_retire_blk_timer(po, tx_ring); + prb_open_block(p1, pbd); +@@ -1845,7 +1846,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, + + spin_lock(&sk->sk_receive_queue.lock); + po->stats.stats1.tp_packets++; +- skb->dropcount = atomic_read(&sk->sk_drops); ++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops); + __skb_queue_tail(&sk->sk_receive_queue, skb); + spin_unlock(&sk->sk_receive_queue.lock); + sk->sk_data_ready(sk, skb->len); +@@ -1854,7 +1855,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, + drop_n_acct: + spin_lock(&sk->sk_receive_queue.lock); + po->stats.stats1.tp_drops++; +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + spin_unlock(&sk->sk_receive_queue.lock); + + drop_n_restore: +@@ -1946,6 +1947,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, + if ((int)snaplen < 0) + snaplen = 0; + } ++ } else if (unlikely(macoff + snaplen > ++ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { ++ u32 nval; ++ ++ nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; ++ pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. 
macoff=%u\n", ++ snaplen, nval, macoff); ++ snaplen = nval; ++ if (unlikely((int)snaplen < 0)) { ++ snaplen = 0; ++ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; ++ } + } + spin_lock(&sk->sk_receive_queue.lock); + h.raw = packet_current_rx_frame(po, skb, +@@ -3449,7 +3462,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, + case PACKET_HDRLEN: + if (len > sizeof(int)) + len = sizeof(int); +- if (copy_from_user(&val, optval, len)) ++ if (len > sizeof(val) || copy_from_user(&val, optval, len)) + return -EFAULT; + switch (val) { + case TPACKET_V1: +@@ -3495,7 +3508,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, + len = lv; + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, data, len)) ++ if (len > sizeof(st) || copy_to_user(optval, data, len)) + return -EFAULT; + return 0; + } +@@ -3779,6 +3792,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + goto out; + if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) + goto out; ++ if (po->tp_version >= TPACKET_V3 && ++ (int)(req->tp_block_size - ++ BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) ++ goto out; + if (unlikely(req->tp_frame_size < po->tp_hdrlen + + po->tp_reserve)) + goto out; +diff --git a/net/packet/internal.h b/net/packet/internal.h +index eb9580a..cdddf6a 100644 +--- a/net/packet/internal.h ++++ b/net/packet/internal.h +@@ -29,6 +29,7 @@ struct tpacket_kbdq_core { + char *pkblk_start; + char *pkblk_end; + int kblk_size; ++ unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; +diff --git a/net/phonet/pep.c b/net/phonet/pep.c +index e774117..900b8b7 100644 +--- a/net/phonet/pep.c ++++ b/net/phonet/pep.c +@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) + + case PNS_PEP_CTRL_REQ: + if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + break; + } + __skb_pull(skb, 4); +@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) + } + + if (pn->rx_credits == 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + err = -ENOBUFS; + break; + } +@@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) + } + + if (pn->rx_credits == 0) { +- atomic_inc(&sk->sk_drops); ++ atomic_inc_unchecked(&sk->sk_drops); + err = NET_RX_DROP; + break; + } +diff --git a/net/phonet/socket.c b/net/phonet/socket.c +index 008214a..bb68240 100644 +--- a/net/phonet/socket.c ++++ b/net/phonet/socket.c +@@ -611,7 +611,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v) + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), + sock_i_ino(sk), + atomic_read(&sk->sk_refcnt), sk, +- atomic_read(&sk->sk_drops)); ++ atomic_read_unchecked(&sk->sk_drops)); + } + seq_pad(seq, '\n'); + return 0; +diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c +index c02a8c4..3c5b600 100644 +--- a/net/phonet/sysctl.c ++++ b/net/phonet/sysctl.c +@@ -67,7 +67,7 @@ static int proc_local_port_range(struct ctl_table *table, int write, + { + int ret; + int range[2] = {local_port_range[0], local_port_range[1]}; +- struct ctl_table tmp = { ++ ctl_table_no_const tmp = { + .data = &range, + .maxlen = sizeof(range), + .mode = table->mode, +diff --git a/net/rds/cong.c b/net/rds/cong.c +index e5b65ac..f3b6fb7 100644 +--- a/net/rds/cong.c ++++ b/net/rds/cong.c +@@ -78,7 +78,7 @@ + * finds that the saved generation number is smaller than the 
global generation + * number, it wakes up the process. + */ +-static atomic_t rds_cong_generation = ATOMIC_INIT(0); ++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0); + + /* + * Congestion monitoring +@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask) + rdsdebug("waking map %p for %pI4\n", + map, &map->m_addr); + rds_stats_inc(s_cong_update_received); +- atomic_inc(&rds_cong_generation); ++ atomic_inc_unchecked(&rds_cong_generation); + if (waitqueue_active(&map->m_waitq)) + wake_up(&map->m_waitq); + if (waitqueue_active(&rds_poll_waitq)) +@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated); + + int rds_cong_updated_since(unsigned long *recent) + { +- unsigned long gen = atomic_read(&rds_cong_generation); ++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation); + + if (likely(*recent == gen)) + return 0; +diff --git a/net/rds/ib.h b/net/rds/ib.h +index 7280ab8..e04f4ea 100644 +--- a/net/rds/ib.h ++++ b/net/rds/ib.h +@@ -128,7 +128,7 @@ struct rds_ib_connection { + /* sending acks */ + unsigned long i_ack_flags; + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_t i_ack_next; /* next ACK to send */ ++ atomic64_unchecked_t i_ack_next; /* next ACK to send */ + #else + spinlock_t i_ack_lock; /* protect i_ack_next */ + u64 i_ack_next; /* next ACK to send */ +diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c +index 31b74f5..dc1fbfa 100644 +--- a/net/rds/ib_cm.c ++++ b/net/rds/ib_cm.c +@@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn) + /* Clear the ACK state */ + clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_set(&ic->i_ack_next, 0); ++ atomic64_set_unchecked(&ic->i_ack_next, 0); + #else + ic->i_ack_next = 0; + #endif +diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c +index b7ebe23..b6352f6 100644 +--- a/net/rds/ib_recv.c ++++ b/net/rds/ib_recv.c +@@ -596,7 +596,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) + static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, + int ack_required) + { +- atomic64_set(&ic->i_ack_next, seq); ++ atomic64_set_unchecked(&ic->i_ack_next, seq); + if (ack_required) { + smp_mb__before_clear_bit(); + set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); +@@ -608,7 +608,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic) + clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); + smp_mb__after_clear_bit(); + +- return atomic64_read(&ic->i_ack_next); ++ return atomic64_read_unchecked(&ic->i_ack_next); + } + #endif + +diff --git a/net/rds/iw.h b/net/rds/iw.h +index 04ce3b1..48119a6 100644 +--- a/net/rds/iw.h ++++ b/net/rds/iw.h +@@ -134,7 +134,7 @@ struct rds_iw_connection { + /* sending acks */ + unsigned long i_ack_flags; + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_t i_ack_next; /* next ACK to send */ ++ atomic64_unchecked_t i_ack_next; /* next ACK to send */ + #else + spinlock_t i_ack_lock; /* protect i_ack_next */ + u64 i_ack_next; /* next ACK to send */ +diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c +index a91e1db..cf3053f 100644 +--- a/net/rds/iw_cm.c ++++ b/net/rds/iw_cm.c +@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn) + /* Clear the ACK state */ + clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); + #ifdef KERNEL_HAS_ATOMIC64 +- atomic64_set(&ic->i_ack_next, 0); ++ atomic64_set_unchecked(&ic->i_ack_next, 0); + #else + ic->i_ack_next = 0; + #endif +diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c +index 4503335..db566b4 100644 +--- a/net/rds/iw_recv.c ++++ b/net/rds/iw_recv.c +@@ -427,7 +427,7 @@ 
static u64 rds_iw_get_ack(struct rds_iw_connection *ic) + static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq, + int ack_required) + { +- atomic64_set(&ic->i_ack_next, seq); ++ atomic64_set_unchecked(&ic->i_ack_next, seq); + if (ack_required) { + smp_mb__before_clear_bit(); + set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); +@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic) + clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); + smp_mb__after_clear_bit(); + +- return atomic64_read(&ic->i_ack_next); ++ return atomic64_read_unchecked(&ic->i_ack_next); + } + #endif + +diff --git a/net/rds/rds.h b/net/rds/rds.h +index 48f8ffc..0ef3eec 100644 +--- a/net/rds/rds.h ++++ b/net/rds/rds.h +@@ -449,7 +449,7 @@ struct rds_transport { + void (*sync_mr)(void *trans_private, int direction); + void (*free_mr)(void *trans_private, int invalidate); + void (*flush_mrs)(void); +-}; ++} __do_const; + + struct rds_sock { + struct sock rs_sk; +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index edac9ef..16bcb98 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock) + int val = 1; + + set_fs(KERNEL_DS); +- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val, ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val, + sizeof(val)); + set_fs(oldfs); + } +diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c +index 81cf5a4..b5826ff 100644 +--- a/net/rds/tcp_send.c ++++ b/net/rds/tcp_send.c +@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val) + + oldfs = get_fs(); + set_fs(KERNEL_DS); +- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val, ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val, + sizeof(val)); + set_fs(oldfs); + } +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c +index e61aa60..f07cc89 100644 +--- a/net/rxrpc/af_rxrpc.c ++++ b/net/rxrpc/af_rxrpc.c +@@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops; + __be32 rxrpc_epoch; + + /* current debugging ID */ +-atomic_t rxrpc_debug_id; ++atomic_unchecked_t rxrpc_debug_id; + + /* count of skbs currently in use */ + atomic_t rxrpc_n_skbs; +diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c +index cd97a0c..0041649 100644 +--- a/net/rxrpc/ar-ack.c ++++ b/net/rxrpc/ar-ack.c +@@ -182,7 +182,7 @@ static void rxrpc_resend(struct rxrpc_call *call) + + _enter("{%d,%d,%d,%d},", + call->acks_hard, call->acks_unacked, +- atomic_read(&call->sequence), ++ atomic_read_unchecked(&call->sequence), + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); + + stop = 0; +@@ -206,7 +206,7 @@ static void rxrpc_resend(struct rxrpc_call *call) + + /* each Tx packet has a new serial number */ + sp->hdr.serial = +- htonl(atomic_inc_return(&call->conn->serial)); ++ htonl(atomic_inc_return_unchecked(&call->conn->serial)); + + hdr = (struct rxrpc_header *) txb->head; + hdr->serial = sp->hdr.serial; +@@ -410,7 +410,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) + */ + static void rxrpc_clear_tx_window(struct rxrpc_call *call) + { +- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); ++ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence)); + } + + /* +@@ -636,7 +636,7 @@ process_further: + + latest = ntohl(sp->hdr.serial); + hard = ntohl(ack.firstPacket); +- tx = atomic_read(&call->sequence); ++ tx = atomic_read_unchecked(&call->sequence); + + _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + latest, +@@ -1168,7 +1168,7 
@@ void rxrpc_process_call(struct work_struct *work) + goto maybe_reschedule; + + send_ACK_with_skew: +- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - ++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) - + ntohl(ack.serial)); + send_ACK: + mtu = call->conn->trans->peer->if_mtu; +@@ -1180,7 +1180,7 @@ send_ACK: + ackinfo.rxMTU = htonl(5692); + ackinfo.jumbo_max = htonl(4); + +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); + _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + ntohl(hdr.serial), + ntohs(ack.maxSkew), +@@ -1198,7 +1198,7 @@ send_ACK: + send_message: + _debug("send message"); + +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); + _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); + send_message_2: + +diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c +index a3bbb36..3341fb9 100644 +--- a/net/rxrpc/ar-call.c ++++ b/net/rxrpc/ar-call.c +@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) + spin_lock_init(&call->lock); + rwlock_init(&call->state_lock); + atomic_set(&call->usage, 1); +- call->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; + + memset(&call->sock_node, 0xed, sizeof(call->sock_node)); +diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c +index 7bf5b5b..4a3bf2c 100644 +--- a/net/rxrpc/ar-connection.c ++++ b/net/rxrpc/ar-connection.c +@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) + rwlock_init(&conn->lock); + spin_lock_init(&conn->state_lock); + atomic_set(&conn->usage, 1); +- conn->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + conn->avail_calls = RXRPC_MAXCALLS; + conn->size_align = 4; + conn->header_size = sizeof(struct rxrpc_header); +diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c +index e7ed43a..6afa140 100644 +--- a/net/rxrpc/ar-connevent.c ++++ b/net/rxrpc/ar-connevent.c +@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, + + len = iov[0].iov_len + iov[1].iov_len; + +- hdr.serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); +diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c +index 529572f..c758ca7 100644 +--- a/net/rxrpc/ar-input.c ++++ b/net/rxrpc/ar-input.c +@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) + /* track the latest serial number on this connection for ACK packet + * information */ + serial = ntohl(sp->hdr.serial); +- hi_serial = atomic_read(&call->conn->hi_serial); ++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial); + while (serial > hi_serial) +- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, ++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial, + serial); + + /* request ACK generation for any ACK or DATA packet that requests +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h +index 5f43675..ca07817 100644 +--- a/net/rxrpc/ar-internal.h ++++ b/net/rxrpc/ar-internal.h +@@ -272,8 +272,8 @@ struct 
rxrpc_connection { + int error; /* error code for local abort */ + int debug_id; /* debug ID for printks */ + unsigned int call_counter; /* call ID counter */ +- atomic_t serial; /* packet serial number counter */ +- atomic_t hi_serial; /* highest serial number received */ ++ atomic_unchecked_t serial; /* packet serial number counter */ ++ atomic_unchecked_t hi_serial; /* highest serial number received */ + u8 avail_calls; /* number of calls available */ + u8 size_align; /* data size alignment (for security) */ + u8 header_size; /* rxrpc + security header size */ +@@ -346,7 +346,7 @@ struct rxrpc_call { + spinlock_t lock; + rwlock_t state_lock; /* lock for state transition */ + atomic_t usage; +- atomic_t sequence; /* Tx data packet sequence counter */ ++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */ + u32 abort_code; /* local/remote abort code */ + enum { /* current state of call */ + RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ +@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) + */ + extern atomic_t rxrpc_n_skbs; + extern __be32 rxrpc_epoch; +-extern atomic_t rxrpc_debug_id; ++extern atomic_unchecked_t rxrpc_debug_id; + extern struct workqueue_struct *rxrpc_workqueue; + + /* +diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c +index 87f7135..74d3703 100644 +--- a/net/rxrpc/ar-local.c ++++ b/net/rxrpc/ar-local.c +@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) + spin_lock_init(&local->lock); + rwlock_init(&local->services_lock); + atomic_set(&local->usage, 1); +- local->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + memcpy(&local->srx, srx, sizeof(*srx)); + } + +diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c +index d0e8f1c..a3a1686 100644 +--- a/net/rxrpc/ar-output.c ++++ b/net/rxrpc/ar-output.c +@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb, + sp->hdr.cid = call->cid; + sp->hdr.callNumber = call->call_id; + sp->hdr.seq = +- htonl(atomic_inc_return(&call->sequence)); ++ htonl(atomic_inc_return_unchecked(&call->sequence)); + sp->hdr.serial = +- htonl(atomic_inc_return(&conn->serial)); ++ htonl(atomic_inc_return_unchecked(&conn->serial)); + sp->hdr.type = RXRPC_PACKET_TYPE_DATA; + sp->hdr.userStatus = 0; + sp->hdr.securityIndex = conn->security_ix; +diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c +index bebaa43..2644591 100644 +--- a/net/rxrpc/ar-peer.c ++++ b/net/rxrpc/ar-peer.c +@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, + INIT_LIST_HEAD(&peer->error_targets); + spin_lock_init(&peer->lock); + atomic_set(&peer->usage, 1); +- peer->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + memcpy(&peer->srx, srx, sizeof(*srx)); + + rxrpc_assess_MTU_size(peer); +diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c +index 38047f7..9f48511 100644 +--- a/net/rxrpc/ar-proc.c ++++ b/net/rxrpc/ar-proc.c +@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) + atomic_read(&conn->usage), + rxrpc_conn_states[conn->state], + key_serial(conn->key), +- atomic_read(&conn->serial), +- atomic_read(&conn->hi_serial)); ++ atomic_read_unchecked(&conn->serial), ++ atomic_read_unchecked(&conn->hi_serial)); + + return 0; + } +diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c +index 92df566..87ec1bf 100644 +--- 
a/net/rxrpc/ar-transport.c ++++ b/net/rxrpc/ar-transport.c +@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, + spin_lock_init(&trans->client_lock); + rwlock_init(&trans->conn_lock); + atomic_set(&trans->usage, 1); +- trans->debug_id = atomic_inc_return(&rxrpc_debug_id); ++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); + + if (peer->srx.transport.family == AF_INET) { + switch (peer->srx.transport_type) { +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index f226709..0e735a8 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) + + len = iov[0].iov_len + iov[1].iov_len; + +- hdr.serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); +@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, + + len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; + +- hdr->serial = htonl(atomic_inc_return(&conn->serial)); ++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial)); + _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 2b1738e..a9d0fc9 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -966,7 +966,7 @@ static const struct inet6_protocol sctpv6_protocol = { + .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, + }; + +-static struct sctp_af sctp_af_inet6 = { ++static struct sctp_af sctp_af_inet6 __read_only = { + .sa_family = AF_INET6, + .sctp_xmit = sctp_v6_xmit, + .setsockopt = ipv6_setsockopt, +@@ -998,7 +998,7 @@ static struct sctp_af sctp_af_inet6 = { + #endif + }; + +-static struct sctp_pf sctp_pf_inet6 = { ++static struct sctp_pf sctp_pf_inet6 __read_only = { + .event_msgname = sctp_inet6_event_msgname, + .skb_msgname = sctp_inet6_skb_msgname, + .af_supported = sctp_inet6_af_supported, +@@ -1023,7 +1023,7 @@ void sctp_v6_pf_init(void) + + void sctp_v6_pf_exit(void) + { +- list_del(&sctp_af_inet6.list); ++ pax_list_del(&sctp_af_inet6.list); + } + + /* Initialize IPv6 support and register with socket layer. */ +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c +index a62a215..0976540 100644 +--- a/net/sctp/protocol.c ++++ b/net/sctp/protocol.c +@@ -836,8 +836,10 @@ int sctp_register_af(struct sctp_af *af) + return 0; + } + ++ pax_open_kernel(); + INIT_LIST_HEAD(&af->list); +- list_add_tail(&af->list, &sctp_address_families); ++ pax_close_kernel(); ++ pax_list_add_tail(&af->list, &sctp_address_families); + return 1; + } + +@@ -967,7 +969,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, + + static struct sctp_af sctp_af_inet; + +-static struct sctp_pf sctp_pf_inet = { ++static struct sctp_pf sctp_pf_inet __read_only = { + .event_msgname = sctp_inet_event_msgname, + .skb_msgname = sctp_inet_skb_msgname, + .af_supported = sctp_inet_af_supported, +@@ -1039,7 +1041,7 @@ static const struct net_protocol sctp_protocol = { + }; + + /* IPv4 address related functions. 
*/ +-static struct sctp_af sctp_af_inet = { ++static struct sctp_af sctp_af_inet __read_only = { + .sa_family = AF_INET, + .sctp_xmit = sctp_v4_xmit, + .setsockopt = ip_setsockopt, +@@ -1124,7 +1126,7 @@ static void sctp_v4_pf_init(void) + + static void sctp_v4_pf_exit(void) + { +- list_del(&sctp_af_inet.list); ++ pax_list_del(&sctp_af_inet.list); + } + + static int sctp_v4_protosw_init(void) +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c +index fef2acd..c705c4f 100644 +--- a/net/sctp/sm_sideeffect.c ++++ b/net/sctp/sm_sideeffect.c +@@ -439,7 +439,7 @@ static void sctp_generate_sack_event(unsigned long data) + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); + } + +-sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { ++sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { + NULL, + sctp_generate_t1_cookie_event, + sctp_generate_t1_init_event, +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 604a6ac..f87f0a3 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -2175,11 +2175,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval, + { + struct sctp_association *asoc; + struct sctp_ulpevent *event; ++ struct sctp_event_subscribe subscribe; + + if (optlen > sizeof(struct sctp_event_subscribe)) + return -EINVAL; +- if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) ++ if (copy_from_user(&subscribe, optval, optlen)) + return -EFAULT; ++ sctp_sk(sk)->subscribe = subscribe; + + /* + * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, +@@ -4259,13 +4261,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, + static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, + int __user *optlen) + { ++ struct sctp_event_subscribe subscribe; ++ + if (len <= 0) + return -EINVAL; + if (len > sizeof(struct sctp_event_subscribe)) + len = sizeof(struct sctp_event_subscribe); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) ++ subscribe = sctp_sk(sk)->subscribe; ++ if (copy_to_user(optval, &subscribe, len)) + return -EFAULT; + return 0; + } +@@ -4283,6 +4288,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, + */ + static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) + { ++ __u32 autoclose; ++ + /* Applicable to UDP-style socket only */ + if (sctp_style(sk, TCP)) + return -EOPNOTSUPP; +@@ -4291,7 +4298,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv + len = sizeof(int); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) ++ autoclose = sctp_sk(sk)->autoclose; ++ if (copy_to_user(optval, &autoclose, sizeof(int))) + return -EFAULT; + return 0; + } +@@ -4666,12 +4674,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, + */ + static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) + { ++ struct sctp_initmsg initmsg; ++ + if (len < sizeof(struct sctp_initmsg)) + return -EINVAL; + len = sizeof(struct sctp_initmsg); + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) ++ initmsg = sctp_sk(sk)->initmsg; ++ if (copy_to_user(optval, &initmsg, len)) + return -EFAULT; + return 0; + } +@@ -4712,6 +4723,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, + addrlen = 
sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; + if (space_left < addrlen) + return -ENOMEM; ++ if (addrlen > sizeof(temp) || addrlen < 0) ++ return -EFAULT; + if (copy_to_user(to, &temp, addrlen)) + return -EFAULT; + to += addrlen; +diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c +index dfa532f..1dcfb44 100644 +--- a/net/sctp/sysctl.c ++++ b/net/sctp/sysctl.c +@@ -307,7 +307,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, + loff_t *ppos) + { + struct net *net = current->nsproxy->net_ns; +- struct ctl_table tbl; ++ ctl_table_no_const tbl; + bool changed = false; + char *none = "none"; + char tmp[8]; +@@ -355,7 +355,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, + struct net *net = current->nsproxy->net_ns; + unsigned int min = *(unsigned int *) ctl->extra1; + unsigned int max = *(unsigned int *) ctl->extra2; +- struct ctl_table tbl; ++ ctl_table_no_const tbl; + int ret, new_value; + + memset(&tbl, 0, sizeof(struct ctl_table)); +@@ -384,7 +384,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, + struct net *net = current->nsproxy->net_ns; + unsigned int min = *(unsigned int *) ctl->extra1; + unsigned int max = *(unsigned int *) ctl->extra2; +- struct ctl_table tbl; ++ ctl_table_no_const tbl; + int ret, new_value; + + memset(&tbl, 0, sizeof(struct ctl_table)); +@@ -411,7 +411,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write, + loff_t *ppos) + { + struct net *net = current->nsproxy->net_ns; +- struct ctl_table tbl; ++ ctl_table_no_const tbl; + int new_value, ret; + + memset(&tbl, 0, sizeof(struct ctl_table)); +@@ -438,7 +438,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write, + + int sctp_sysctl_net_register(struct net *net) + { +- struct ctl_table *table = sctp_net_table; ++ ctl_table_no_const *table = NULL; + + if (!net_eq(net, &init_net)) { + int i; +@@ -451,7 +451,10 @@ int sctp_sysctl_net_register(struct net *net) + table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; + } + +- net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); ++ if (!net_eq(net, &init_net)) ++ net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); ++ else ++ net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", sctp_net_table); + return 0; + } + +diff --git a/net/socket.c b/net/socket.c +index a19ae19..89554dc 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -88,6 +88,7 @@ + #include <linux/magic.h> + #include <linux/slab.h> + #include <linux/xattr.h> ++#include <linux/in.h> + + #include <asm/uaccess.h> + #include <asm/unistd.h> +@@ -111,6 +112,8 @@ unsigned int sysctl_net_busy_read __read_mostly; + unsigned int sysctl_net_busy_poll __read_mostly; + #endif + ++#include <linux/grsock.h> ++ + static int sock_no_open(struct inode *irrelevant, struct file *dontcare); + static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); +@@ -162,7 +165,7 @@ static const struct file_operations socket_file_ops = { + */ + + static DEFINE_SPINLOCK(net_family_lock); +-static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; ++const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; + + /* + * Statistics counters of the socket lists +@@ -328,7 +331,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type, + &sockfs_dentry_operations, SOCKFS_MAGIC); + } + +-static struct vfsmount *sock_mnt __read_mostly; ++struct vfsmount *sock_mnt __read_mostly; + + static struct 
file_system_type sock_fs_type = { + .name = "sockfs", +@@ -1256,6 +1259,8 @@ int __sock_create(struct net *net, int family, int type, int protocol, + return -EAFNOSUPPORT; + if (type < 0 || type >= SOCK_MAX) + return -EINVAL; ++ if (protocol < 0) ++ return -EINVAL; + + /* Compatibility. + +@@ -1276,6 +1281,20 @@ int __sock_create(struct net *net, int family, int type, int protocol, + if (err) + return err; + ++ if(!kern && !gr_search_socket(family, type, protocol)) { ++ if (rcu_access_pointer(net_families[family]) == NULL) ++ return -EAFNOSUPPORT; ++ else ++ return -EACCES; ++ } ++ ++ if (!kern && gr_handle_sock_all(family, type, protocol)) { ++ if (rcu_access_pointer(net_families[family]) == NULL) ++ return -EAFNOSUPPORT; ++ else ++ return -EACCES; ++ } ++ + /* + * Allocate the socket and allow the family to set things up. if + * the protocol is 0, the family is instructed to select an appropriate +@@ -1527,6 +1546,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) + if (sock) { + err = move_addr_to_kernel(umyaddr, addrlen, &address); + if (err >= 0) { ++ if (gr_handle_sock_server((struct sockaddr *)&address)) { ++ err = -EACCES; ++ goto error; ++ } ++ err = gr_search_bind(sock, (struct sockaddr_in *)&address); ++ if (err) ++ goto error; ++ + err = security_socket_bind(sock, + (struct sockaddr *)&address, + addrlen); +@@ -1535,6 +1562,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) + (struct sockaddr *) + &address, addrlen); + } ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1558,10 +1586,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) + if ((unsigned int)backlog > somaxconn) + backlog = somaxconn; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ goto error; ++ } ++ ++ err = gr_search_listen(sock); ++ if (err) ++ goto error; ++ + err = security_socket_listen(sock, backlog); + if (!err) + err = sock->ops->listen(sock, backlog); + ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1605,6 +1643,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, + newsock->type = sock->type; + newsock->ops = sock->ops; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ sock_release(newsock); ++ goto out_put; ++ } ++ ++ err = gr_search_accept(sock); ++ if (err) { ++ sock_release(newsock); ++ goto out_put; ++ } ++ + /* + * We don't need try_module_get here, as the listening socket (sock) + * has the protocol module (sock->ops->owner) held. 
+@@ -1650,6 +1700,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, + fd_install(newfd, newfile); + err = newfd; + ++ gr_attach_curr_ip(newsock->sk); ++ + out_put: + fput_light(sock->file, fput_needed); + out: +@@ -1682,6 +1734,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, + int, addrlen) + { + struct socket *sock; ++ struct sockaddr *sck; + struct sockaddr_storage address; + int err, fput_needed; + +@@ -1692,6 +1745,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, + if (err < 0) + goto out_put; + ++ sck = (struct sockaddr *)&address; ++ ++ if (gr_handle_sock_client(sck)) { ++ err = -EACCES; ++ goto out_put; ++ } ++ ++ err = gr_search_connect(sock, (struct sockaddr_in *)sck); ++ if (err) ++ goto out_put; ++ + err = + security_socket_connect(sock, (struct sockaddr *)&address, addrlen); + if (err) +@@ -1773,6 +1837,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, + * the protocol. + */ + ++asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int); ++ + SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, + unsigned int, flags, struct sockaddr __user *, addr, + int, addr_len) +@@ -1839,7 +1905,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, + struct socket *sock; + struct iovec iov; + struct msghdr msg; +- struct sockaddr_storage address; ++ struct sockaddr_storage address = { }; + int err, err2; + int fput_needed; + +@@ -2065,7 +2131,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, + * checking falls down on this. + */ + if (copy_from_user(ctl_buf, +- (void __user __force *)msg_sys->msg_control, ++ (void __force_user *)msg_sys->msg_control, + ctl_len)) + goto out_freectl; + msg_sys->msg_control = ctl_buf; +@@ -2216,7 +2282,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, + int err, total_len, len; + + /* kernel mode address */ +- struct sockaddr_storage addr; ++ struct sockaddr_storage addr = { }; + + /* user mode address pointers */ + struct sockaddr __user *uaddr; +@@ -2245,7 +2311,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, + /* Save the user-mode address (verify_iovec will change the + * kernel msghdr to use the kernel address space) + */ +- uaddr = (__force void __user *)msg_sys->msg_name; ++ uaddr = (void __force_user *)msg_sys->msg_name; + uaddr_len = COMPAT_NAMELEN(msg); + if (MSG_CMSG_COMPAT & flags) + err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); +@@ -2889,7 +2955,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) + ifr = compat_alloc_user_space(buf_size); + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); + +- if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) ++ if (copy_in_user(ifr->ifr_name, ifr32->ifr_name, IFNAMSIZ)) + return -EFAULT; + + if (put_user(convert_in ? 
rxnfc : compat_ptr(data), +@@ -3000,7 +3066,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = dev_ioctl(net, cmd, +- (struct ifreq __user __force *) &kifr); ++ (struct ifreq __force_user *) &kifr); + set_fs(old_fs); + + return err; +@@ -3093,7 +3159,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, + + old_fs = get_fs(); + set_fs(KERNEL_DS); +- err = dev_ioctl(net, cmd, (void __user __force *)&ifr); ++ err = dev_ioctl(net, cmd, (void __force_user *)&ifr); + set_fs(old_fs); + + if (cmd == SIOCGIFMAP && !err) { +@@ -3177,7 +3243,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, + ret |= get_user(rtdev, &(ur4->rt_dev)); + if (rtdev) { + ret |= copy_from_user(devname, compat_ptr(rtdev), 15); +- r4.rt_dev = (char __user __force *)devname; ++ r4.rt_dev = (char __force_user *)devname; + devname[15] = 0; + } else + r4.rt_dev = NULL; +@@ -3404,8 +3470,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, + int __user *uoptlen; + int err; + +- uoptval = (char __user __force *) optval; +- uoptlen = (int __user __force *) optlen; ++ uoptval = (char __force_user *) optval; ++ uoptlen = (int __force_user *) optlen; + + set_fs(KERNEL_DS); + if (level == SOL_SOCKET) +@@ -3425,7 +3491,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, + char __user *uoptval; + int err; + +- uoptval = (char __user __force *) optval; ++ uoptval = (char __force_user *) optval; + + set_fs(KERNEL_DS); + if (level == SOL_SOCKET) +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index 0f73f45..a96aa52 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -1140,7 +1140,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd, + uint64_t *handle) + { + struct rsc rsci, *rscp = NULL; +- static atomic64_t ctxhctr; ++ static atomic64_unchecked_t ctxhctr = ATOMIC64_INIT(0); + long long ctxh; + struct gss_api_mech *gm = NULL; + time_t expiry; +@@ -1151,7 +1151,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd, + status = -ENOMEM; + /* the handle needs to be just a unique id, + * use a static counter */ +- ctxh = atomic64_inc_return(&ctxhctr); ++ ctxh = atomic64_inc_return_unchecked(&ctxhctr); + + /* make a copy for the caller */ + *handle = ctxh; +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c +index ae333c1..18521f0 100644 +--- a/net/sunrpc/cache.c ++++ b/net/sunrpc/cache.c +@@ -1609,7 +1609,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) + struct sunrpc_net *sn; + + sn = net_generic(net, sunrpc_net_id); +- cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc); ++ cd->u.procfs.proc_ent = proc_mkdir_restrict(cd->name, sn->proc_net_rpc); + if (cd->u.procfs.proc_ent == NULL) + goto out_nomem; + cd->u.procfs.channel_ent = NULL; +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index 3ea5cda..bfb3e08 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -1415,7 +1415,9 @@ call_start(struct rpc_task *task) + (RPC_IS_ASYNC(task) ? 
"async" : "sync")); + + /* Increment call count */ +- task->tk_msg.rpc_proc->p_count++; ++ pax_open_kernel(); ++ (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++; ++ pax_close_kernel(); + clnt->cl_stats->rpccnt++; + task->tk_action = call_reserve; + } +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index ff3cc4b..7612a9e 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(void *word) + #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) + static void rpc_task_set_debuginfo(struct rpc_task *task) + { +- static atomic_t rpc_pid; ++ static atomic_unchecked_t rpc_pid; + +- task->tk_pid = atomic_inc_return(&rpc_pid); ++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid); + } + #else + static inline void rpc_task_set_debuginfo(struct rpc_task *task) +diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c +index 5453049..465669a 100644 +--- a/net/sunrpc/stats.c ++++ b/net/sunrpc/stats.c +@@ -267,7 +267,7 @@ int rpc_proc_init(struct net *net) + + dprintk("RPC: registering /proc/net/rpc\n"); + sn = net_generic(net, sunrpc_net_id); +- sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net); ++ sn->proc_net_rpc = proc_mkdir_restrict("rpc", net->proc_net); + if (sn->proc_net_rpc == NULL) + return -ENOMEM; + +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c +index 5de6801..b4e330d 100644 +--- a/net/sunrpc/svc.c ++++ b/net/sunrpc/svc.c +@@ -1167,7 +1167,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + svc_putnl(resv, RPC_SUCCESS); + + /* Bump per-procedure stats counter */ +- procp->pc_count++; ++ pax_open_kernel(); ++ (*(unsigned int *)&procp->pc_count)++; ++ pax_close_kernel(); + + /* Initialize storage for argp and resp */ + memset(rqstp->rq_argp, 0, procp->pc_argsize); +diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c +index 621ca7b..59421dd 100644 +--- a/net/sunrpc/svcauth_unix.c ++++ b/net/sunrpc/svcauth_unix.c +@@ -414,7 +414,7 @@ struct unix_gid { + struct group_info *gi; + }; + +-static int unix_gid_hash(kuid_t uid) ++static int __intentional_overflow(-1) unix_gid_hash(kuid_t uid) + { + return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS); + } +@@ -470,7 +470,7 @@ static void unix_gid_request(struct cache_detail *cd, + (*bpp)[-1] = '\n'; + } + +-static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid); ++static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(struct cache_detail *cd, kuid_t uid); + + static int unix_gid_parse(struct cache_detail *cd, + char *mesg, int mlen) +diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c +index c1b6270..05089c1 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma.c ++++ b/net/sunrpc/xprtrdma/svc_rdma.c +@@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; + static unsigned int min_max_inline = 4096; + static unsigned int max_max_inline = 65536; + +-atomic_t rdma_stat_recv; +-atomic_t rdma_stat_read; +-atomic_t rdma_stat_write; +-atomic_t rdma_stat_sq_starve; +-atomic_t rdma_stat_rq_starve; +-atomic_t rdma_stat_rq_poll; +-atomic_t rdma_stat_rq_prod; +-atomic_t rdma_stat_sq_poll; +-atomic_t rdma_stat_sq_prod; ++atomic_unchecked_t rdma_stat_recv; ++atomic_unchecked_t rdma_stat_read; ++atomic_unchecked_t rdma_stat_write; ++atomic_unchecked_t rdma_stat_sq_starve; ++atomic_unchecked_t rdma_stat_rq_starve; ++atomic_unchecked_t rdma_stat_rq_poll; ++atomic_unchecked_t rdma_stat_rq_prod; ++atomic_unchecked_t rdma_stat_sq_poll; ++atomic_unchecked_t 
rdma_stat_sq_prod; + + /* Temporary NFS request map and context caches */ + struct kmem_cache *svc_rdma_map_cachep; +@@ -110,7 +110,7 @@ static int read_reset_stat(struct ctl_table *table, int write, + len -= *ppos; + if (len > *lenp) + len = *lenp; +- if (len && copy_to_user(buffer, str_buf, len)) ++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len))) + return -EFAULT; + *lenp = len; + *ppos += len; +@@ -151,63 +151,63 @@ static struct ctl_table svcrdma_parm_table[] = { + { + .procname = "rdma_stat_read", + .data = &rdma_stat_read, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_recv", + .data = &rdma_stat_recv, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_write", + .data = &rdma_stat_write, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_sq_starve", + .data = &rdma_stat_sq_starve, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_rq_starve", + .data = &rdma_stat_rq_starve, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_rq_poll", + .data = &rdma_stat_rq_poll, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_rq_prod", + .data = &rdma_stat_rq_prod, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_sq_poll", + .data = &rdma_stat_sq_poll, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, + { + .procname = "rdma_stat_sq_prod", + .data = &rdma_stat_sq_prod, +- .maxlen = sizeof(atomic_t), ++ .maxlen = sizeof(atomic_unchecked_t), + .mode = 0644, + .proc_handler = read_reset_stat, + }, +diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +index 0ce7552..d074459 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +@@ -501,7 +501,7 @@ next_sge: + svc_rdma_put_context(ctxt, 0); + goto out; + } +- atomic_inc(&rdma_stat_read); ++ atomic_inc_unchecked(&rdma_stat_read); + + if (read_wr.num_sge < chl_map->ch[ch_no].count) { + chl_map->ch[ch_no].count -= read_wr.num_sge; +@@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) + dto_q); + list_del_init(&ctxt->dto_q); + } else { +- atomic_inc(&rdma_stat_rq_starve); ++ atomic_inc_unchecked(&rdma_stat_rq_starve); + clear_bit(XPT_DATA, &xprt->xpt_flags); + ctxt = NULL; + } +@@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) + dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", + ctxt, rdma_xprt, rqstp, ctxt->wc_status); + BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); +- atomic_inc(&rdma_stat_recv); ++ atomic_inc_unchecked(&rdma_stat_recv); + + /* Build up the XDR from the receive buffers. 
*/ + rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); +diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c +index c1d124d..acfc59e 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c +@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, + write_wr.wr.rdma.remote_addr = to; + + /* Post It */ +- atomic_inc(&rdma_stat_write); ++ atomic_inc_unchecked(&rdma_stat_write); + if (svc_rdma_send(xprt, &write_wr)) + goto err; + return 0; +diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c +index ed36cb5..c55d17f 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c +@@ -293,7 +293,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) + return; + + ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); +- atomic_inc(&rdma_stat_rq_poll); ++ atomic_inc_unchecked(&rdma_stat_rq_poll); + + while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { + ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; +@@ -315,7 +315,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) + } + + if (ctxt) +- atomic_inc(&rdma_stat_rq_prod); ++ atomic_inc_unchecked(&rdma_stat_rq_prod); + + set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); + /* +@@ -387,7 +387,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) + return; + + ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); +- atomic_inc(&rdma_stat_sq_poll); ++ atomic_inc_unchecked(&rdma_stat_sq_poll); + while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { + if (wc.status != IB_WC_SUCCESS) + /* Close the transport */ +@@ -405,7 +405,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) + } + + if (ctxt) +- atomic_inc(&rdma_stat_sq_prod); ++ atomic_inc_unchecked(&rdma_stat_sq_prod); + } + + static void sq_comp_handler(struct ib_cq *cq, void *cq_context) +@@ -1263,7 +1263,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) + spin_lock_bh(&xprt->sc_lock); + if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { + spin_unlock_bh(&xprt->sc_lock); +- atomic_inc(&rdma_stat_sq_starve); ++ atomic_inc_unchecked(&rdma_stat_sq_starve); + + /* See if we can opportunistically reap SQ WR to make room */ + sq_cq_reap(xprt); +diff --git a/net/sysctl_net.c b/net/sysctl_net.c +index e7000be..e3b0ba7 100644 +--- a/net/sysctl_net.c ++++ b/net/sysctl_net.c +@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head, + kgid_t root_gid = make_kgid(net->user_ns, 0); + + /* Allow network administrator to have same access as root. 
*/ +- if (ns_capable(net->user_ns, CAP_NET_ADMIN) || ++ if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) || + uid_eq(root_uid, current_euid())) { + int mode = (table->mode >> 6) & 7; + return (mode << 6) | (mode << 3) | mode; +diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c +index 6424372..afd36e9 100644 +--- a/net/tipc/subscr.c ++++ b/net/tipc/subscr.c +@@ -97,7 +97,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, + struct tipc_subscriber *subscriber = sub->subscriber; + struct kvec msg_sect; + +- msg_sect.iov_base = (void *)&sub->evt; ++ msg_sect.iov_base = &sub->evt; + msg_sect.iov_len = sizeof(struct tipc_event); + sub->evt.event = htohl(event, sub->swap); + sub->evt.found_lower = htohl(found_lower, sub->swap); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 94404f1..5782191 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -791,6 +791,12 @@ static struct sock *unix_find_other(struct net *net, + err = -ECONNREFUSED; + if (!S_ISSOCK(inode->i_mode)) + goto put_fail; ++ ++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) { ++ err = -EACCES; ++ goto put_fail; ++ } ++ + u = unix_find_socket_byinode(inode); + if (!u) + goto put_fail; +@@ -811,6 +817,13 @@ static struct sock *unix_find_other(struct net *net, + if (u) { + struct dentry *dentry; + dentry = unix_sk(u)->path.dentry; ++ ++ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) { ++ err = -EPERM; ++ sock_put(u); ++ goto fail; ++ } ++ + if (dentry) + touch_atime(&unix_sk(u)->path); + } else +@@ -844,12 +857,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res) + */ + err = security_path_mknod(&path, dentry, mode, 0); + if (!err) { ++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) { ++ err = -EACCES; ++ goto out; ++ } + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); + if (!err) { + res->mnt = mntget(path.mnt); + res->dentry = dget(dentry); ++ gr_handle_create(dentry, path.mnt); + } + } ++out: + done_path_create(&path, dentry); + return err; + } +@@ -2344,9 +2363,13 @@ static int unix_seq_show(struct seq_file *seq, void *v) + seq_puts(seq, "Num RefCount Protocol Flags Type St " + "Inode Path\n"); + else { +- struct sock *s = v; ++ struct sock *s = v, *peer; + struct unix_sock *u = unix_sk(s); + unix_state_lock(s); ++ peer = unix_peer(s); ++ unix_state_unlock(s); ++ ++ unix_state_double_lock(s, peer); + + seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", + s, +@@ -2373,8 +2396,10 @@ static int unix_seq_show(struct seq_file *seq, void *v) + } + for ( ; i < len; i++) + seq_putc(seq, u->addr->name->sun_path[i]); +- } +- unix_state_unlock(s); ++ } else if (peer) ++ seq_printf(seq, " P%lu", sock_i_ino(peer)); ++ ++ unix_state_double_unlock(s, peer); + seq_putc(seq, '\n'); + } + +diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c +index b3d5150..ff3a837 100644 +--- a/net/unix/sysctl_net_unix.c ++++ b/net/unix/sysctl_net_unix.c +@@ -28,7 +28,7 @@ static struct ctl_table unix_table[] = { + + int __net_init unix_sysctl_register(struct net *net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL); + if (table == NULL) +diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c +index 9a73074..aecba9a 100644 +--- a/net/vmw_vsock/vmci_transport_notify.c ++++ b/net/vmw_vsock/vmci_transport_notify.c +@@ -662,19 +662,19 @@ static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk) + + /* Socket 
control packet based operations. */ + struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = { +- vmci_transport_notify_pkt_socket_init, +- vmci_transport_notify_pkt_socket_destruct, +- vmci_transport_notify_pkt_poll_in, +- vmci_transport_notify_pkt_poll_out, +- vmci_transport_notify_pkt_handle_pkt, +- vmci_transport_notify_pkt_recv_init, +- vmci_transport_notify_pkt_recv_pre_block, +- vmci_transport_notify_pkt_recv_pre_dequeue, +- vmci_transport_notify_pkt_recv_post_dequeue, +- vmci_transport_notify_pkt_send_init, +- vmci_transport_notify_pkt_send_pre_block, +- vmci_transport_notify_pkt_send_pre_enqueue, +- vmci_transport_notify_pkt_send_post_enqueue, +- vmci_transport_notify_pkt_process_request, +- vmci_transport_notify_pkt_process_negotiate, ++ .socket_init = vmci_transport_notify_pkt_socket_init, ++ .socket_destruct = vmci_transport_notify_pkt_socket_destruct, ++ .poll_in = vmci_transport_notify_pkt_poll_in, ++ .poll_out = vmci_transport_notify_pkt_poll_out, ++ .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt, ++ .recv_init = vmci_transport_notify_pkt_recv_init, ++ .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block, ++ .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue, ++ .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue, ++ .send_init = vmci_transport_notify_pkt_send_init, ++ .send_pre_block = vmci_transport_notify_pkt_send_pre_block, ++ .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue, ++ .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue, ++ .process_request = vmci_transport_notify_pkt_process_request, ++ .process_negotiate = vmci_transport_notify_pkt_process_negotiate, + }; +diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c +index 622bd7a..b92086c 100644 +--- a/net/vmw_vsock/vmci_transport_notify_qstate.c ++++ b/net/vmw_vsock/vmci_transport_notify_qstate.c +@@ -420,19 +420,19 @@ vmci_transport_notify_pkt_send_pre_enqueue( + + /* Socket always on control packet based operations. 
*/ + struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = { +- vmci_transport_notify_pkt_socket_init, +- vmci_transport_notify_pkt_socket_destruct, +- vmci_transport_notify_pkt_poll_in, +- vmci_transport_notify_pkt_poll_out, +- vmci_transport_notify_pkt_handle_pkt, +- vmci_transport_notify_pkt_recv_init, +- vmci_transport_notify_pkt_recv_pre_block, +- vmci_transport_notify_pkt_recv_pre_dequeue, +- vmci_transport_notify_pkt_recv_post_dequeue, +- vmci_transport_notify_pkt_send_init, +- vmci_transport_notify_pkt_send_pre_block, +- vmci_transport_notify_pkt_send_pre_enqueue, +- vmci_transport_notify_pkt_send_post_enqueue, +- vmci_transport_notify_pkt_process_request, +- vmci_transport_notify_pkt_process_negotiate, ++ .socket_init = vmci_transport_notify_pkt_socket_init, ++ .socket_destruct = vmci_transport_notify_pkt_socket_destruct, ++ .poll_in = vmci_transport_notify_pkt_poll_in, ++ .poll_out = vmci_transport_notify_pkt_poll_out, ++ .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt, ++ .recv_init = vmci_transport_notify_pkt_recv_init, ++ .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block, ++ .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue, ++ .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue, ++ .send_init = vmci_transport_notify_pkt_send_init, ++ .send_pre_block = vmci_transport_notify_pkt_send_pre_block, ++ .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue, ++ .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue, ++ .process_request = vmci_transport_notify_pkt_process_request, ++ .process_negotiate = vmci_transport_notify_pkt_process_negotiate, + }; +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index c8717c1..08539f5 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + */ + + /* Support for very large requests */ +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && +- (user_length > descr->max_tokens)) { ++ if (user_length > descr->max_tokens) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. +@@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + } + } + +- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { +- /* +- * If this is a GET, but not NOMAX, it means that the extra +- * data is not bounded by userspace, but by max_tokens. Thus +- * set the length to max_tokens. This matches the extra data +- * allocation. +- * The driver should fill it with the number of tokens it +- * provided, and it may check iwp->length rather than having +- * knowledge of max_tokens. If the driver doesn't change the +- * iwp->length, this ioctl just copies back max_token tokens +- * filled with zeroes. Hopefully the driver isn't claiming +- * them to be valid data. 
+- */ +- iwp->length = descr->max_tokens; +- } +- + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + iwp->length += essid_compat; +diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c +index 4323952..a06dfe1 100644 +--- a/net/x25/sysctl_net_x25.c ++++ b/net/x25/sysctl_net_x25.c +@@ -70,7 +70,7 @@ static struct ctl_table x25_table[] = { + .mode = 0644, + .proc_handler = proc_dointvec, + }, +- { 0, }, ++ { }, + }; + + void __init x25_register_sysctl(void) +diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c +index 0917f04..f4e3d8c 100644 +--- a/net/x25/x25_proc.c ++++ b/net/x25/x25_proc.c +@@ -209,7 +209,7 @@ static const struct file_operations x25_seq_forward_fops = { + + int __init x25_proc_init(void) + { +- if (!proc_mkdir("x25", init_net.proc_net)) ++ if (!proc_mkdir_restrict("x25", init_net.proc_net)) + return -ENOMEM; + + if (!proc_create("x25/route", S_IRUGO, init_net.proc_net, +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 1d5c7bf..f762f1f 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -327,7 +327,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) + { + policy->walk.dead = 1; + +- atomic_inc(&policy->genid); ++ atomic_inc_unchecked(&policy->genid); + + if (del_timer(&policy->polq.hold_timer)) + xfrm_pol_put(policy); +@@ -661,7 +661,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) + hlist_add_head(&policy->bydst, chain); + xfrm_pol_hold(policy); + net->xfrm.policy_count[dir]++; +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + + /* After previous checking, family can either be AF_INET or AF_INET6 */ + if (policy->family == AF_INET) +@@ -1761,7 +1761,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, + + xdst->num_pols = num_pols; + memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); +- xdst->policy_genid = atomic_read(&pols[0]->genid); ++ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid); + + return xdst; + } +@@ -2572,11 +2572,12 @@ void xfrm_garbage_collect(struct net *net) + } + EXPORT_SYMBOL(xfrm_garbage_collect); + +-static void xfrm_garbage_collect_deferred(struct net *net) ++void xfrm_garbage_collect_deferred(struct net *net) + { + flow_cache_flush_deferred(); + __xfrm_garbage_collect(net); + } ++EXPORT_SYMBOL(xfrm_garbage_collect_deferred); + + static void xfrm_init_pmtu(struct dst_entry *dst) + { +@@ -2626,7 +2627,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first) + if (xdst->xfrm_genid != dst->xfrm->genid) + return 0; + if (xdst->num_pols > 0 && +- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) ++ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid)) + return 0; + + mtu = dst_mtu(dst->child); +@@ -2714,8 +2715,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) + dst_ops->link_failure = xfrm_link_failure; + if (likely(dst_ops->neigh_lookup == NULL)) + dst_ops->neigh_lookup = xfrm_neigh_lookup; +- if (likely(afinfo->garbage_collect == NULL)) +- afinfo->garbage_collect = xfrm_garbage_collect_deferred; + rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); + } + spin_unlock(&xfrm_policy_afinfo_lock); +@@ -2769,7 +2768,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) + dst_ops->check = NULL; + dst_ops->negative_advice = NULL; + dst_ops->link_failure = NULL; +- afinfo->garbage_collect = NULL; + } + return err; + } +@@ -3159,7 +3157,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, + 
sizeof(pol->xfrm_vec[i].saddr)); + pol->xfrm_vec[i].encap_family = mp->new_family; + /* flush bundles */ +- atomic_inc(&pol->genid); ++ atomic_inc_unchecked(&pol->genid); + } + } + +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index 40f1b3e..e33fdfa 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -172,12 +172,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family) + + if (unlikely(afinfo == NULL)) + return -EAFNOSUPPORT; +- typemap = afinfo->type_map; ++ typemap = (const struct xfrm_type **)afinfo->type_map; + spin_lock_bh(&xfrm_type_lock); + +- if (likely(typemap[type->proto] == NULL)) ++ if (likely(typemap[type->proto] == NULL)) { ++ pax_open_kernel(); + typemap[type->proto] = type; +- else ++ pax_close_kernel(); ++ } else + err = -EEXIST; + spin_unlock_bh(&xfrm_type_lock); + xfrm_state_put_afinfo(afinfo); +@@ -193,13 +195,16 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family) + + if (unlikely(afinfo == NULL)) + return -EAFNOSUPPORT; +- typemap = afinfo->type_map; ++ typemap = (const struct xfrm_type **)afinfo->type_map; + spin_lock_bh(&xfrm_type_lock); + + if (unlikely(typemap[type->proto] != type)) + err = -ENOENT; +- else ++ else { ++ pax_open_kernel(); + typemap[type->proto] = NULL; ++ pax_close_kernel(); ++ } + spin_unlock_bh(&xfrm_type_lock); + xfrm_state_put_afinfo(afinfo); + return err; +@@ -209,7 +214,6 @@ EXPORT_SYMBOL(xfrm_unregister_type); + static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family) + { + struct xfrm_state_afinfo *afinfo; +- const struct xfrm_type **typemap; + const struct xfrm_type *type; + int modload_attempted = 0; + +@@ -217,9 +221,8 @@ retry: + afinfo = xfrm_state_get_afinfo(family); + if (unlikely(afinfo == NULL)) + return NULL; +- typemap = afinfo->type_map; + +- type = typemap[proto]; ++ type = afinfo->type_map[proto]; + if (unlikely(type && !try_module_get(type->owner))) + type = NULL; + if (!type && !modload_attempted) { +@@ -253,7 +256,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family) + return -EAFNOSUPPORT; + + err = -EEXIST; +- modemap = afinfo->mode_map; ++ modemap = (struct xfrm_mode **)afinfo->mode_map; + spin_lock_bh(&xfrm_mode_lock); + if (modemap[mode->encap]) + goto out; +@@ -262,8 +265,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family) + if (!try_module_get(afinfo->owner)) + goto out; + +- mode->afinfo = afinfo; ++ pax_open_kernel(); ++ *(const void **)&mode->afinfo = afinfo; + modemap[mode->encap] = mode; ++ pax_close_kernel(); + err = 0; + + out: +@@ -287,10 +292,12 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family) + return -EAFNOSUPPORT; + + err = -ENOENT; +- modemap = afinfo->mode_map; ++ modemap = (struct xfrm_mode **)afinfo->mode_map; + spin_lock_bh(&xfrm_mode_lock); + if (likely(modemap[mode->encap] == mode)) { ++ pax_open_kernel(); + modemap[mode->encap] = NULL; ++ pax_close_kernel(); + module_put(mode->afinfo->owner); + err = 0; + } +@@ -1512,10 +1519,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq); + u32 xfrm_get_acqseq(void) + { + u32 res; +- static atomic_t acqseq; ++ static atomic_unchecked_t acqseq; + + do { +- res = atomic_inc_return(&acqseq); ++ res = atomic_inc_return_unchecked(&acqseq); + } while (!res); + + return res; +diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c +index 05a6e3d..6716ec9 100644 +--- a/net/xfrm/xfrm_sysctl.c ++++ b/net/xfrm/xfrm_sysctl.c +@@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = { + + int __net_init xfrm_sysctl_init(struct net 
*net) + { +- struct ctl_table *table; ++ ctl_table_no_const *table; + + __xfrm_sysctl_init(net); + +diff --git a/scripts/Makefile b/scripts/Makefile +index 01e7adb..6176d5d 100644 +--- a/scripts/Makefile ++++ b/scripts/Makefile +@@ -40,3 +40,5 @@ subdir-$(CONFIG_DTC) += dtc + + # Let clean descend into subdirs + subdir- += basic kconfig package selinux ++ ++clean-files := randstruct.seed +diff --git a/scripts/Makefile.build b/scripts/Makefile.build +index d5d859c..781cbcb 100644 +--- a/scripts/Makefile.build ++++ b/scripts/Makefile.build +@@ -111,7 +111,7 @@ endif + endif + + # Do not include host rules unless needed +-ifneq ($(hostprogs-y)$(hostprogs-m),) ++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),) + include scripts/Makefile.host + endif + +diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean +index 686cb0d..9d653bf 100644 +--- a/scripts/Makefile.clean ++++ b/scripts/Makefile.clean +@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn)) + __clean-files := $(extra-y) $(always) \ + $(targets) $(clean-files) \ + $(host-progs) \ +- $(hostprogs-y) $(hostprogs-m) $(hostprogs-) ++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \ ++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-) + + __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) + +diff --git a/scripts/Makefile.host b/scripts/Makefile.host +index 1ac414f..38575f7 100644 +--- a/scripts/Makefile.host ++++ b/scripts/Makefile.host +@@ -31,6 +31,8 @@ + # Note: Shared libraries consisting of C++ files are not supported + + __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m)) ++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m)) ++__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m)) + + # C code + # Executables compiled from a single .c file +@@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs))) + # Shared libaries (only .c supported) + # Shared libraries (.so) - all .so files referenced in "xxx-objs" + host-cshlib := $(sort $(filter %.so, $(host-cobjs))) ++host-cshlib += $(sort $(filter %.so, $(__hostlibs))) ++host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs))) + # Remove .so files from "xxx-objs" + host-cobjs := $(filter-out %.so,$(host-cobjs)) ++host-cxxobjs := $(filter-out %.so,$(host-cxxobjs)) + +-#Object (.o) files used by the shared libaries ++# Object (.o) files used by the shared libaries + host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs)))) ++host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs)))) + + # output directory for programs/.o files + # hostprogs-y := tools/build may have been specified. 
Retrieve directory +@@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs)) + host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti)) + host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs)) + host-cshlib := $(addprefix $(obj)/,$(host-cshlib)) ++host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib)) + host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs)) ++host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs)) + host-objdirs := $(addprefix $(obj)/,$(host-objdirs)) + + obj-dirs += $(host-objdirs) +@@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@ + $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE + $(call if_changed_dep,host-cshobjs) + ++# Compile .c file, create position independent .o file ++# host-cxxshobjs -> .o ++quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@ ++ cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $< ++$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE ++ $(call if_changed_dep,host-cxxshobjs) ++ + # Link a shared library, based on position independent .o files + # *.o -> .so shared library (host-cshlib) + quiet_cmd_host-cshlib = HOSTLLD -shared $@ +@@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@ + $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE + $(call if_changed,host-cshlib) + ++# Link a shared library, based on position independent .o files ++# *.o -> .so shared library (host-cxxshlib) ++quiet_cmd_host-cxxshlib = HOSTLLD -shared $@ ++ cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \ ++ $(addprefix $(obj)/,$($(@F:.so=-objs))) \ ++ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F)) ++$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE ++ $(call if_changed,host-cxxshlib) ++ + targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\ +- $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) ++ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs) + +diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c +index 078fe1d..fbdb363 100644 +--- a/scripts/basic/fixdep.c ++++ b/scripts/basic/fixdep.c +@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz) + /* + * Lookup a value in the configuration string. + */ +-static int is_defined_config(const char *name, int len, unsigned int hash) ++static int is_defined_config(const char *name, unsigned int len, unsigned int hash) + { + struct item *aux; + +@@ -211,10 +211,10 @@ static void clear_config(void) + /* + * Record the use of a CONFIG_* word. + */ +-static void use_config(const char *m, int slen) ++static void use_config(const char *m, unsigned int slen) + { + unsigned int hash = strhash(m, slen); +- int c, i; ++ unsigned int c, i; + + if (is_defined_config(m, slen, hash)) + return; +@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen) + + static void parse_config_file(const char *map, size_t len) + { +- const int *end = (const int *) (map + len); ++ const unsigned int *end = (const unsigned int *) (map + len); + /* start at +1, so that p can never be < map */ +- const int *m = (const int *) map + 1; ++ const unsigned int *m = (const unsigned int *) map + 1; + const char *p, *q; + + for (; m < end; m++) { +@@ -435,7 +435,7 @@ static void print_deps(void) + static void traps(void) + { + static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; +- int *p = (int *)test; ++ unsigned int *p = (unsigned int *)test; + + if (*p != INT_CONF) { + fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? 
%#x\n", +diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh +new file mode 100644 +index 0000000..3fd3699 +--- /dev/null ++++ b/scripts/gcc-plugin.sh +@@ -0,0 +1,43 @@ ++#!/bin/bash ++srctree=$(dirname "$0") ++gccplugins_dir=$($3 -print-file-name=plugin) ++plugincc=$($1 -E - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF ++#include "gcc-common.h" ++#if BUILDING_GCC_VERSION >= 4008 || defined(ENABLE_BUILD_WITH_CXX) ++#warning $2 CXX ++#else ++#warning $1 CC ++#endif ++EOF ++) ++ ++if [ $? -ne 0 ] ++then ++ exit 1 ++fi ++ ++if [[ "$plugincc" =~ "$1 CC" ]] ++then ++ echo "$1" ++ exit 0 ++fi ++ ++if [[ "$plugincc" =~ "$2 CXX" ]] ++then ++plugincc=$($1 -c -x c++ -std=gnu++98 - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF ++#include "gcc-common.h" ++class test { ++public: ++ int test; ++} test = { ++ .test = 1 ++}; ++EOF ++) ++if [ $? -eq 0 ] ++then ++ echo "$2" ++ exit 0 ++fi ++fi ++exit 1 +diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh +index 5de5660..d3deb89 100644 +--- a/scripts/headers_install.sh ++++ b/scripts/headers_install.sh +@@ -32,6 +32,7 @@ do + FILE="$(basename "$i")" + sed -r \ + -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \ ++ -e 's/__intentional_overflow\([- \t,0-9]*\)//g' \ + -e 's/__attribute_const__([ \t]|$)/\1/g' \ + -e 's@^#include <linux/compiler.h>@@' \ + -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \ +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh +index 2dcb377..a82c500 100644 +--- a/scripts/link-vmlinux.sh ++++ b/scripts/link-vmlinux.sh +@@ -162,7 +162,7 @@ else + fi; + + # final build of init/ +-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init ++${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}" + + kallsymso="" + kallsyms_vmlinux="" +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c +index 25e5cb0..6e85821 100644 +--- a/scripts/mod/file2alias.c ++++ b/scripts/mod/file2alias.c +@@ -142,7 +142,7 @@ static void device_id_check(const char *modname, const char *device_id, + unsigned long size, unsigned long id_size, + void *symval) + { +- int i; ++ unsigned int i; + + if (size % id_size || size < id_size) { + fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " +@@ -170,7 +170,7 @@ static void device_id_check(const char *modname, const char *device_id, + /* USB is special because the bcdDevice can be matched against a numeric range */ + /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */ + static void do_usb_entry(void *symval, +- unsigned int bcdDevice_initial, int bcdDevice_initial_digits, ++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits, + unsigned char range_lo, unsigned char range_hi, + unsigned char max, struct module *mod) + { +@@ -280,7 +280,7 @@ static void do_usb_entry_multi(void *symval, struct module *mod) + { + unsigned int devlo, devhi; + unsigned char chi, clo, max; +- int ndigits; ++ unsigned int ndigits; + + DEF_FIELD(symval, usb_device_id, match_flags); + DEF_FIELD(symval, usb_device_id, idVendor); +@@ -533,7 +533,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size, + for (i = 0; i < count; i++) { + DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id); + char acpi_id[sizeof(*id)]; +- int j; ++ unsigned int j; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS(\"pnp:d%s*\");\n", *id); +@@ -562,7 +562,7 @@ static void do_pnp_card_entries(void 
*symval, unsigned long size, + + for (j = 0; j < PNP_MAX_DEVICES; j++) { + const char *id = (char *)(*devs)[j].id; +- int i2, j2; ++ unsigned int i2, j2; + int dup = 0; + + if (!id[0]) +@@ -588,7 +588,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size, + /* add an individual alias for every device entry */ + if (!dup) { + char acpi_id[PNP_ID_LEN]; +- int k; ++ unsigned int k; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS(\"pnp:d%s*\");\n", id); +@@ -940,7 +940,7 @@ static void dmi_ascii_filter(char *d, const char *s) + static int do_dmi_entry(const char *filename, void *symval, + char *alias) + { +- int i, j; ++ unsigned int i, j; + DEF_FIELD_ADDR(symval, dmi_system_id, matches); + sprintf(alias, "dmi*"); + +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c +index 99a45fd..4b995a3 100644 +--- a/scripts/mod/modpost.c ++++ b/scripts/mod/modpost.c +@@ -945,6 +945,7 @@ enum mismatch { + ANY_INIT_TO_ANY_EXIT, + ANY_EXIT_TO_ANY_INIT, + EXPORT_TO_INIT_EXIT, ++ DATA_TO_TEXT + }; + + struct sectioncheck { +@@ -1031,6 +1032,12 @@ const struct sectioncheck sectioncheck[] = { + .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, + .mismatch = EXPORT_TO_INIT_EXIT, + .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, ++}, ++/* Do not reference code from writable data */ ++{ ++ .fromsec = { DATA_SECTIONS, NULL }, ++ .tosec = { TEXT_SECTIONS, NULL }, ++ .mismatch = DATA_TO_TEXT + } + }; + +@@ -1151,10 +1158,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr, + continue; + if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) + continue; +- if (sym->st_value == addr) +- return sym; + /* Find a symbol nearby - addr are maybe negative */ + d = sym->st_value - addr; ++ if (d == 0) ++ return sym; + if (d < 0) + d = addr - sym->st_value; + if (d < distance) { +@@ -1432,6 +1439,14 @@ static void report_sec_mismatch(const char *modname, + tosym, prl_to, prl_to, tosym); + free(prl_to); + break; ++ case DATA_TO_TEXT: ++#if 0 ++ fprintf(stderr, ++ "The %s %s:%s references\n" ++ "the %s %s:%s%s\n", ++ from, fromsec, fromsym, to, tosec, tosym, to_p); ++#endif ++ break; + } + fprintf(stderr, "\n"); + } +@@ -1679,7 +1694,7 @@ static void section_rel(const char *modname, struct elf_info *elf, + static void check_sec_ref(struct module *mod, const char *modname, + struct elf_info *elf) + { +- int i; ++ unsigned int i; + Elf_Shdr *sechdrs = elf->sechdrs; + + /* Walk through all sections */ +@@ -1798,7 +1813,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf, + va_end(ap); + } + +-void buf_write(struct buffer *buf, const char *s, int len) ++void buf_write(struct buffer *buf, const char *s, unsigned int len) + { + if (buf->size - buf->pos < len) { + buf->size += len + SZ; +@@ -2017,7 +2032,7 @@ static void write_if_changed(struct buffer *b, const char *fname) + if (fstat(fileno(file), &st) < 0) + goto close_write; + +- if (st.st_size != b->pos) ++ if (st.st_size != (off_t)b->pos) + goto close_write; + + tmp = NOFAIL(malloc(b->pos)); +diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h +index 51207e4..f7d603d 100644 +--- a/scripts/mod/modpost.h ++++ b/scripts/mod/modpost.h +@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr); + + struct buffer { + char *p; +- int pos; +- int size; ++ unsigned int pos; ++ unsigned int size; + }; + + void __attribute__((format(printf, 2, 3))) + buf_printf(struct buffer *buf, const char *fmt, ...); + + void +-buf_write(struct buffer *buf, const char *s, int len); ++buf_write(struct buffer *buf, 
const char *s, unsigned int len); + + struct module { + struct module *next; +diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c +index deb2994..af4f63e 100644 +--- a/scripts/mod/sumversion.c ++++ b/scripts/mod/sumversion.c +@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum, + goto out; + } + +- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { ++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) { + warn("writing sum in %s failed: %s\n", + filename, strerror(errno)); + goto out; +diff --git a/scripts/module-common.lds b/scripts/module-common.lds +index 0865b3e..7235dd4 100644 +--- a/scripts/module-common.lds ++++ b/scripts/module-common.lds +@@ -6,6 +6,10 @@ + SECTIONS { + /DISCARD/ : { *(.discard) } + ++ .rodata : { ++ *(.rodata) *(.rodata.*) ++ *(.data..read_only) ++ } + __ksymtab : { *(SORT(___ksymtab+*)) } + __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) } + __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) } +diff --git a/scripts/package/builddeb b/scripts/package/builddeb +index 152d4d2..791684c 100644 +--- a/scripts/package/builddeb ++++ b/scripts/package/builddeb +@@ -291,6 +291,7 @@ fi + (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles") + (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles") + (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles") ++(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles") + destdir=$kernel_headers_dir/usr/src/linux-headers-$version + mkdir -p "$destdir" + (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -) +diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c +index 68bb4ef..2f419e1 100644 +--- a/scripts/pnmtologo.c ++++ b/scripts/pnmtologo.c +@@ -244,14 +244,14 @@ static void write_header(void) + fprintf(out, " * Linux logo %s\n", logoname); + fputs(" */\n\n", out); + fputs("#include <linux/linux_logo.h>\n\n", out); +- fprintf(out, "static unsigned char %s_data[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_data[] = {\n", + logoname); + } + + static void write_footer(void) + { + fputs("\n};\n\n", out); +- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); ++ fprintf(out, "const struct linux_logo %s = {\n", logoname); + fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); + fprintf(out, "\t.width\t\t= %d,\n", logo_width); + fprintf(out, "\t.height\t\t= %d,\n", logo_height); +@@ -381,7 +381,7 @@ static void write_logo_clut224(void) + fputs("\n};\n\n", out); + + /* write logo clut */ +- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_clut[] = {\n", + logoname); + write_hex_cnt = 0; + for (i = 0; i < logo_clutsize; i++) { +diff --git a/scripts/sortextable.h b/scripts/sortextable.h +index 8fac3fd..32ff38d 100644 +--- a/scripts/sortextable.h ++++ b/scripts/sortextable.h +@@ -108,9 +108,9 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort) + const char *secstrtab; + const char *strtab; + char *extab_image; +- int extab_index = 0; +- int i; +- int idx; ++ unsigned int extab_index = 0; ++ unsigned int i; ++ unsigned int idx; + unsigned int num_sections; + unsigned int secindex_strings; + +diff --git a/security/Kconfig b/security/Kconfig +index beb86b5..40b1edb 100644 +--- a/security/Kconfig ++++ b/security/Kconfig +@@ -4,6 +4,957 @@ + + menu "Security options" + 
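Before the Kconfig additions begin, one recurring idiom in the hunks above is worth illustrating: the two vmci_transport_notify_ops tables are rewritten from positional to C99 designated initializers, so each handler is bound to a named field rather than to its position in the struct. A minimal standalone sketch (the struct and functions here are illustrative, not the kernel's):

#include <stdio.h>

struct ops {
	int (*init)(void);
	void (*destroy)(void);
};

static int my_init(void)     { puts("init");    return 0; }
static void my_destroy(void) { puts("destroy"); }

/* Positional form: silently mis-binds if fields are reordered or inserted. */
static const struct ops positional = { my_init, my_destroy };

/* Designated form: the compiler re-resolves each binding by field name
 * whenever the struct layout changes, and a wrong name is a hard error. */
static const struct ops designated = {
	.init    = my_init,
	.destroy = my_destroy,
};

int main(void)
{
	designated.init();
	designated.destroy();
	(void)positional;
	return 0;
}

Besides the robustness win, named-field initialization is what allows tooling (such as the constify plugin configured later in this patch) to reason about function-pointer tables reliably.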
++menu "Grsecurity" ++ ++ config ARCH_TRACK_EXEC_LIMIT ++ bool ++ ++ config PAX_KERNEXEC_PLUGIN ++ bool ++ ++ config PAX_PER_CPU_PGD ++ bool ++ ++ config TASK_SIZE_MAX_SHIFT ++ int ++ depends on X86_64 ++ default 47 if !PAX_PER_CPU_PGD ++ default 42 if PAX_PER_CPU_PGD ++ ++ config PAX_ENABLE_PAE ++ bool ++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM)) ++ ++ config PAX_USERCOPY_SLABS ++ bool ++ ++config GRKERNSEC ++ bool "Grsecurity" ++ select CRYPTO ++ select CRYPTO_SHA256 ++ select PROC_FS ++ select STOP_MACHINE ++ select TTY ++ select DEBUG_KERNEL ++ select DEBUG_LIST ++ help ++ If you say Y here, you will be able to configure many features ++ that will enhance the security of your system. It is highly ++ recommended that you say Y here and read through the help ++ for each option so that you fully understand the features and ++ can evaluate their usefulness for your machine. ++ ++choice ++ prompt "Configuration Method" ++ depends on GRKERNSEC ++ default GRKERNSEC_CONFIG_CUSTOM ++ help ++ ++config GRKERNSEC_CONFIG_AUTO ++ bool "Automatic" ++ help ++ If you choose this configuration method, you'll be able to answer a small ++ number of simple questions about how you plan to use this kernel. ++ The settings of grsecurity and PaX will be automatically configured for ++ the highest commonly-used settings within the provided constraints. ++ ++ If you require additional configuration, custom changes can still be made ++ from the "custom configuration" menu. ++ ++config GRKERNSEC_CONFIG_CUSTOM ++ bool "Custom" ++ help ++ If you choose this configuration method, you'll be able to configure all ++ grsecurity and PaX settings manually. Via this method, no options are ++ automatically enabled. ++ ++endchoice ++ ++choice ++ prompt "Usage Type" ++ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO) ++ default GRKERNSEC_CONFIG_SERVER ++ help ++ ++config GRKERNSEC_CONFIG_SERVER ++ bool "Server" ++ help ++ Choose this option if you plan to use this kernel on a server. ++ ++config GRKERNSEC_CONFIG_DESKTOP ++ bool "Desktop" ++ help ++ Choose this option if you plan to use this kernel on a desktop. ++ ++endchoice ++ ++choice ++ prompt "Virtualization Type" ++ depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO) ++ default GRKERNSEC_CONFIG_VIRT_NONE ++ help ++ ++config GRKERNSEC_CONFIG_VIRT_NONE ++ bool "None" ++ help ++ Choose this option if this kernel will be run on bare metal. ++ ++config GRKERNSEC_CONFIG_VIRT_GUEST ++ bool "Guest" ++ help ++ Choose this option if this kernel will be run as a VM guest. ++ ++config GRKERNSEC_CONFIG_VIRT_HOST ++ bool "Host" ++ help ++ Choose this option if this kernel will be run as a VM host. ++ ++endchoice ++ ++choice ++ prompt "Virtualization Hardware" ++ depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST)) ++ help ++ ++config GRKERNSEC_CONFIG_VIRT_EPT ++ bool "EPT/RVI Processor Support" ++ depends on X86 ++ help ++ Choose this option if your CPU supports the EPT or RVI features of 2nd-gen ++ hardware virtualization. This allows for additional kernel hardening protections ++ to operate without additional performance impact. 
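The EPT/RVI capability this option keys on can also be checked on a running system: Linux kernels commonly expose these CPU features in /proc/cpuinfo, as "ept" on Intel and "npt" on AMD. A rough userland probe, assuming those flag names (verify against your kernel's cpuinfo output):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* "ept" appears in the Intel VMX feature flags,
		 * "npt" in the AMD SVM feature flags. */
		if (strstr(line, " ept") || strstr(line, " npt")) {
			puts("second-gen hardware virtualization (EPT/RVI) present");
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	puts("no EPT/RVI flag found");
	return 1;
}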
++ ++ To see if your Intel processor supports EPT, see: ++ http://ark.intel.com/Products/VirtualizationTechnology ++ (Most Core i3/5/7 support EPT) ++ ++ To see if your AMD processor supports RVI, see: ++ http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx ++ ++config GRKERNSEC_CONFIG_VIRT_SOFT ++ bool "First-gen/No Hardware Virtualization" ++ help ++ Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't ++ support hardware virtualization or doesn't support the EPT/RVI extensions. ++ ++endchoice ++ ++choice ++ prompt "Virtualization Software" ++ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST)) ++ help ++ ++config GRKERNSEC_CONFIG_VIRT_XEN ++ bool "Xen" ++ help ++ Choose this option if this kernel is running as a Xen guest or host. ++ ++config GRKERNSEC_CONFIG_VIRT_VMWARE ++ bool "VMWare" ++ help ++ Choose this option if this kernel is running as a VMWare guest or host. ++ ++config GRKERNSEC_CONFIG_VIRT_KVM ++ bool "KVM" ++ help ++ Choose this option if this kernel is running as a KVM guest or host. ++ ++config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX ++ bool "VirtualBox" ++ help ++ Choose this option if this kernel is running as a VirtualBox guest or host. ++ ++endchoice ++ ++choice ++ prompt "Required Priorities" ++ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO) ++ default GRKERNSEC_CONFIG_PRIORITY_PERF ++ help ++ ++config GRKERNSEC_CONFIG_PRIORITY_PERF ++ bool "Performance" ++ help ++ Choose this option if performance is of highest priority for this deployment ++ of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing, ++ clearing of structures intended for userland, and freed memory sanitizing will ++ be disabled. ++ ++config GRKERNSEC_CONFIG_PRIORITY_SECURITY ++ bool "Security" ++ help ++ Choose this option if security is of highest priority for this deployment of ++ grsecurity. UDEREF, kernel stack clearing, clearing of structures intended ++ for userland, and freed memory sanitizing will be enabled for this kernel. ++ In a worst-case scenario, these features can introduce a 20% performance hit ++ (UDEREF on x64 contributing half of this hit). ++ ++endchoice ++ ++menu "Default Special Groups" ++depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO) ++ ++config GRKERNSEC_PROC_GID ++ int "GID exempted from /proc restrictions" ++ default 1001 ++ help ++ Setting this GID determines which group will be exempted from ++ grsecurity's /proc restrictions, allowing users of the specified ++ group to view network statistics and the existence of other users' ++ processes on the system. This GID may also be chosen at boot time ++ via "grsec_proc_gid=" on the kernel commandline. ++ ++config GRKERNSEC_TPE_UNTRUSTED_GID ++ int "GID for TPE-untrusted users" ++ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines which group untrusted users should ++ be added to. These users will be placed under grsecurity's Trusted Path ++ Execution mechanism, preventing them from executing their own binaries. ++ The users will only be able to execute binaries in directories owned and ++ writable only by the root user. If the sysctl option is enabled, a sysctl ++ option with name "tpe_gid" is created. 
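The TPE policy just described ("binaries in directories owned and writable only by the root user") reduces to a stat() of the candidate binary's parent directory. The in-kernel check lives in grsecurity's grsec_tpe.c; the following is only a simplified userland rendition of the directory test, with an illustrative name:

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <libgen.h>

/* Return 1 if 'path' would pass a TPE-style directory test: parent
 * directory owned by root and neither group- nor world-writable. */
static int tpe_dir_ok(const char *path)
{
	char buf[4096];
	struct stat st;

	snprintf(buf, sizeof(buf), "%s", path);  /* dirname() may modify its arg */
	if (stat(dirname(buf), &st) != 0)
		return 0;
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
	const char *p = argc > 1 ? argv[1] : "/bin/ls";

	printf("%s: %s\n", p, tpe_dir_ok(p) ? "allowed" : "denied by TPE policy");
	return 0;
}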
++ ++config GRKERNSEC_TPE_TRUSTED_GID ++ int "GID for TPE-trusted users" ++ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *disabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++ ++config GRKERNSEC_SYMLINKOWN_GID ++ int "GID for users with kernel-enforced SymlinksIfOwnerMatch" ++ depends on GRKERNSEC_CONFIG_SERVER ++ default 1006 ++ help ++ Setting this GID determines what group kernel-enforced ++ SymlinksIfOwnerMatch will be enabled for. If the sysctl option ++ is enabled, a sysctl option with name "symlinkown_gid" is created. ++ ++ ++endmenu ++ ++menu "Customize Configuration" ++depends on GRKERNSEC ++ ++menu "PaX" ++ ++config PAX ++ bool "Enable various PaX features" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) ++ help ++ This allows you to enable various PaX features. PaX adds ++ intrusion prevention mechanisms to the kernel that reduce ++ the risks posed by exploitable memory corruption bugs. ++ ++menu "PaX Control" ++ depends on PAX ++ ++config PAX_SOFTMODE ++ bool 'Support soft mode' ++ help ++ Enabling this option will allow you to run PaX in soft mode, that ++ is, PaX features will not be enforced by default, only on executables ++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS ++ support as they are the only way to mark executables for soft mode use. ++ ++ Soft mode can be activated by using the "pax_softmode=1" kernel command ++ line option on boot. Furthermore you can control various PaX features ++ at runtime via the entries in /proc/sys/kernel/pax. ++ ++config PAX_EI_PAX ++ bool 'Use legacy ELF header marking' ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'chpax' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ an otherwise reserved part of the ELF header. This marking has ++ numerous drawbacks (no support for soft-mode, toolchain does not ++ know about the non-standard use of the ELF header) therefore it ++ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS ++ support. ++ ++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking ++ support as well, they will override the legacy EI_PAX marks. ++ ++ If you enable none of the marking options then all applications ++ will run with PaX enabled on them by default. ++ ++config PAX_PT_PAX_FLAGS ++ bool 'Use ELF program header marking' ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'paxctl' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking ++ has the benefits of supporting both soft mode and being fully ++ integrated into the toolchain (the binutils patch is available ++ from http://pax.grsecurity.net). ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. ++ ++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you ++ must make sure that the marks are the same if a binary has both marks. 
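The PT_PAX_FLAGS marking referenced here is a PaX-specific program header; its type value is 0x65041580 (PT_LOOS + 0x5041580) per the PaX patch, and paxctl writes the per-feature bits into p_flags. A rough reader for native-endian ELF64 binaries (the constant is taken on that assumption; check pax's elf.h additions):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS 0x65041580  /* PT_LOOS + 0x5041580, per the PaX patch */

int main(int argc, char **argv)
{
	FILE *f;
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	int i;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	/* Assumes e_phentsize == sizeof(Elf64_Phdr), true for normal binaries. */
	fseek(f, (long)eh.e_phoff, SEEK_SET);
	for (i = 0; i < eh.e_phnum; i++) {
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_PAX_FLAGS) {
			printf("PT_PAX_FLAGS found, p_flags = %#lx\n",
			       (unsigned long)ph.p_flags);
			fclose(f);
			return 0;
		}
	}
	puts("no PT_PAX_FLAGS header (binary not marked by paxctl)");
	fclose(f);
	return 1;
}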
++ ++ If you enable none of the marking options then all applications ++ will run with PaX enabled on them by default. ++ ++config PAX_XATTR_PAX_FLAGS ++ bool 'Use filesystem extended attributes marking' ++ default y if GRKERNSEC_CONFIG_AUTO ++ select CIFS_XATTR if CIFS ++ select F2FS_FS_XATTR if F2FS_FS ++ select EXT2_FS_XATTR if EXT2_FS ++ select EXT3_FS_XATTR if EXT3_FS ++ select JFFS2_FS_XATTR if JFFS2_FS ++ select REISERFS_FS_XATTR if REISERFS_FS ++ select SQUASHFS_XATTR if SQUASHFS ++ select TMPFS_XATTR if TMPFS ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'setfattr' utility. The control ++ flags will be read from the user.pax.flags extended attribute of ++ the file. This marking has the benefit of supporting binary-only ++ applications that self-check themselves (e.g., skype) and would ++ not tolerate chpax/paxctl changes. The main drawback is that ++ extended attributes are not supported by some filesystems (e.g., ++ isofs, udf, vfat) so copying files through such filesystems will ++ lose the extended attributes and these PaX markings. ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks. ++ ++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you ++ must make sure that the marks are the same if a binary has both marks. ++ ++ If you enable none of the marking options then all applications ++ will run with PaX enabled on them by default. ++ ++choice ++ prompt 'MAC system integration' ++ default PAX_HAVE_ACL_FLAGS ++ help ++ Mandatory Access Control systems have the option of controlling ++ PaX flags on a per executable basis, choose the method supported ++ by your particular system. ++ ++ - "none": if your MAC system does not interact with PaX, ++ - "direct": if your MAC system defines pax_set_initial_flags() itself, ++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback. ++ ++ NOTE: this option is for developers/integrators only. ++ ++ config PAX_NO_ACL_FLAGS ++ bool 'none' ++ ++ config PAX_HAVE_ACL_FLAGS ++ bool 'direct' ++ ++ config PAX_HOOK_ACL_FLAGS ++ bool 'hook' ++endchoice ++ ++endmenu ++ ++menu "Non-executable pages" ++ depends on PAX ++ ++config PAX_NOEXEC ++ bool "Enforce non-executable pages" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86 ++ help ++ By design some architectures do not allow for protecting memory ++ pages against execution or even if they do, Linux does not make ++ use of this feature. In practice this means that if a page is ++ readable (such as the stack or heap) it is also executable. ++ ++ There is a well known exploit technique that makes use of this ++ fact and a common programming mistake where an attacker can ++ introduce code of his choice somewhere in the attacked program's ++ memory (typically the stack or the heap) and then execute it. ++ ++ If the attacked program was running with different (typically ++ higher) privileges than that of the attacker, then he can elevate ++ his own privilege level (e.g. get a root shell, write to files for ++ which he does not have write access to, etc). ++ ++ Enabling this option will let you choose from various features ++ that prevent the injection and execution of 'foreign' code in ++ a program. 
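The exploit technique this help text describes (inject code into writable memory, then jump to it) doubles as the smoke test for non-executable pages: map read/write memory, copy a return stub into it, and call it. On a kernel enforcing NOEXEC (or on hardware with the NX bit honored) the call should fault; the final message printing means the protection is absent. A minimal x86 sketch:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* x86 "ret" opcode; substitute the local return instruction elsewhere. */
	static const unsigned char ret = 0xc3;
	void (*fn)(void);
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memcpy(p, &ret, 1);
	fn = (void (*)(void))p;
	puts("calling into a PROT_READ|PROT_WRITE page...");
	fn();  /* PAGEEXEC/SEGMEXEC or hardware NX should kill us here */
	puts("executed: this kernel does not enforce non-executable pages");
	return 0;
}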
++ ++ This will also break programs that rely on the old behaviour and ++ expect that dynamically allocated memory via the malloc() family ++ of functions is executable (which it is not). Notable examples ++ are the XFree86 4.x server, the java runtime and wine. ++ ++config PAX_PAGEEXEC ++ bool "Paging based non-executable pages" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7) ++ select ARCH_TRACK_EXEC_LIMIT if X86_32 ++ help ++ This implementation is based on the paging feature of the CPU. ++ On i386 without hardware non-executable bit support there is a ++ variable but usually low performance impact, however on Intel's ++ P4 core based CPUs it is very high so you should not enable this ++ for kernels meant to be used on such CPUs. ++ ++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386 ++ with hardware non-executable bit support there is no performance ++ impact, on ppc the impact is negligible. ++ ++ Note that several architectures require various emulations due to ++ badly designed userland ABIs, this will cause a performance impact ++ but will disappear as soon as userland is fixed. For example, ppc ++ userland MUST have been built with secure-plt by a recent toolchain. ++ ++config PAX_SEGMEXEC ++ bool "Segmentation based non-executable pages" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on PAX_NOEXEC && X86_32 ++ help ++ This implementation is based on the segmentation feature of the ++ CPU and has a very small performance impact, however applications ++ will be limited to a 1.5 GB address space instead of the normal ++ 3 GB. ++ ++config PAX_EMUTRAMP ++ bool "Emulate trampolines" ++ default y if PARISC || GRKERNSEC_CONFIG_AUTO ++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86) ++ help ++ There are some programs and libraries that for one reason or ++ another attempt to execute special small code snippets from ++ non-executable memory pages. Most notable examples are the ++ signal handler return code generated by the kernel itself and ++ the GCC trampolines. ++ ++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then ++ such programs will no longer work under your kernel. ++ ++ As a remedy you can say Y here and use the 'chpax' or 'paxctl' ++ utilities to enable trampoline emulation for the affected programs ++ yet still have the protection provided by the non-executable pages. ++ ++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise ++ your system will not even boot. ++ ++ Alternatively you can say N here and use the 'chpax' or 'paxctl' ++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC ++ for the affected files. ++ ++ NOTE: enabling this feature *may* open up a loophole in the ++ protection provided by non-executable pages that an attacker ++ could abuse. Therefore the best solution is to not have any ++ files on your system that would require this option. This can ++ be achieved by not using libc5 (which relies on the kernel ++ signal handler return code) and not using or rewriting programs ++ that make use of the nested function implementation of GCC. ++ Skilled users can just fix GCC itself so that it implements ++ nested function calls in a way that does not interfere with PaX. 
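The "GCC trampolines" named above come from the nested-function extension: when a nested function that uses its enclosing scope has its address taken, gcc synthesizes a small code stub on the stack, which requires an executable stack and is exactly what EMUTRAMP emulates instead of allowing. A minimal reproducer (GNU C extension, gcc only):

#include <stdio.h>

static void run(void (*cb)(void))
{
	cb();
}

int main(void)
{
	int captured = 42;

	/* Nested function capturing 'captured'; letting its address escape
	 * forces gcc to build a trampoline on the stack at runtime. */
	void show(void)
	{
		printf("captured = %d\n", captured);
	}

	run(show);  /* without trampoline emulation, NOEXEC traps this call */
	return 0;
}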
++ ++config PAX_EMUSIGRT ++ bool "Automatically emulate sigreturn trampolines" ++ depends on PAX_EMUTRAMP && PARISC ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate signal return trampolines executing on the stack ++ that would otherwise lead to task termination. ++ ++ This solution is intended as a temporary one for users with ++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, ++ Modula-3 runtime, etc) or executables linked to such, basically ++ everything that does not specify its own SA_RESTORER function in ++ normal executable memory like glibc 2.1+ does. ++ ++ On parisc you MUST enable this option, otherwise your system will ++ not even boot. ++ ++ NOTE: this feature cannot be disabled on a per executable basis ++ and since it *does* open up a loophole in the protection provided ++ by non-executable pages, the best solution is to not have any ++ files on your system that would require this option. ++ ++config PAX_MPROTECT ++ bool "Restrict mprotect()" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) ++ help ++ Enabling this option will prevent programs from ++ - changing the executable status of memory pages that were ++ not originally created as executable, ++ - making read-only executable pages writable again, ++ - creating executable pages from anonymous memory, ++ - making read-only-after-relocations (RELRO) data pages writable again. ++ ++ You should say Y here to complete the protection provided by ++ the enforcement of non-executable pages. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_MPROTECT_COMPAT ++ bool "Use legacy/compat protection demoting (read help)" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP) ++ depends on PAX_MPROTECT ++ help ++ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects ++ by sending the proper error code to the application. For some broken ++ userland, this can cause problems with Python or other applications. The ++ current implementation however allows for applications like clamav to ++ detect if JIT compilation/execution is allowed and to fall back gracefully ++ to an interpreter-based mode if it does not. While we encourage everyone ++ to use the current implementation as-is and push upstream to fix broken ++ userland (note that the RWX logging option can assist with this), in some ++ environments this may not be possible. Having to disable MPROTECT ++ completely on certain binaries reduces the security benefit of PaX, ++ so this option is provided for those environments to revert to the old ++ behavior. ++ ++config PAX_ELFRELOCS ++ bool "Allow ELF text relocations (read help)" ++ depends on PAX_MPROTECT ++ default n ++ help ++ Non-executable pages and mprotect() restrictions are effective ++ in preventing the introduction of new executable code into an ++ attacked task's address space. There remain only two venues ++ for this kind of attack: if the attacker can execute already ++ existing code in the attacked task then he can either have it ++ create and mmap() a file containing his code or have it mmap() ++ an already existing ELF library that does not have position ++ independent code in it and use mprotect() on it to make it ++ writable and copy his code there. 
While protecting against ++ the former approach is beyond PaX, the latter can be prevented ++ by having only PIC ELF libraries on one's system (which do not ++ need to relocate their code). If you are sure this is your case, ++ as is the case with all modern Linux distributions, then leave ++ this option disabled. You should say 'n' here. ++ ++config PAX_ETEXECRELOCS ++ bool "Allow ELF ET_EXEC text relocations" ++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS ++ default y ++ help ++ On some architectures there are incorrectly created applications ++ that require text relocations and would not work without enabling ++ this option. If you are an alpha, ia64 or parisc user, you should ++ enable this option and disable it once you have made sure that ++ none of your applications need it. ++ ++config PAX_EMUPLT ++ bool "Automatically emulate ELF PLT" ++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC) ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate the Procedure Linkage Table entries in ELF files. ++ On some architectures such entries are in writable memory, and ++ become non-executable leading to task termination. Therefore ++ it is mandatory that you enable this option on alpha, parisc, ++ sparc and sparc64, otherwise your system would not even boot. ++ ++ NOTE: this feature *does* open up a loophole in the protection ++ provided by the non-executable pages, therefore the proper ++ solution is to modify the toolchain to produce a PLT that does ++ not need to be writable. ++ ++config PAX_DLRESOLVE ++ bool 'Emulate old glibc resolver stub' ++ depends on PAX_EMUPLT && SPARC ++ default n ++ help ++ This option is needed if userland has an old glibc (before 2.4) ++ that puts a 'save' instruction into the runtime generated resolver ++ stub that needs special emulation. ++ ++config PAX_KERNEXEC ++ bool "Enforce non-executable kernel pages" ++ default y if GRKERNSEC_CONFIG_AUTO && (!X86 || GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM)) ++ depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN ++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE) ++ select PAX_KERNEXEC_PLUGIN if X86_64 ++ help ++ This is the kernel land equivalent of PAGEEXEC and MPROTECT, ++ that is, enabling this option will make it harder to inject ++ and execute 'foreign' code in kernel memory itself. ++ ++choice ++ prompt "Return Address Instrumentation Method" ++ default PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ depends on PAX_KERNEXEC_PLUGIN ++ help ++ Select the method used to instrument function pointer dereferences. ++ Note that binary modules cannot be instrumented by this approach. ++ ++ Note that the implementation requires a gcc with plugin support, ++ i.e., gcc 4.5 or newer. You may need to install the supporting ++ headers explicitly in addition to the normal gcc package. ++ ++ config PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ bool "bts" ++ help ++ This method is compatible with binary only modules but has ++ a higher runtime overhead. ++ ++ config PAX_KERNEXEC_PLUGIN_METHOD_OR ++ bool "or" ++ depends on !PARAVIRT ++ help ++ This method is incompatible with binary only modules but has ++ a lower runtime overhead. 
++endchoice ++ ++config PAX_KERNEXEC_PLUGIN_METHOD ++ string ++ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS ++ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR ++ default "" ++ ++config PAX_KERNEXEC_MODULE_TEXT ++ int "Minimum amount of memory reserved for module code" ++ default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER) ++ default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP) ++ depends on PAX_KERNEXEC && X86_32 ++ help ++ Due to implementation details the kernel must reserve a fixed ++ amount of memory for runtime allocated code (such as modules) ++ at compile time that cannot be changed at runtime. Here you ++ can specify the minimum amount in MB that will be reserved. ++ Due to the same implementation details this size will always ++ be rounded up to the next 2/4 MB boundary (depends on PAE) so ++ the actually available memory for runtime allocated code will ++ usually be more than this minimum. ++ ++ The default 4 MB should be enough for most users but if you have ++ an excessive number of modules (e.g., most distribution configs ++ compile many drivers as modules) or use huge modules such as ++ nvidia's kernel driver, you will need to adjust this amount. ++ A good rule of thumb is to look at your currently loaded kernel ++ modules and add up their sizes. ++ ++endmenu ++ ++menu "Address Space Layout Randomization" ++ depends on PAX ++ ++config PAX_ASLR ++ bool "Address Space Layout Randomization" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ Many if not most exploit techniques rely on the knowledge of ++ certain addresses in the attacked program. The following options ++ will allow the kernel to apply a certain amount of randomization ++ to specific parts of the program thereby forcing an attacker to ++ guess them in most cases. Any failed guess will most likely crash ++ the attacked program which allows the kernel to detect such attempts ++ and react on them. PaX itself provides no reaction mechanisms, ++ instead it is strongly encouraged that you make use of grsecurity's ++ (http://www.grsecurity.net/) built-in crash detection features or ++ develop one yourself. ++ ++ By saying Y here you can choose to randomize the following areas: ++ - top of the task's kernel stack ++ - top of the task's userland stack ++ - base address for mmap() requests that do not specify one ++ (this includes all libraries) ++ - base address of the main executable ++ ++ It is strongly recommended to say Y here as address space layout ++ randomization has negligible impact on performance yet it provides ++ a very effective protection. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_RANDKSTACK ++ bool "Randomize kernel stack base" ++ default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX) ++ depends on X86_TSC && X86 ++ help ++ By saying Y here the kernel will randomize every task's kernel ++ stack on every system call. This will not only force an attacker ++ to guess it but also prevent him from making use of possible ++ leaked information about it. ++ ++ Since the kernel stack is a rather scarce resource, randomization ++ may cause unexpected stack overflows, therefore you should very ++ carefully test your system. Note that once enabled in the kernel ++ configuration, this feature cannot be disabled on a per file basis. 
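RANDKSTACK randomizes a kernel-side address and is not directly visible from userland, but the userland randomization options that follow (PAX_RANDUSTACK, PAX_RANDMMAP) are: run the sketch below several times and the printed bases change on every execution when ASLR is active (the text address only moves for PIE binaries).

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	int on_stack;
	void *on_heap = malloc(16);
	void *mapped = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Randomized bases differ across runs; fixed bases repeat. */
	printf("stack %p  heap %p  mmap %p  text %p\n",
	       (void *)&on_stack, on_heap, mapped, (void *)&main);
	free(on_heap);
	return 0;
}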
++ ++config PAX_RANDUSTACK ++ bool ++ ++config PAX_RANDMMAP ++ bool "Randomize user stack and mmap() bases" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on PAX_ASLR ++ select PAX_RANDUSTACK ++ help ++ By saying Y here the kernel will randomize every task's userland ++ stack and use a randomized base address for mmap() requests that ++ do not specify one themselves. ++ ++ The stack randomization is done in two steps where the second ++ one may apply a big amount of shift to the top of the stack and ++ cause problems for programs that want to use lots of memory (more ++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is). ++ ++ As a result of mmap randomization all dynamically loaded libraries ++ will appear at random addresses and therefore be harder to exploit ++ by a technique where an attacker attempts to execute library code ++ for his purposes (e.g. spawn a shell from an exploited program that ++ is running at an elevated privilege level). ++ ++ Furthermore, if a program is relinked as a dynamic ELF file, its ++ base address will be randomized as well, completing the full ++ randomization of the address space layout. Attacking such programs ++ becomes a guess game. You can find an example of doing this at ++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at ++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz . ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this ++ feature on a per file basis. ++ ++endmenu ++ ++menu "Miscellaneous hardening features" ++ ++config PAX_MEMORY_SANITIZE ++ bool "Sanitize all freed memory" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY) ++ help ++ By saying Y here the kernel will erase memory pages and slab objects ++ as soon as they are freed. This in turn reduces the lifetime of data ++ stored in them, making it less likely that sensitive information such ++ as passwords, cryptographic secrets, etc stay in memory for too long. ++ ++ This is especially useful for programs whose runtime is short, long ++ lived processes and the kernel itself benefit from this as long as ++ they ensure timely freeing of memory that may hold sensitive ++ information. ++ ++ A nice side effect of the sanitization of slab objects is the ++ reduction of possible info leaks caused by padding bytes within the ++ leaky structures. Use-after-free bugs for structures containing ++ pointers can also be detected as dereferencing the sanitized pointer ++ will generate an access violation. ++ ++ The tradeoff is performance impact, on a single CPU system kernel ++ compilation sees a 3% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ To reduce the performance penalty by sanitizing pages only, albeit ++ limiting the effectiveness of this feature at the same time, slab ++ sanitization can be disabled with the kernel commandline parameter ++ "pax_sanitize_slab=0". ++ ++ Note that this feature does not protect data stored in live pages, ++ e.g., process memory swapped to disk may stay there for a long time. ++ ++config PAX_MEMORY_STACKLEAK ++ bool "Sanitize kernel stack" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY) ++ depends on X86 ++ help ++ By saying Y here the kernel will erase the kernel stack before it ++ returns from a system call. This in turn reduces the information ++ that a kernel stack leak bug can reveal. 
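PAX_MEMORY_SANITIZE above and this stack clearing follow the same discipline: scrub memory the moment its useful life ends, so later leaks find nothing. The same idea can be mimicked in userland for secrets; a hypothetical wrapper (the name sanitizing_free is illustrative, not a grsecurity API):

#include <stdlib.h>
#include <string.h>

/* Userland analogue of sanitize-on-free: wipe an object before returning
 * it to the allocator so stale secrets cannot be read from recycled memory. */
static void sanitizing_free(void *p, size_t len)
{
	volatile unsigned char *v = p;

	if (!p)
		return;
	/* The volatile qualifier keeps the compiler from eliding the wipe
	 * as a dead store (the role explicit_bzero() plays in libc). */
	while (len--)
		*v++ = 0;
	free(p);
}

int main(void)
{
	char *secret = malloc(32);

	if (!secret)
		return 1;
	strcpy(secret, "hunter2");
	sanitizing_free(secret, 32);
	return 0;
}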
++ ++ Note that such a bug can still leak information that was put on ++ the stack by the current system call (the one eventually triggering ++ the bug) but traces of earlier system calls on the kernel stack ++ cannot leak anymore. ++ ++ The tradeoff is performance impact: on a single CPU system kernel ++ compilation sees a 1% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ Note that the full feature requires a gcc with plugin support, ++ i.e., gcc 4.5 or newer. You may need to install the supporting ++ headers explicitly in addition to the normal gcc package. Using ++ older gcc versions means that functions with large enough stack ++ frames may leave uninitialized memory behind that may be exposed ++ to a later syscall leaking the stack. ++ ++config PAX_MEMORY_STRUCTLEAK ++ bool "Forcibly initialize local variables copied to userland" ++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY) ++ help ++ By saying Y here the kernel will zero initialize some local ++ variables that are going to be copied to userland. This in ++ turn prevents unintended information leakage from the kernel ++ stack should later code forget to explicitly set all parts of ++ the copied variable. ++ ++ The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK ++ at a much smaller coverage. ++ ++ Note that the implementation requires a gcc with plugin support, ++ i.e., gcc 4.5 or newer. You may need to install the supporting ++ headers explicitly in addition to the normal gcc package. ++ ++config PAX_MEMORY_UDEREF ++ bool "Prevent invalid userland pointer dereference" ++ default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (!X86 || GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT) ++ depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN ++ select PAX_PER_CPU_PGD if X86_64 ++ help ++ By saying Y here the kernel will be prevented from dereferencing ++ userland pointers in contexts where the kernel expects only kernel ++ pointers. This is both a useful runtime debugging feature and a ++ security measure that prevents exploiting a class of kernel bugs. ++ ++ The tradeoff is that some virtualization solutions may experience ++ a huge slowdown and therefore you should not enable this feature ++ for kernels meant to run in such environments. Whether a given VM ++ solution is affected or not is best determined by simply trying it ++ out, the performance impact will be obvious right on boot as this ++ mechanism engages from very early on. A good rule of thumb is that ++ VMs running on CPUs without hardware virtualization support (i.e., ++ the majority of IA-32 CPUs) will likely experience the slowdown. ++ ++ On X86_64 the kernel will make use of PCID support when available ++ (Intel's Westmere, Sandy Bridge, etc) for better security (default) ++ or performance impact. Pass pax_weakuderef on the kernel command ++ line to choose the latter. ++ ++config PAX_REFCOUNT ++ bool "Prevent various kernel object reference counter overflows" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || SPARC64 || X86) ++ help ++ By saying Y here the kernel will detect and prevent overflowing ++ various (but not all) kinds of object reference counters. Such ++ overflows can normally occur due to bugs only and are often, if ++ not always, exploitable. 
++ ++ The tradeoff is that data structures protected by an overflowed ++ refcount will never be freed and therefore will leak memory. Note ++ that this leak also happens even without this protection but in ++ that case the overflow can eventually trigger the freeing of the ++ data structure while it is still being used elsewhere, resulting ++ in the exploitable situation that this feature prevents. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++config PAX_CONSTIFY_PLUGIN ++ bool "Automatically constify eligible structures" ++ default y ++ depends on !UML && PAX_KERNEXEC ++ help ++ By saying Y here the compiler will automatically constify a class ++ of types that contain only function pointers. This reduces the ++ kernel's attack surface and also produces a better memory layout. ++ ++ Note that the implementation requires a gcc with plugin support, ++ i.e., gcc 4.5 or newer. You may need to install the supporting ++ headers explicitly in addition to the normal gcc package. ++ ++ Note that if some code really has to modify constified variables ++ then the source code will have to be patched to allow it. Examples ++ can be found in PaX itself (the no_const attribute) and for some ++ out-of-tree modules at http://www.grsecurity.net/~paxguy1/ . ++ ++config PAX_USERCOPY ++ bool "Harden heap object copies between kernel and userland" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on ARM || IA64 || PPC || SPARC || X86 ++ depends on GRKERNSEC && (SLAB || SLUB || SLOB) ++ select PAX_USERCOPY_SLABS ++ help ++ By saying Y here the kernel will enforce the size of heap objects ++ when they are copied in either direction between the kernel and ++ userland, even if only a part of the heap object is copied. ++ ++ Specifically, this checking prevents information leaking from the ++ kernel heap during kernel to userland copies (if the kernel heap ++ object is otherwise fully initialized) and prevents kernel heap ++ overflows during userland to kernel copies. ++ ++ Note that the current implementation provides the strictest bounds ++ checks for the SLUB allocator. ++ ++ Enabling this option also enables per-slab cache protection against ++ data in a given cache being copied into/out of via userland ++ accessors. Though the whitelist of regions will be reduced over ++ time, it notably protects important data structures like task structs. ++ ++ If frame pointers are enabled on x86, this option will also restrict ++ copies into and out of the kernel stack to local variables within a ++ single frame. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++config PAX_USERCOPY_DEBUG ++ bool ++ depends on X86 && PAX_USERCOPY ++ default n ++ ++config PAX_SIZE_OVERFLOW ++ bool "Prevent various integer overflows in function size parameters" ++ default y if GRKERNSEC_CONFIG_AUTO ++ depends on X86 ++ help ++ By saying Y here the kernel recomputes expressions of function ++ arguments marked by a size_overflow attribute with double integer ++ precision (DImode/TImode for 32/64 bit integer types). ++ ++ The recomputed argument is checked against TYPE_MAX and an event ++ is logged on overflow and the triggering process is killed. ++ ++ Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/ ++ ++ Note that the implementation requires a gcc with plugin support, ++ i.e., gcc 4.5 or newer. You may need to install the supporting ++ headers explicitly in addition to the normal gcc package. 
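To make the recomputation that PAX_SIZE_OVERFLOW's help text describes concrete, here is a minimal host-side sketch (an illustration, not code from the plugin): a 32-bit size expression is redone in 64-bit (double integer) precision and compared against the original type's maximum before the value is used.

    #include <stdint.h>
    #include <stdio.h>

    /* Recompute a * b in twice the width of the operands and refuse
     * results above UINT32_MAX -- on overflow the real instrumentation
     * logs an event and kills the triggering process. */
    static int size_mul_check(uint32_t a, uint32_t b, uint32_t *res)
    {
            uint64_t wide = (uint64_t)a * b;

            if (wide > UINT32_MAX)
                    return -1;
            *res = (uint32_t)wide;
            return 0;
    }

    int main(void)
    {
            uint32_t r;

            printf("%d\n", size_mul_check(0x10000, 0x10000, &r)); /* -1: wraps in 32 bits */
            printf("%d\n", size_mul_check(1000, 1000, &r));       /* 0: r == 1000000 */
            return 0;
    }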
++ ++config PAX_LATENT_ENTROPY ++ bool "Generate some entropy during boot and runtime" ++ default y if GRKERNSEC_CONFIG_AUTO ++ help ++ By saying Y here the kernel will instrument some kernel code to ++ extract some entropy from both original and artificially created ++ program state. This will help especially embedded systems where ++ there is little 'natural' source of entropy normally. The cost ++ is some slowdown of the boot process and fork and irq processing. ++ ++ When pax_extra_latent_entropy is passed on the kernel command line, ++ entropy will be extracted from up to the first 4GB of RAM while the ++ runtime memory allocator is being initialized. This costs even more ++ slowdown of the boot process. ++ ++ Note that the implementation requires a gcc with plugin support, ++ i.e., gcc 4.5 or newer. You may need to install the supporting ++ headers explicitly in addition to the normal gcc package. ++ ++ Note that entropy extracted this way is not cryptographically ++ secure! ++ ++endmenu ++ ++endmenu ++ ++source grsecurity/Kconfig ++ ++endmenu ++ ++endmenu ++ + source security/keys/Kconfig + + config SECURITY_DMESG_RESTRICT +@@ -103,7 +1054,7 @@ config INTEL_TXT + config LSM_MMAP_MIN_ADDR + int "Low address space for LSM to protect from user allocation" + depends on SECURITY && SECURITY_SELINUX +- default 32768 if ARM || (ARM64 && COMPAT) ++ default 32768 if ALPHA || ARM || (ARM64 && COMPAT) || PARISC || SPARC32 + default 65536 + help + This is the portion of low virtual memory which should be protected +diff --git a/security/apparmor/file.c b/security/apparmor/file.c +index fdaa50c..2761dcb 100644 +--- a/security/apparmor/file.c ++++ b/security/apparmor/file.c +@@ -348,8 +348,8 @@ static inline bool xindex_is_subset(u32 link, u32 target) + int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry) + { +- struct path link = { new_dir->mnt, new_dentry }; +- struct path target = { new_dir->mnt, old_dentry }; ++ struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry }; ++ struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry }; + struct path_cond cond = { + old_dentry->d_inode->i_uid, + old_dentry->d_inode->i_mode +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index 4257b7e..2d0732d 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -186,7 +186,7 @@ static int common_perm_dir_dentry(int op, struct path *dir, + struct dentry *dentry, u32 mask, + struct path_cond *cond) + { +- struct path path = { dir->mnt, dentry }; ++ struct path path = { .mnt = dir->mnt, .dentry = dentry }; + + return common_perm(op, &path, mask, cond); + } +@@ -203,7 +203,7 @@ static int common_perm_dir_dentry(int op, struct path *dir, + static int common_perm_mnt_dentry(int op, struct vfsmount *mnt, + struct dentry *dentry, u32 mask) + { +- struct path path = { mnt, dentry }; ++ struct path path = { .mnt = mnt, .dentry = dentry }; + struct path_cond cond = { dentry->d_inode->i_uid, + dentry->d_inode->i_mode + }; +@@ -325,8 +325,8 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry, + + profile = aa_current_profile(); + if (!unconfined(profile)) { +- struct path old_path = { old_dir->mnt, old_dentry }; +- struct path new_path = { new_dir->mnt, new_dentry }; ++ struct path old_path = { .mnt = old_dir->mnt, .dentry = old_dentry }; ++ struct path new_path = { .mnt = new_dir->mnt, .dentry = new_dentry }; + struct path_cond cond = { old_dentry->d_inode->i_uid, + 
old_dentry->d_inode->i_mode + }; +@@ -615,7 +615,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, + return error; + } + +-static struct security_operations apparmor_ops = { ++static struct security_operations apparmor_ops __read_only = { + .name = "apparmor", + + .ptrace_access_check = apparmor_ptrace_access_check, +diff --git a/security/commoncap.c b/security/commoncap.c +index b9d613e..f68305c 100644 +--- a/security/commoncap.c ++++ b/security/commoncap.c +@@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data + return 0; + } + ++/* returns: ++ 1 for suid privilege ++ 2 for sgid privilege ++ 3 for fscap privilege ++*/ ++int is_privileged_binary(const struct dentry *dentry) ++{ ++ struct cpu_vfs_cap_data capdata; ++ struct inode *inode = dentry->d_inode; ++ ++ if (!inode || S_ISDIR(inode->i_mode)) ++ return 0; ++ ++ if (inode->i_mode & S_ISUID) ++ return 1; ++ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ++ return 2; ++ ++ if (!get_vfs_caps_from_disk(dentry, &capdata)) { ++ if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted)) ++ return 3; ++ } ++ ++ return 0; ++} ++ + /* + * Attempt to get the on-exec apply capability sets for an executable file from + * its xattrs and, if present, apply them to the proposed credentials being +@@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm) + const struct cred *cred = current_cred(); + kuid_t root_uid = make_kuid(cred->user_ns, 0); + ++ if (gr_acl_enable_at_secure()) ++ return 1; ++ + if (!uid_eq(cred->uid, root_uid)) { + if (bprm->cap_effective) + return 1; +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index f79fa8b..6161868 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -118,8 +118,8 @@ int ima_init_template(void); + extern spinlock_t ima_queue_lock; + + struct ima_h_table { +- atomic_long_t len; /* number of stored measurements in the list */ +- atomic_long_t violations; ++ atomic_long_unchecked_t len; /* number of stored measurements in the list */ ++ atomic_long_unchecked_t violations; + struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; + }; + extern struct ima_h_table ima_htable; +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c +index 025824a..2a681b1 100644 +--- a/security/integrity/ima/ima_api.c ++++ b/security/integrity/ima/ima_api.c +@@ -137,7 +137,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename, + int result; + + /* can overflow, only indicator */ +- atomic_long_inc(&ima_htable.violations); ++ atomic_long_inc_unchecked(&ima_htable.violations); + + result = ima_alloc_init_template(NULL, file, filename, + NULL, 0, &entry); +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c +index 468a3ba..9af5cae 100644 +--- a/security/integrity/ima/ima_fs.c ++++ b/security/integrity/ima/ima_fs.c +@@ -28,12 +28,12 @@ + static int valid_policy = 1; + #define TMPBUFLEN 12 + static ssize_t ima_show_htable_value(char __user *buf, size_t count, +- loff_t *ppos, atomic_long_t *val) ++ loff_t *ppos, atomic_long_unchecked_t *val) + { + char tmpbuf[TMPBUFLEN]; + ssize_t len; + +- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); ++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val)); + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); + } + +diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c +index 
d85e997..6992813 100644 +--- a/security/integrity/ima/ima_queue.c ++++ b/security/integrity/ima/ima_queue.c +@@ -80,7 +80,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) + INIT_LIST_HEAD(&qe->later); + list_add_tail_rcu(&qe->later, &ima_measurements); + +- atomic_long_inc(&ima_htable.len); ++ atomic_long_inc_unchecked(&ima_htable.len); + key = ima_hash_key(entry->digest); + hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); + return 0; +diff --git a/security/keys/compat.c b/security/keys/compat.c +index bbd32c7..c60c927 100644 +--- a/security/keys/compat.c ++++ b/security/keys/compat.c +@@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov( + if (ret == 0) + goto no_payload_free; + +- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); ++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid); + err: + if (iov != iovstack) + kfree(iov); +diff --git a/security/keys/internal.h b/security/keys/internal.h +index 80b2aac..bb7ee65 100644 +--- a/security/keys/internal.h ++++ b/security/keys/internal.h +@@ -253,7 +253,7 @@ extern long keyctl_instantiate_key_iov(key_serial_t, + extern long keyctl_invalidate_key(key_serial_t); + + extern long keyctl_instantiate_key_common(key_serial_t, +- const struct iovec *, ++ const struct iovec __user *, + unsigned, size_t, key_serial_t); + #ifdef CONFIG_PERSISTENT_KEYRINGS + extern long keyctl_get_persistent(uid_t, key_serial_t); +diff --git a/security/keys/key.c b/security/keys/key.c +index 6e21c11..9ed67ca 100644 +--- a/security/keys/key.c ++++ b/security/keys/key.c +@@ -285,7 +285,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, + + atomic_set(&key->usage, 1); + init_rwsem(&key->sem); +- lockdep_set_class(&key->sem, &type->lock_class); ++ lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class); + key->index_key.type = type; + key->user = user; + key->quotalen = quotalen; +@@ -1036,7 +1036,9 @@ int register_key_type(struct key_type *ktype) + struct key_type *p; + int ret; + +- memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); ++ pax_open_kernel(); ++ memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class)); ++ pax_close_kernel(); + + ret = -EEXIST; + down_write(&key_types_sem); +@@ -1048,7 +1050,7 @@ int register_key_type(struct key_type *ktype) + } + + /* store the type */ +- list_add(&ktype->link, &key_types_list); ++ pax_list_add((struct list_head *)&ktype->link, &key_types_list); + + pr_notice("Key type %s registered\n", ktype->name); + ret = 0; +@@ -1070,7 +1072,7 @@ EXPORT_SYMBOL(register_key_type); + void unregister_key_type(struct key_type *ktype) + { + down_write(&key_types_sem); +- list_del_init(&ktype->link); ++ pax_list_del_init((struct list_head *)&ktype->link); + downgrade_write(&key_types_sem); + key_gc_keytype(ktype); + pr_notice("Key type %s unregistered\n", ktype->name); +@@ -1088,10 +1090,10 @@ void __init key_init(void) + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + + /* add the special key types */ +- list_add_tail(&key_type_keyring.link, &key_types_list); +- list_add_tail(&key_type_dead.link, &key_types_list); +- list_add_tail(&key_type_user.link, &key_types_list); +- list_add_tail(&key_type_logon.link, &key_types_list); ++ pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list); ++ pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list); ++ pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list); ++ pax_list_add_tail((struct list_head 
*)&key_type_logon.link, &key_types_list); + + /* record the root user tracking */ + rb_link_node(&root_key_user.node, +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index cee72ce..e46074a 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -987,7 +987,7 @@ static int keyctl_change_reqkey_auth(struct key *key) + /* + * Copy the iovec data from userspace + */ +-static long copy_from_user_iovec(void *buffer, const struct iovec *iov, ++static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov, + unsigned ioc) + { + for (; ioc > 0; ioc--) { +@@ -1009,7 +1009,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov, + * If successful, 0 will be returned. + */ + long keyctl_instantiate_key_common(key_serial_t id, +- const struct iovec *payload_iov, ++ const struct iovec __user *payload_iov, + unsigned ioc, + size_t plen, + key_serial_t ringid) +@@ -1104,7 +1104,7 @@ long keyctl_instantiate_key(key_serial_t id, + [0].iov_len = plen + }; + +- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid); ++ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid); + } + + return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid); +@@ -1137,7 +1137,7 @@ long keyctl_instantiate_key_iov(key_serial_t id, + if (ret == 0) + goto no_payload_free; + +- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); ++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid); + err: + if (iov != iovstack) + kfree(iov); +diff --git a/security/min_addr.c b/security/min_addr.c +index f728728..6457a0c 100644 +--- a/security/min_addr.c ++++ b/security/min_addr.c +@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; + */ + static void update_mmap_min_addr(void) + { ++#ifndef SPARC + #ifdef CONFIG_LSM_MMAP_MIN_ADDR + if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) + mmap_min_addr = dac_mmap_min_addr; +@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void) + #else + mmap_min_addr = dac_mmap_min_addr; + #endif ++#endif + } + + /* +diff --git a/security/security.c b/security/security.c +index 919cad9..2127be1 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -33,8 +33,8 @@ + static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = + CONFIG_DEFAULT_SECURITY; + +-static struct security_operations *security_ops; +-static struct security_operations default_security_ops = { ++struct security_operations *security_ops __read_only; ++struct security_operations default_security_ops __read_only = { + .name = "default", + }; + +@@ -73,11 +73,6 @@ int __init security_init(void) + return 0; + } + +-void reset_security_ops(void) +-{ +- security_ops = &default_security_ops; +-} +- + /* Save user chosen LSM */ + static int __init choose_lsm(char *str) + { +diff --git a/security/selinux/avc.c b/security/selinux/avc.c +index fc3e662..7844c60 100644 +--- a/security/selinux/avc.c ++++ b/security/selinux/avc.c +@@ -59,7 +59,7 @@ struct avc_node { + struct avc_cache { + struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ + spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ +- atomic_t lru_hint; /* LRU hint for reclaim scan */ ++ atomic_unchecked_t lru_hint; /* LRU hint for reclaim scan */ + atomic_t active_nodes; + u32 latest_notif; /* latest revocation notification */ + }; +@@ -167,7 +167,7 @@ void __init avc_init(void) + spin_lock_init(&avc_cache.slots_lock[i]); + } + 
atomic_set(&avc_cache.active_nodes, 0); +- atomic_set(&avc_cache.lru_hint, 0); ++ atomic_set_unchecked(&avc_cache.lru_hint, 0); + + avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), + 0, SLAB_PANIC, NULL); +@@ -242,7 +242,7 @@ static inline int avc_reclaim_node(void) + spinlock_t *lock; + + for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { +- hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); ++ hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); + head = &avc_cache.slots[hvalue]; + lock = &avc_cache.slots_lock[hvalue]; + +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index e294b86..eda45c55 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -95,8 +95,6 @@ + #include "audit.h" + #include "avc_ss.h" + +-extern struct security_operations *security_ops; +- + /* SECMARK reference count */ + static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); + +@@ -5759,7 +5757,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) + + #endif + +-static struct security_operations selinux_ops = { ++static struct security_operations selinux_ops __read_only = { + .name = "selinux", + + .ptrace_access_check = selinux_ptrace_access_check, +@@ -6112,6 +6110,9 @@ static void selinux_nf_ip_exit(void) + #ifdef CONFIG_SECURITY_SELINUX_DISABLE + static int selinux_disabled; + ++extern struct security_operations *security_ops; ++extern struct security_operations default_security_ops; ++ + int selinux_disable(void) + { + if (ss_initialized) { +@@ -6129,7 +6130,9 @@ int selinux_disable(void) + selinux_disabled = 1; + selinux_enabled = 0; + +- reset_security_ops(); ++ pax_open_kernel(); ++ security_ops = &default_security_ops; ++ pax_close_kernel(); + + /* Try to destroy the avc node cache */ + avc_disable(); +diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h +index 9f05847..7933395 100644 +--- a/security/selinux/include/xfrm.h ++++ b/security/selinux/include/xfrm.h +@@ -46,7 +46,7 @@ static inline void selinux_xfrm_notify_policyload(void) + { + struct net *net; + +- atomic_inc(&flow_cache_genid); ++ atomic_inc_unchecked(&flow_cache_genid); + rtnl_lock(); + for_each_net(net) + rt_genid_bump_all(net); +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c +index 14f52be..7352368 100644 +--- a/security/smack/smack_lsm.c ++++ b/security/smack/smack_lsm.c +@@ -3726,7 +3726,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) + return 0; + } + +-struct security_operations smack_ops = { ++struct security_operations smack_ops __read_only = { + .name = "smack", + + .ptrace_access_check = smack_ptrace_access_check, +diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c +index 4003907..13a2b55 100644 +--- a/security/tomoyo/file.c ++++ b/security/tomoyo/file.c +@@ -692,7 +692,7 @@ int tomoyo_path_number_perm(const u8 type, struct path *path, + { + struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { +- .path1 = *path, ++ .path1 = { .mnt = path->mnt, .dentry = path->dentry }, + }; + int error = -ENOMEM; + struct tomoyo_path_info buf; +@@ -740,7 +740,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, + struct tomoyo_path_info buf; + struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { +- .path1 = *path, ++ .path1 = { .mnt = path->mnt, .dentry = path->dentry }, + }; + int idx; + +@@ -786,7 +786,7 @@ int tomoyo_path_perm(const u8 operation, struct path *path, const char 
*target) + { + struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { +- .path1 = *path, ++ .path1 = { .mnt = path->mnt, .dentry = path->dentry }, + }; + int error; + struct tomoyo_path_info buf; +@@ -843,7 +843,7 @@ int tomoyo_mkdev_perm(const u8 operation, struct path *path, + { + struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { +- .path1 = *path, ++ .path1 = { .mnt = path->mnt, .dentry = path->dentry }, + }; + int error = -ENOMEM; + struct tomoyo_path_info buf; +@@ -890,8 +890,8 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, + struct tomoyo_path_info buf2; + struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { +- .path1 = *path1, +- .path2 = *path2, ++ .path1 = { .mnt = path1->mnt, .dentry = path1->dentry }, ++ .path2 = { .mnt = path2->mnt, .dentry = path2->dentry } + }; + int idx; + +diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c +index 390c646..f2f8db3 100644 +--- a/security/tomoyo/mount.c ++++ b/security/tomoyo/mount.c +@@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, + type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) { + need_dev = -1; /* dev_name is a directory */ + } else { ++ if (!capable(CAP_SYS_ADMIN)) { ++ error = -EPERM; ++ goto out; ++ } + fstype = get_fs_type(type); + if (!fstype) { + error = -ENODEV; +diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c +index f0b756e..8aa497b 100644 +--- a/security/tomoyo/tomoyo.c ++++ b/security/tomoyo/tomoyo.c +@@ -146,7 +146,7 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) + */ + static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) + { +- struct path path = { mnt, dentry }; ++ struct path path = { .mnt = mnt, .dentry = dentry }; + return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL); + } + +@@ -172,7 +172,7 @@ static int tomoyo_path_truncate(struct path *path) + */ + static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) + { +- struct path path = { parent->mnt, dentry }; ++ struct path path = { .mnt = parent->mnt, .dentry = dentry }; + return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL); + } + +@@ -188,7 +188,7 @@ static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) + static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry, + umode_t mode) + { +- struct path path = { parent->mnt, dentry }; ++ struct path path = { .mnt = parent->mnt, .dentry = dentry }; + return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path, + mode & S_IALLUGO); + } +@@ -203,7 +203,7 @@ static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry, + */ + static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) + { +- struct path path = { parent->mnt, dentry }; ++ struct path path = { .mnt = parent->mnt, .dentry = dentry }; + return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL); + } + +@@ -219,7 +219,7 @@ static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) + static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, + const char *old_name) + { +- struct path path = { parent->mnt, dentry }; ++ struct path path = { .mnt = parent->mnt, .dentry = dentry }; + return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name); + } + +@@ -236,7 +236,7 @@ static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, + static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, + umode_t mode, unsigned int dev) + { +- struct path path = { parent->mnt, dentry }; ++ struct path path 
= { .mnt = parent->mnt, .dentry = dentry }; + int type = TOMOYO_TYPE_CREATE; + const unsigned int perm = mode & S_IALLUGO; + +@@ -275,8 +275,8 @@ static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, + static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry) + { +- struct path path1 = { new_dir->mnt, old_dentry }; +- struct path path2 = { new_dir->mnt, new_dentry }; ++ struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry }; ++ struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry }; + return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2); + } + +@@ -295,8 +295,8 @@ static int tomoyo_path_rename(struct path *old_parent, + struct path *new_parent, + struct dentry *new_dentry) + { +- struct path path1 = { old_parent->mnt, old_dentry }; +- struct path path2 = { new_parent->mnt, new_dentry }; ++ struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry }; ++ struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry }; + return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2); + } + +@@ -424,7 +424,7 @@ static int tomoyo_sb_mount(const char *dev_name, struct path *path, + */ + static int tomoyo_sb_umount(struct vfsmount *mnt, int flags) + { +- struct path path = { mnt, mnt->mnt_root }; ++ struct path path = { .mnt = mnt, .dentry = mnt->mnt_root }; + return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL); + } + +@@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, + * tomoyo_security_ops is a "struct security_operations" which is used for + * registering TOMOYO. + */ +-static struct security_operations tomoyo_security_ops = { ++static struct security_operations tomoyo_security_ops __read_only = { + .name = "tomoyo", + .cred_alloc_blank = tomoyo_cred_alloc_blank, + .cred_prepare = tomoyo_cred_prepare, +diff --git a/security/yama/Kconfig b/security/yama/Kconfig +index 20ef514..4182bed 100644 +--- a/security/yama/Kconfig ++++ b/security/yama/Kconfig +@@ -1,6 +1,6 @@ + config SECURITY_YAMA + bool "Yama support" +- depends on SECURITY ++ depends on SECURITY && !GRKERNSEC + select SECURITYFS + select SECURITY_PATH + default n +diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c +index 13c88fbc..f8c115e 100644 +--- a/security/yama/yama_lsm.c ++++ b/security/yama/yama_lsm.c +@@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_struct *parent) + } + + #ifndef CONFIG_SECURITY_YAMA_STACKED +-static struct security_operations yama_ops = { ++static struct security_operations yama_ops __read_only = { + .name = "yama", + + .ptrace_access_check = yama_ptrace_access_check, +@@ -376,28 +376,24 @@ static struct security_operations yama_ops = { + #endif + + #ifdef CONFIG_SYSCTL ++static int zero __read_only; ++static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH; ++ + static int yama_dointvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +- int rc; ++ ctl_table_no_const yama_table; + + if (write && !capable(CAP_SYS_PTRACE)) + return -EPERM; + +- rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos); +- if (rc) +- return rc; +- ++ yama_table = *table; + /* Lock the max value if it ever gets set. 
*/ +- if (write && *(int *)table->data == *(int *)table->extra2) +- table->extra1 = table->extra2; +- +- return rc; ++ if (ptrace_scope == max_scope) ++ yama_table.extra1 = &max_scope; ++ return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos); + } + +-static int zero; +-static int max_scope = YAMA_SCOPE_NO_ATTACH; +- + struct ctl_path yama_sysctl_path[] = { + { .procname = "kernel", }, + { .procname = "yama", }, +diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c +index 4cedc69..e59d8a3 100644 +--- a/sound/aoa/codecs/onyx.c ++++ b/sound/aoa/codecs/onyx.c +@@ -54,7 +54,7 @@ struct onyx { + spdif_locked:1, + analog_locked:1, + original_mute:2; +- int open_count; ++ local_t open_count; + struct codec_info *codec_info; + + /* mutex serializes concurrent access to the device +@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii, + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count++; ++ local_inc(&onyx->open_count); + mutex_unlock(&onyx->mutex); + + return 0; +@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii, + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count--; +- if (!onyx->open_count) ++ if (local_dec_and_test(&onyx->open_count)) + onyx->spdif_locked = onyx->analog_locked = 0; + mutex_unlock(&onyx->mutex); + +diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h +index ffd2025..df062c9 100644 +--- a/sound/aoa/codecs/onyx.h ++++ b/sound/aoa/codecs/onyx.h +@@ -11,6 +11,7 @@ + #include <linux/i2c.h> + #include <asm/pmac_low_i2c.h> + #include <asm/prom.h> ++#include <asm/local.h> + + /* PCM3052 register definitions */ + +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index 4c1cc51..16040040 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const + if (in_kernel) { + mm_segment_t fs; + fs = snd_enter_user(); +- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames); + snd_leave_user(fs); + } else { +- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames); + } + if (ret != -EPIPE && ret != -ESTRPIPE) + break; +@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p + if (in_kernel) { + mm_segment_t fs; + fs = snd_enter_user(); +- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames); + snd_leave_user(fs); + } else { +- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames); ++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames); + } + if (ret == -EPIPE) { + if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) { +@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha + struct snd_pcm_plugin_channel *channels; + size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8; + if (!in_kernel) { +- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes)) ++ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes)) + return -EFAULT; + buf = runtime->oss.buffer; + } +@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct 
snd_pcm_substream *substream, const cha + } + } else { + tmp = snd_pcm_oss_write2(substream, +- (const char __force *)buf, ++ (const char __force_kernel *)buf, + runtime->oss.period_bytes, 0); + if (tmp <= 0) + goto err; +@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf, + struct snd_pcm_runtime *runtime = substream->runtime; + snd_pcm_sframes_t frames, frames1; + #ifdef CONFIG_SND_PCM_OSS_PLUGINS +- char __user *final_dst = (char __force __user *)buf; ++ char __user *final_dst = (char __force_user *)buf; + if (runtime->oss.plugin_first) { + struct snd_pcm_plugin_channel *channels; + size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8; +@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use + xfer += tmp; + runtime->oss.buffer_used -= tmp; + } else { +- tmp = snd_pcm_oss_read2(substream, (char __force *)buf, ++ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf, + runtime->oss.period_bytes, 0); + if (tmp <= 0) + goto err; +@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) + size1); + size1 /= runtime->channels; /* frames */ + fs = snd_enter_user(); +- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1); ++ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1); + snd_leave_user(fs); + } + } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) { +diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c +index af49721..e85058e 100644 +--- a/sound/core/pcm_compat.c ++++ b/sound/core/pcm_compat.c +@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream, + int err; + + fs = snd_enter_user(); +- err = snd_pcm_delay(substream, &delay); ++ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay); + snd_leave_user(fs); + if (err < 0) + return err; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 01a5e05..c6bb425 100644 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -2811,11 +2811,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, + switch (substream->stream) { + case SNDRV_PCM_STREAM_PLAYBACK: + result = snd_pcm_playback_ioctl1(NULL, substream, cmd, +- (void __user *)arg); ++ (void __force_user *)arg); + break; + case SNDRV_PCM_STREAM_CAPTURE: + result = snd_pcm_capture_ioctl1(NULL, substream, cmd, +- (void __user *)arg); ++ (void __force_user *)arg); + break; + default: + result = -EINVAL; +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8d4d5e8..fdd0826 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -75,8 +75,8 @@ static int __init alsa_seq_oss_init(void) + { + int rc; + static struct snd_seq_dev_ops ops = { +- snd_seq_oss_synth_register, +- snd_seq_oss_synth_unregister, ++ .init_device = snd_seq_oss_synth_register, ++ .free_device = snd_seq_oss_synth_unregister, + }; + + snd_seq_autoload_lock(); +diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c +index 040c60e..989a19a 100644 +--- a/sound/core/seq/seq_device.c ++++ b/sound/core/seq/seq_device.c +@@ -64,7 +64,7 @@ struct ops_list { + int argsize; /* argument size */ + + /* operators */ +- struct snd_seq_dev_ops ops; ++ struct snd_seq_dev_ops *ops; + + /* registered devices */ + struct list_head dev_list; /* list of devices */ +@@ -333,7 +333,7 @@ int 
snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, + + mutex_lock(&ops->reg_mutex); + /* copy driver operators */ +- ops->ops = *entry; ++ ops->ops = entry; + ops->driver |= DRIVER_LOADED; + ops->argsize = argsize; + +@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops) + dev->name, ops->id, ops->argsize, dev->argsize); + return -EINVAL; + } +- if (ops->ops.init_device(dev) >= 0) { ++ if (ops->ops->init_device(dev) >= 0) { + dev->status = SNDRV_SEQ_DEVICE_REGISTERED; + ops->num_init_devices++; + } else { +@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops) + dev->name, ops->id, ops->argsize, dev->argsize); + return -EINVAL; + } +- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) { ++ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) { + dev->status = SNDRV_SEQ_DEVICE_FREE; + dev->driver_data = NULL; + ops->num_init_devices--; +diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c +index 64069db..3c6d392 100644 +--- a/sound/core/seq/seq_midi.c ++++ b/sound/core/seq/seq_midi.c +@@ -462,8 +462,8 @@ snd_seq_midisynth_unregister_port(struct snd_seq_device *dev) + static int __init alsa_seq_midi_init(void) + { + static struct snd_seq_dev_ops ops = { +- snd_seq_midisynth_register_port, +- snd_seq_midisynth_unregister_port, ++ .init_device = snd_seq_midisynth_register_port, ++ .free_device = snd_seq_midisynth_unregister_port, + }; + memset(&synths, 0, sizeof(synths)); + snd_seq_autoload_lock(); +diff --git a/sound/core/sound.c b/sound/core/sound.c +index 437c25e..cd040ab 100644 +--- a/sound/core/sound.c ++++ b/sound/core/sound.c +@@ -86,7 +86,7 @@ static void snd_request_other(int minor) + case SNDRV_MINOR_TIMER: str = "snd-timer"; break; + default: return; + } +- request_module(str); ++ request_module("%s", str); + } + + #endif /* modular kernel */ +diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c +index 4e0dd22..7a1f32c 100644 +--- a/sound/drivers/mts64.c ++++ b/sound/drivers/mts64.c +@@ -29,6 +29,7 @@ + #include <sound/initval.h> + #include <sound/rawmidi.h> + #include <sound/control.h> ++#include <asm/local.h> + + #define CARD_NAME "Miditerminal 4140" + #define DRIVER_NAME "MTS64" +@@ -67,7 +68,7 @@ struct mts64 { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ local_t open_count; + int current_midi_output_port; + int current_midi_input_port; + u8 mode[MTS64_NUM_INPUT_PORTS]; +@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) + { + struct mts64 *mts = substream->rmidi->private_data; + +- if (mts->open_count == 0) { ++ if (local_read(&mts->open_count) == 0) { + /* We don't need a spinlock here, because this is just called + if the device has not been opened before. 
+ So there aren't any IRQs from the device */ +@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) + + msleep(50); + } +- ++(mts->open_count); ++ local_inc(&mts->open_count); + + return 0; + } +@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) + struct mts64 *mts = substream->rmidi->private_data; + unsigned long flags; + +- --(mts->open_count); +- if (mts->open_count == 0) { ++ if (local_dec_return(&mts->open_count) == 0) { + /* We need the spinlock_irqsave here because we can still + have IRQs at this point */ + spin_lock_irqsave(&mts->lock, flags); +@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) + + msleep(500); + +- } else if (mts->open_count < 0) +- mts->open_count = 0; ++ } else if (local_read(&mts->open_count) < 0) ++ local_set(&mts->open_count, 0); + + return 0; + } +diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c +index 6839953..7a0f4b9 100644 +--- a/sound/drivers/opl3/opl3_seq.c ++++ b/sound/drivers/opl3/opl3_seq.c +@@ -281,8 +281,8 @@ static int __init alsa_opl3_seq_init(void) + { + static struct snd_seq_dev_ops ops = + { +- snd_opl3_seq_new_device, +- snd_opl3_seq_delete_device ++ .init_device = snd_opl3_seq_new_device, ++ .free_device = snd_opl3_seq_delete_device + }; + + return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops, +diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c +index b953fb4..1999c01 100644 +--- a/sound/drivers/opl4/opl4_lib.c ++++ b/sound/drivers/opl4/opl4_lib.c +@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); + MODULE_DESCRIPTION("OPL4 driver"); + MODULE_LICENSE("GPL"); + +-static void inline snd_opl4_wait(struct snd_opl4 *opl4) ++static inline void snd_opl4_wait(struct snd_opl4 *opl4) + { + int timeout = 10; + while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0) +diff --git a/sound/drivers/opl4/opl4_seq.c b/sound/drivers/opl4/opl4_seq.c +index 9919769..d7de36c 100644 +--- a/sound/drivers/opl4/opl4_seq.c ++++ b/sound/drivers/opl4/opl4_seq.c +@@ -198,8 +198,8 @@ static int snd_opl4_seq_delete_device(struct snd_seq_device *dev) + static int __init alsa_opl4_synth_init(void) + { + static struct snd_seq_dev_ops ops = { +- snd_opl4_seq_new_device, +- snd_opl4_seq_delete_device ++ .init_device = snd_opl4_seq_new_device, ++ .free_device = snd_opl4_seq_delete_device + }; + + return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL4, &ops, +diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c +index 991018d..8984740 100644 +--- a/sound/drivers/portman2x4.c ++++ b/sound/drivers/portman2x4.c +@@ -48,6 +48,7 @@ + #include <sound/initval.h> + #include <sound/rawmidi.h> + #include <sound/control.h> ++#include <asm/local.h> + + #define CARD_NAME "Portman 2x4" + #define DRIVER_NAME "portman" +@@ -85,7 +86,7 @@ struct portman { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ local_t open_count; + int mode[PORTMAN_NUM_INPUT_PORTS]; + struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS]; + }; +diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c +index 9048777..2d8b1fc 100644 +--- a/sound/firewire/amdtp.c ++++ b/sound/firewire/amdtp.c +@@ -488,7 +488,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle) + ptr = s->pcm_buffer_pointer + data_blocks; + if (ptr >= pcm->runtime->buffer_size) + ptr -= pcm->runtime->buffer_size; +- 
ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; ++ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr; + + s->pcm_period_pointer += data_blocks; + if (s->pcm_period_pointer >= pcm->runtime->period_size) { +@@ -655,7 +655,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer); + */ + void amdtp_out_stream_update(struct amdtp_out_stream *s) + { +- ACCESS_ONCE(s->source_node_id_field) = ++ ACCESS_ONCE_RW(s->source_node_id_field) = + (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24; + } + EXPORT_SYMBOL(amdtp_out_stream_update); +diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h +index 2746ecd..c35dedd 100644 +--- a/sound/firewire/amdtp.h ++++ b/sound/firewire/amdtp.h +@@ -135,7 +135,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s) + static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s, + struct snd_pcm_substream *pcm) + { +- ACCESS_ONCE(s->pcm) = pcm; ++ ACCESS_ONCE_RW(s->pcm) = pcm; + } + + static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc) +diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c +index fd42e6b..c041971 100644 +--- a/sound/firewire/isight.c ++++ b/sound/firewire/isight.c +@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count) + ptr += count; + if (ptr >= runtime->buffer_size) + ptr -= runtime->buffer_size; +- ACCESS_ONCE(isight->buffer_pointer) = ptr; ++ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr; + + isight->period_counter += count; + if (isight->period_counter >= runtime->period_size) { +@@ -299,7 +299,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream, + if (err < 0) + return err; + +- ACCESS_ONCE(isight->pcm_active) = true; ++ ACCESS_ONCE_RW(isight->pcm_active) = true; + + return 0; + } +@@ -337,7 +337,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream) + { + struct isight *isight = substream->private_data; + +- ACCESS_ONCE(isight->pcm_active) = false; ++ ACCESS_ONCE_RW(isight->pcm_active) = false; + + mutex_lock(&isight->mutex); + isight_stop_streaming(isight); +@@ -430,10 +430,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd) + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: +- ACCESS_ONCE(isight->pcm_running) = true; ++ ACCESS_ONCE_RW(isight->pcm_running) = true; + break; + case SNDRV_PCM_TRIGGER_STOP: +- ACCESS_ONCE(isight->pcm_running) = false; ++ ACCESS_ONCE_RW(isight->pcm_running) = false; + break; + default: + return -EINVAL; +diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c +index 858023c..83b3d3c 100644 +--- a/sound/firewire/scs1x.c ++++ b/sound/firewire/scs1x.c +@@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up) + { + struct scs *scs = stream->rmidi->private_data; + +- ACCESS_ONCE(scs->output) = up ? stream : NULL; ++ ACCESS_ONCE_RW(scs->output) = up ? stream : NULL; + if (up) { + scs->output_idle = false; + tasklet_schedule(&scs->tasklet); +@@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up) + { + struct scs *scs = stream->rmidi->private_data; + +- ACCESS_ONCE(scs->input) = up ? stream : NULL; ++ ACCESS_ONCE_RW(scs->input) = up ? 
stream : NULL; + } + + static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream, +@@ -473,8 +473,8 @@ static void scs_remove(struct fw_unit *unit) + + snd_card_disconnect(scs->card); + +- ACCESS_ONCE(scs->output) = NULL; +- ACCESS_ONCE(scs->input) = NULL; ++ ACCESS_ONCE_RW(scs->output) = NULL; ++ ACCESS_ONCE_RW(scs->input) = NULL; + + wait_event(scs->idle_wait, scs->output_idle); + +diff --git a/sound/isa/sb/emu8000_synth.c b/sound/isa/sb/emu8000_synth.c +index 4e3fcfb..ab45a9d 100644 +--- a/sound/isa/sb/emu8000_synth.c ++++ b/sound/isa/sb/emu8000_synth.c +@@ -120,8 +120,8 @@ static int __init alsa_emu8000_init(void) + { + + static struct snd_seq_dev_ops ops = { +- snd_emu8000_new_device, +- snd_emu8000_delete_device, ++ .init_device = snd_emu8000_new_device, ++ .free_device = snd_emu8000_delete_device, + }; + return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU8000, &ops, + sizeof(struct snd_emu8000*)); +diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c +index 048439a..3be9f6f 100644 +--- a/sound/oss/sb_audio.c ++++ b/sound/oss/sb_audio.c +@@ -904,7 +904,7 @@ sb16_copy_from_user(int dev, + buf16 = (signed short *)(localbuf + localoffs); + while (c) + { +- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); ++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); + if (copy_from_user(lbuf8, + userbuf+useroffs + p, + locallen)) +diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c +index f851fd0..92576fb 100644 +--- a/sound/oss/swarm_cs4297a.c ++++ b/sound/oss/swarm_cs4297a.c +@@ -2623,7 +2623,6 @@ static int __init cs4297a_init(void) + { + struct cs4297a_state *s; + u32 pwr, id; +- mm_segment_t fs; + int rval; + #ifndef CONFIG_BCM_CS4297A_CSWARM + u64 cfg; +@@ -2713,22 +2712,23 @@ static int __init cs4297a_init(void) + if (!rval) { + char *sb1250_duart_present; + ++#if 0 ++ mm_segment_t fs; + fs = get_fs(); + set_fs(KERNEL_DS); +-#if 0 + val = SOUND_MASK_LINE; + mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val); + for (i = 0; i < ARRAY_SIZE(initvol); i++) { + val = initvol[i].vol; + mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val); + } ++ set_fs(fs); + // cs4297a_write_ac97(s, 0x18, 0x0808); + #else + // cs4297a_write_ac97(s, 0x5e, 0x180); + cs4297a_write_ac97(s, 0x02, 0x0808); + cs4297a_write_ac97(s, 0x18, 0x0808); + #endif +- set_fs(fs); + + list_add(&s->list, &cs4297a_devs); + +diff --git a/sound/pci/emu10k1/emu10k1_synth.c b/sound/pci/emu10k1/emu10k1_synth.c +index 4c41c90..37f3631 100644 +--- a/sound/pci/emu10k1/emu10k1_synth.c ++++ b/sound/pci/emu10k1/emu10k1_synth.c +@@ -108,8 +108,8 @@ static int __init alsa_emu10k1_synth_init(void) + { + + static struct snd_seq_dev_ops ops = { +- snd_emu10k1_synth_new_device, +- snd_emu10k1_synth_delete_device, ++ .init_device = snd_emu10k1_synth_new_device, ++ .free_device = snd_emu10k1_synth_delete_device, + }; + return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH, &ops, + sizeof(struct snd_emu10k1_synth_arg)); +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index dafcf82..dd9356f 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -983,14 +983,10 @@ find_codec_preset(struct hda_codec *codec) + mutex_unlock(&preset_mutex); + + if (mod_requested < HDA_MODREQ_MAX_COUNT) { +- char name[32]; + if (!mod_requested) +- snprintf(name, sizeof(name), "snd-hda-codec-id:%08x", +- codec->vendor_id); ++ request_module("snd-hda-codec-id:%08x", codec->vendor_id); + else +- snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*", +- 
(codec->vendor_id >> 16) & 0xffff); +- request_module(name); ++ request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff); + mod_requested++; + goto again; + } +@@ -2739,7 +2735,7 @@ static int get_kctl_0dB_offset(struct snd_kcontrol *kctl, int *step_to_check) + /* FIXME: set_fs() hack for obtaining user-space TLV data */ + mm_segment_t fs = get_fs(); + set_fs(get_ds()); +- if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), _tlv)) ++ if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), (unsigned int __force_user *)_tlv)) + tlv = _tlv; + set_fs(fs); + } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) +diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h +index 4631a23..001ae57 100644 +--- a/sound/pci/ymfpci/ymfpci.h ++++ b/sound/pci/ymfpci/ymfpci.h +@@ -358,7 +358,7 @@ struct snd_ymfpci { + spinlock_t reg_lock; + spinlock_t voice_lock; + wait_queue_head_t interrupt_sleep; +- atomic_t interrupt_sleep_count; ++ atomic_unchecked_t interrupt_sleep_count; + struct snd_info_entry *proc_entry; + const struct firmware *dsp_microcode; + const struct firmware *controller_microcode; +diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c +index d591c15..8cb8f94 100644 +--- a/sound/pci/ymfpci/ymfpci_main.c ++++ b/sound/pci/ymfpci/ymfpci_main.c +@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) + if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) + break; + } +- if (atomic_read(&chip->interrupt_sleep_count)) { +- atomic_set(&chip->interrupt_sleep_count, 0); ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + wake_up(&chip->interrupt_sleep); + } + __end: +@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) + continue; + init_waitqueue_entry(&wait, current); + add_wait_queue(&chip->interrupt_sleep, &wait); +- atomic_inc(&chip->interrupt_sleep_count); ++ atomic_inc_unchecked(&chip->interrupt_sleep_count); + schedule_timeout_uninterruptible(msecs_to_jiffies(50)); + remove_wait_queue(&chip->interrupt_sleep, &wait); + } +@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id) + snd_ymfpci_writel(chip, YDSXGR_MODE, mode); + spin_unlock(&chip->reg_lock); + +- if (atomic_read(&chip->interrupt_sleep_count)) { +- atomic_set(&chip->interrupt_sleep_count, 0); ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + wake_up(&chip->interrupt_sleep); + } + } +@@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card, + spin_lock_init(&chip->reg_lock); + spin_lock_init(&chip->voice_lock); + init_waitqueue_head(&chip->interrupt_sleep); +- atomic_set(&chip->interrupt_sleep_count, 0); ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0); + chip->card = card; + chip->pci = pci; + chip->irq = -1; +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c +index 5428a1f..474f651 100644 +--- a/sound/soc/fsl/fsl_ssi.c ++++ b/sound/soc/fsl/fsl_ssi.c +@@ -1255,7 +1255,6 @@ static int fsl_ssi_probe(struct platform_device *pdev) + { + struct fsl_ssi_private *ssi_private; + int ret = 0; +- struct device_attribute *dev_attr = NULL; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id; + enum fsl_ssi_type hw_type; +@@ -1481,7 +1480,7 @@ static int fsl_ssi_probe(struct platform_device *pdev) + &ssi_private->cpu_dai_drv, 1); + if (ret) { + dev_err(&pdev->dev, "failed to register DAI: %d\n", ret); +- goto error_dev; ++ goto error_clk; + } 
+ + ret = fsl_ssi_debugfs_create(ssi_private, &pdev->dev); +@@ -1562,9 +1561,6 @@ error_pcm: + error_dbgfs: + snd_soc_unregister_component(&pdev->dev); + +-error_dev: +- device_remove_file(&pdev->dev, dev_attr); +- + error_clk: + if (ssi_private->ssi_on_imx) { + if (!IS_ERR(ssi_private->baudclk)) +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index fe1df50..09d3be6 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -2254,8 +2254,10 @@ int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, + if (ret) + return ret; + +- ops->warm_reset = snd_soc_ac97_warm_reset; +- ops->reset = snd_soc_ac97_reset; ++ pax_open_kernel(); ++ *(void **)&ops->warm_reset = snd_soc_ac97_warm_reset; ++ *(void **)&ops->reset = snd_soc_ac97_reset; ++ pax_close_kernel(); + + snd_ac97_rst_cfg = cfg; + return 0; +diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c +index 7778b8e..3d619fc 100644 +--- a/sound/synth/emux/emux_seq.c ++++ b/sound/synth/emux/emux_seq.c +@@ -33,13 +33,13 @@ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *inf + * MIDI emulation operators + */ + static struct snd_midi_op emux_ops = { +- snd_emux_note_on, +- snd_emux_note_off, +- snd_emux_key_press, +- snd_emux_terminate_note, +- snd_emux_control, +- snd_emux_nrpn, +- snd_emux_sysex, ++ .note_on = snd_emux_note_on, ++ .note_off = snd_emux_note_off, ++ .key_press = snd_emux_key_press, ++ .note_terminate = snd_emux_terminate_note, ++ .control = snd_emux_control, ++ .nrpn = snd_emux_nrpn, ++ .sysex = snd_emux_sysex, + }; + + +diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore +new file mode 100644 +index 0000000..60e7af2 +--- /dev/null ++++ b/tools/gcc/.gitignore +@@ -0,0 +1,2 @@ ++randomize_layout_seed.h ++randomize_layout_hash.h +diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile +new file mode 100644 +index 0000000..7b8921f +--- /dev/null ++++ b/tools/gcc/Makefile +@@ -0,0 +1,52 @@ ++#CC := gcc ++#PLUGIN_SOURCE_FILES := pax_plugin.c ++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES)) ++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin) ++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99 ++ ++ifeq ($(PLUGINCC),$(HOSTCC)) ++HOSTLIBS := hostlibs ++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb ++export HOST_EXTRACFLAGS ++else ++HOSTLIBS := hostcxxlibs ++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing -Wno-unused-variable ++export HOST_EXTRACXXFLAGS ++endif ++ ++export GCCPLUGINS_DIR HOSTLIBS ++ ++$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so ++$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so ++$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so ++$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so ++$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so ++$(HOSTLIBS)-y += colorize_plugin.so ++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so ++$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so ++$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so ++ ++subdir-$(CONFIG_PAX_SIZE_OVERFLOW) := size_overflow_plugin ++subdir- += size_overflow_plugin ++ ++always := $($(HOSTLIBS)-y) ++ ++constify_plugin-objs := constify_plugin.o ++stackleak_plugin-objs := stackleak_plugin.o ++kallocstat_plugin-objs := kallocstat_plugin.o ++kernexec_plugin-objs := kernexec_plugin.o ++checker_plugin-objs := checker_plugin.o 
++colorize_plugin-objs := colorize_plugin.o ++latent_entropy_plugin-objs := latent_entropy_plugin.o ++structleak_plugin-objs := structleak_plugin.o ++randomize_layout_plugin-objs := randomize_layout_plugin.o ++ ++$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h ++ ++quiet_cmd_create_randomize_layout_seed = GENSEED $@ ++ cmd_create_randomize_layout_seed = \ ++ $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h ++$(objtree)/$(obj)/randomize_layout_seed.h: FORCE ++ $(call if_changed,create_randomize_layout_seed) ++ ++targets += randomize_layout_seed.h randomize_layout_hash.h +diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c +new file mode 100644 +index 0000000..5452feea +--- /dev/null ++++ b/tools/gcc/checker_plugin.c +@@ -0,0 +1,150 @@ ++/* ++ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to implement various sparse (source code checker) features ++ * ++ * TODO: ++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch) ++ * ++ * BUGS: ++ * - none known ++ */ ++ ++#include "gcc-common.h" ++ ++extern void c_register_addr_space (const char *str, addr_space_t as); ++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t); ++extern enum machine_mode default_addr_space_address_mode (addr_space_t); ++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as); ++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as); ++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info checker_plugin_info = { ++ .version = "201304082245", ++ .help = NULL, ++}; ++ ++#define ADDR_SPACE_KERNEL 0 ++#define ADDR_SPACE_FORCE_KERNEL 1 ++#define ADDR_SPACE_USER 2 ++#define ADDR_SPACE_FORCE_USER 3 ++#define ADDR_SPACE_IOMEM 0 ++#define ADDR_SPACE_FORCE_IOMEM 0 ++#define ADDR_SPACE_PERCPU 0 ++#define ADDR_SPACE_FORCE_PERCPU 0 ++#define ADDR_SPACE_RCU 0 ++#define ADDR_SPACE_FORCE_RCU 0 ++ ++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC); ++} ++ ++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC); ++} ++ ++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_valid_pointer_mode(mode, as); ++} ++ ++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as) ++{ ++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC); ++} ++ ++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_legitimize_address(x, oldx, mode, as); ++} ++ ++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset) ++{ ++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL) ++ return true; ++ ++ if (subset 
== ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
++		return true;
++
++	if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
++		return true;
++
++	if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
++		return true;
++
++	if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
++		return true;
++
++	if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
++		return true;
++
++	if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
++		return true;
++
++	return subset == superset;
++}
++
++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
++{
++//	addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
++//	addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
++
++	return op;
++}
++
++static void register_checker_address_spaces(void *event_data, void *data)
++{
++	c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
++	c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
++	c_register_addr_space("__user", ADDR_SPACE_USER);
++	c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
++//	c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
++//	c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
++//	c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
++//	c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
++//	c_register_addr_space("__rcu", ADDR_SPACE_RCU);
++//	c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
++
++	targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
++	targetm.addr_space.address_mode = checker_addr_space_address_mode;
++	targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
++	targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
++//	targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
++	targetm.addr_space.subset_p = checker_addr_space_subset_p;
++	targetm.addr_space.convert = checker_addr_space_convert;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++	const char * const plugin_name = plugin_info->base_name;
++	const int argc = plugin_info->argc;
++	const struct plugin_argument * const argv = plugin_info->argv;
++	int i;
++
++	if (!plugin_default_version_check(version, &gcc_version)) {
++		error(G_("incompatible gcc/plugin versions"));
++		return 1;
++	}
++
++	register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
++
++	for (i = 0; i < argc; ++i)
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++
++	if (TARGET_64BIT == 0)
++		return 0;
++
++	register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
++
++	return 0;
++}
+diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
+new file mode 100644
+index 0000000..54461af
+--- /dev/null
++++ b/tools/gcc/colorize_plugin.c
+@@ -0,0 +1,210 @@
++/*
++ * Copyright 2012-2014 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to colorize diagnostic output
++ *
++ */
++
++#include "gcc-common.h"
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info colorize_plugin_info = {
++	.version = "201404202350",
++	.help =
"color=[never|always|auto]\tdetermine when to colorize\n", ++}; ++ ++#define GREEN "\033[32m\033[K" ++#define LIGHTGREEN "\033[1;32m\033[K" ++#define YELLOW "\033[33m\033[K" ++#define LIGHTYELLOW "\033[1;33m\033[K" ++#define RED "\033[31m\033[K" ++#define LIGHTRED "\033[1;31m\033[K" ++#define BLUE "\033[34m\033[K" ++#define LIGHTBLUE "\033[1;34m\033[K" ++#define BRIGHT "\033[1;m\033[K" ++#define NORMAL "\033[m\033[K" ++ ++static diagnostic_starter_fn old_starter; ++static diagnostic_finalizer_fn old_finalizer; ++ ++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ const char *color; ++ char *newprefix; ++ ++ switch (diagnostic->kind) { ++ case DK_NOTE: ++ color = LIGHTBLUE; ++ break; ++ ++ case DK_PEDWARN: ++ case DK_WARNING: ++ color = LIGHTYELLOW; ++ break; ++ ++ case DK_ERROR: ++ case DK_FATAL: ++ case DK_ICE: ++ case DK_PERMERROR: ++ case DK_SORRY: ++ color = LIGHTRED; ++ break; ++ ++ default: ++ color = NORMAL; ++ } ++ ++ old_starter(context, diagnostic); ++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix)) ++ return; ++ pp_destroy_prefix(context->printer); ++ pp_set_prefix(context->printer, newprefix); ++} ++ ++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ old_finalizer(context, diagnostic); ++} ++ ++static void colorize_arm(void) ++{ ++ old_starter = diagnostic_starter(global_dc); ++ old_finalizer = diagnostic_finalizer(global_dc); ++ ++ diagnostic_starter(global_dc) = start_colorize; ++ diagnostic_finalizer(global_dc) = finalize_colorize; ++} ++ ++static unsigned int execute_colorize_rearm(void) ++{ ++ if (diagnostic_starter(global_dc) == start_colorize) ++ return 0; ++ ++ colorize_arm(); ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data colorize_rearm_pass_data = { ++#else ++struct simple_ipa_opt_pass colorize_rearm_pass = { ++ .pass = { ++#endif ++ .type = SIMPLE_IPA_PASS, ++ .name = "colorize_rearm", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = execute_colorize_rearm, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class colorize_rearm_pass : public simple_ipa_opt_pass { ++public: ++ colorize_rearm_pass() : simple_ipa_opt_pass(colorize_rearm_pass_data, g) {} ++ unsigned int execute() { return execute_colorize_rearm(); } ++}; ++} ++ ++static opt_pass *make_colorize_rearm_pass(void) ++{ ++ return new colorize_rearm_pass(); ++} ++#else ++static struct opt_pass *make_colorize_rearm_pass(void) ++{ ++ return &colorize_rearm_pass.pass; ++} ++#endif ++ ++static void colorize_start_unit(void *gcc_data, void *user_data) ++{ ++ colorize_arm(); ++} ++ ++static bool should_colorize(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return false; ++#else ++ char const *t = getenv("TERM"); ++ ++ return t && strcmp(t, "dumb") && isatty(STDERR_FILENO); ++#endif ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = 
plugin_info->argv;
++	int i;
++	struct register_pass_info colorize_rearm_pass_info;
++	bool colorize;
++
++	colorize_rearm_pass_info.pass = make_colorize_rearm_pass();
++	colorize_rearm_pass_info.reference_pass_name = "*free_lang_data";
++	colorize_rearm_pass_info.ref_pass_instance_number = 1;
++	colorize_rearm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
++
++	if (!plugin_default_version_check(version, &gcc_version)) {
++		error(G_("incompatible gcc/plugin versions"));
++		return 1;
++	}
++
++	register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
++
++	colorize = getenv("GCC_COLORS") ? should_colorize() : false;
++
++	for (i = 0; i < argc; ++i) {
++		if (!strcmp(argv[i].key, "color")) {
++			if (!argv[i].value) {
++				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++				continue;
++			}
++			if (!strcmp(argv[i].value, "always"))
++				colorize = true;
++			else if (!strcmp(argv[i].value, "never"))
++				colorize = false;
++			else if (!strcmp(argv[i].value, "auto"))
++				colorize = should_colorize();
++			else
++				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++			continue;
++		}
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++	}
++
++	if (colorize) {
++		// TODO: parse GCC_COLORS as used by gcc 4.9+
++		register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
++		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
++	}
++	return 0;
++}
+diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
+new file mode 100644
+index 0000000..82bc5a8
+--- /dev/null
++++ b/tools/gcc/constify_plugin.c
+@@ -0,0 +1,557 @@
++/*
++ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2014 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
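++ *
++ * For illustration only (a hypothetical type, not taken from the kernel):
++ * a type such as
++ *
++ *	struct example_ops {
++ *		int (*start)(void *priv);
++ *		void (*stop)(void *priv);
++ *	};
++ *
++ * contains only function pointers and is therefore constified implicitly;
++ * __attribute__((no_const)) opts such a type out of constification, while
++ * __attribute__((do_const)) forces constification of a type that also
++ * contains writable fields.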
++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/const_plugin/ ++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c ++ * $ gcc -fplugin=constify_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-common.h" ++ ++// unused C type flag in all versions 4.5-4.9 ++#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE) ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info const_plugin_info = { ++ .version = "201401270210", ++ .help = "no-constify\tturn off constification\n", ++}; ++ ++typedef struct { ++ bool has_fptr_field; ++ bool has_writable_field; ++ bool has_do_const_field; ++ bool has_no_const_field; ++} constify_info; ++ ++static const_tree get_field_type(const_tree field) ++{ ++ return strip_array_types(TREE_TYPE(field)); ++} ++ ++static bool is_fptr(const_tree field) ++{ ++ const_tree ptr = get_field_type(field); ++ ++ if (TREE_CODE(ptr) != POINTER_TYPE) ++ return false; ++ ++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE; ++} ++ ++/* ++ * determine whether the given structure type meets the requirements for automatic constification, ++ * including the constification attributes on nested structure types ++ */ ++static void constifiable(const_tree node, constify_info *cinfo) ++{ ++ const_tree field; ++ ++ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE); ++ ++ // e.g., pointer to structure fields while still constructing the structure type ++ if (TYPE_FIELDS(node) == NULL_TREE) ++ return; ++ ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { ++ const_tree type = get_field_type(field); ++ enum tree_code code = TREE_CODE(type); ++ ++ if (node == type) ++ continue; ++ ++ if (is_fptr(field)) ++ cinfo->has_fptr_field = true; ++ else if (!TREE_READONLY(field)) ++ cinfo->has_writable_field = true; ++ ++ if (code == RECORD_TYPE || code == UNION_TYPE) { ++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) ++ cinfo->has_do_const_field = true; ++ else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) ++ cinfo->has_no_const_field = true; ++ else ++ constifiable(type, cinfo); ++ } ++ } ++} ++ ++static bool constified(const_tree node) ++{ ++ constify_info cinfo = { ++ .has_fptr_field = false, ++ .has_writable_field = false, ++ .has_do_const_field = false, ++ .has_no_const_field = false ++ }; ++ ++ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE); ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) { ++// gcc_assert(!TYPE_READONLY(node)); ++ return false; ++ } ++ ++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) { ++ gcc_assert(TYPE_READONLY(node)); ++ return true; ++ } ++ ++ constifiable(node, &cinfo); ++ if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field) ++ return false; ++ ++ return TYPE_READONLY(node); ++} ++ ++static void deconstify_tree(tree node); ++ ++static void deconstify_type(tree type) ++{ ++ tree field; ++ ++ gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE); ++ ++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) { ++ const_tree fieldtype = get_field_type(field); ++ ++ // special case handling of simple ptr-to-same-array-type members ++ if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) { ++ tree ptrtype = TREE_TYPE(TREE_TYPE(field)); ++ ++ if (TREE_TYPE(TREE_TYPE(field)) == type) ++ continue; ++ if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE) ++ continue; ++ if (!constified(ptrtype)) 
++ continue; ++ if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) { ++ TREE_TYPE(field) = copy_node(TREE_TYPE(field)); ++ TREE_TYPE(TREE_TYPE(field)) = build_qualified_type(type, TYPE_QUALS(ptrtype) & ~TYPE_QUAL_CONST); ++ } ++ continue; ++ } ++ if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE) ++ continue; ++ if (!constified(fieldtype)) ++ continue; ++ ++ deconstify_tree(field); ++ TREE_READONLY(field) = 0; ++ } ++ TYPE_READONLY(type) = 0; ++ C_TYPE_FIELDS_READONLY(type) = 0; ++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) { ++ TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type)); ++ TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type)); ++ } ++} ++ ++static void deconstify_tree(tree node) ++{ ++ tree old_type, new_type, field; ++ ++ old_type = TREE_TYPE(node); ++ while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) { ++ node = TREE_TYPE(node) = copy_node(old_type); ++ old_type = TREE_TYPE(old_type); ++ } ++ ++ gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE); ++ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST)); ++ ++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST); ++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type)); ++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field)) ++ DECL_FIELD_CONTEXT(field) = new_type; ++ ++ deconstify_type(new_type); ++ ++ TREE_TYPE(node) = new_type; ++} ++ ++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ tree type; ++ constify_info cinfo = { ++ .has_fptr_field = false, ++ .has_writable_field = false, ++ .has_do_const_field = false, ++ .has_no_const_field = false ++ }; ++ ++ *no_add_attrs = true; ++ if (TREE_CODE(*node) == FUNCTION_DECL) { ++ error("%qE attribute does not apply to functions (%qF)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == PARM_DECL) { ++ error("%qE attribute does not apply to function parameters (%qD)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == VAR_DECL) { ++ error("%qE attribute does not apply to variables (%qD)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ type = *node; ++ } else { ++ gcc_assert(TREE_CODE(*node) == TYPE_DECL); ++ type = TREE_TYPE(*node); ++ } ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) { ++ error("%qE attribute used on %qT applies to struct and union types only", name, type); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { ++ error("%qE attribute is already applied to the type %qT", name, type); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) ++ error("%qE attribute used on type %qT is incompatible with 'do_const'", name, type); ++ else ++ *no_add_attrs = false; ++ return NULL_TREE; ++ } ++ ++ constifiable(type, &cinfo); ++ if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) { ++ deconstify_tree(*node); ++ TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1; ++ return NULL_TREE; ++ } ++ ++ error("%qE attribute used on type %qT that is not constified", name, type); ++ return NULL_TREE; ++} ++ ++static void constify_type(tree type) ++{ ++ TYPE_READONLY(type) = 1; ++ C_TYPE_FIELDS_READONLY(type) = 1; ++ TYPE_CONSTIFY_VISITED(type) = 1; ++// 
TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type)); ++// TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type)); ++} ++ ++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = true; ++ if (!TYPE_P(*node)) { ++ error("%qE attribute applies to types only (%qD)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) { ++ error("%qE attribute used on %qT applies to struct and union types only", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) { ++ error("%qE attribute used on %qT is already applied to the type", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) { ++ error("%qE attribute used on %qT is incompatible with 'no_const'", name, *node); ++ return NULL_TREE; ++ } ++ ++ *no_add_attrs = false; ++ return NULL_TREE; ++} ++ ++static struct attribute_spec no_const_attr = { ++ .name = "no_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_no_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec do_const_attr = { ++ .name = "do_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_do_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&no_const_attr); ++ register_attribute(&do_const_attr); ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ constify_info cinfo = { ++ .has_fptr_field = false, ++ .has_writable_field = false, ++ .has_do_const_field = false, ++ .has_no_const_field = false ++ }; ++ ++ if (type == NULL_TREE || type == error_mark_node) ++ return; ++ ++ if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type)) ++ return; ++ ++ constifiable(type, &cinfo); ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) { ++ if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) { ++ deconstify_type(type); ++ TYPE_CONSTIFY_VISITED(type) = 1; ++ } else ++ error("'no_const' attribute used on type %qT that is not constified", type); ++ return; ++ } ++ ++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) { ++ if (!cinfo.has_writable_field) { ++ error("'do_const' attribute used on type %qT that is%sconstified", type, cinfo.has_fptr_field ? 
" " : " not "); ++ return; ++ } ++ constify_type(type); ++ return; ++ } ++ ++ if (cinfo.has_fptr_field && !cinfo.has_writable_field) { ++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) { ++ error("'do_const' attribute used on type %qT that is constified", type); ++ return; ++ } ++ constify_type(type); ++ return; ++ } ++ ++ deconstify_type(type); ++ TYPE_CONSTIFY_VISITED(type) = 1; ++} ++ ++static void check_global_variables(void *event_data, void *data) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ varpool_node *node; ++#else ++ struct varpool_node *node; ++#endif ++ ++ FOR_EACH_VARIABLE(node) { ++ tree var = NODE_DECL(node); ++ tree type = TREE_TYPE(var); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type)) ++ continue; ++ ++ if (!TYPE_CONSTIFY_VISITED(type)) ++ continue; ++ ++ if (DECL_EXTERNAL(var)) ++ continue; ++ ++ if (DECL_INITIAL(var)) ++ continue; ++ ++ // this works around a gcc bug/feature where uninitialized globals ++ // are moved into the .bss section regardless of any constification ++ DECL_INITIAL(var) = build_constructor(type, NULL); ++// inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var); ++ } ++} ++ ++static unsigned int check_local_variables(void) ++{ ++ unsigned int ret = 0; ++ tree var; ++ ++ unsigned int i; ++ ++ FOR_EACH_LOCAL_DECL(cfun, i, var) { ++ tree type = TREE_TYPE(var); ++ ++ gcc_assert(DECL_P(var)); ++ if (is_global_var(var)) ++ continue; ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type)) ++ continue; ++ ++ if (!TYPE_CONSTIFY_VISITED(type)) ++ continue; ++ ++ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var); ++ ret = 1; ++ } ++ return ret; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data check_local_variables_pass_data = { ++#else ++static struct gimple_opt_pass check_local_variables_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "check_local_variables", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = check_local_variables, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class check_local_variables_pass : public gimple_opt_pass { ++public: ++ check_local_variables_pass() : gimple_opt_pass(check_local_variables_pass_data, g) {} ++ unsigned int execute() { return check_local_variables(); } ++}; ++} ++ ++static opt_pass *make_check_local_variables_pass(void) ++{ ++ return new check_local_variables_pass(); ++} ++#else ++static struct opt_pass *make_check_local_variables_pass(void) ++{ ++ return &check_local_variables_pass.pass; ++} ++#endif ++ ++static struct { ++ const char *name; ++ const char *asm_op; ++} sections[] = { ++ {".init.rodata", "\t.section\t.init.rodata,\"a\""}, ++ {".ref.rodata", "\t.section\t.ref.rodata,\"a\""}, ++ {".devinit.rodata", "\t.section\t.devinit.rodata,\"a\""}, ++ {".devexit.rodata", "\t.section\t.devexit.rodata,\"a\""}, ++ {".cpuinit.rodata", "\t.section\t.cpuinit.rodata,\"a\""}, ++ 
{".cpuexit.rodata", "\t.section\t.cpuexit.rodata,\"a\""}, ++ {".meminit.rodata", "\t.section\t.meminit.rodata,\"a\""}, ++ {".memexit.rodata", "\t.section\t.memexit.rodata,\"a\""}, ++ {".data..read_only", "\t.section\t.data..read_only,\"a\""}, ++}; ++ ++static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc); ++ ++static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc) ++{ ++ size_t i; ++ ++ for (i = 0; i < ARRAY_SIZE(sections); i++) ++ if (!strcmp(sections[i].name, name)) ++ return 0; ++ return old_section_type_flags(decl, name, reloc); ++} ++ ++static void constify_start_unit(void *gcc_data, void *user_data) ++{ ++// size_t i; ++ ++// for (i = 0; i < ARRAY_SIZE(sections); i++) ++// sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op); ++// sections[i].section = get_section(sections[i].name, 0, NULL); ++ ++ old_section_type_flags = targetm.section_type_flags; ++ targetm.section_type_flags = constify_section_type_flags; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ bool constify = true; ++ ++ struct register_pass_info check_local_variables_pass_info; ++ ++ check_local_variables_pass_info.pass = make_check_local_variables_pass(); ++ check_local_variables_pass_info.reference_pass_name = "ssa"; ++ check_local_variables_pass_info.ref_pass_instance_number = 1; ++ check_local_variables_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!(strcmp(argv[i].key, "no-constify"))) { ++ constify = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ if (strcmp(lang_hooks.name, "GNU C")) { ++ inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name); ++ constify = false; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info); ++ if (constify) { ++ register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL); ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &check_local_variables_pass_info); ++ register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h +new file mode 100644 +index 0000000..e90c205 +--- /dev/null ++++ b/tools/gcc/gcc-common.h +@@ -0,0 +1,295 @@ ++#ifndef GCC_COMMON_H_INCLUDED ++#define GCC_COMMON_H_INCLUDED ++ ++#include "plugin.h" ++#include "bversion.h" ++#include "plugin-version.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++#include "line-map.h" ++#include "input.h" ++#include "tree.h" ++ ++#include "tree-inline.h" ++#include "version.h" ++#include "rtl.h" ++#include "tm_p.h" ++#include "flags.h" ++//#include "insn-attr.h" ++//#include "insn-config.h" ++//#include "insn-flags.h" ++#include "hard-reg-set.h" ++//#include "recog.h" ++#include "output.h" ++#include "except.h" ++#include "function.h" ++#include "toplev.h" ++//#include "expr.h" ++#include 
"basic-block.h" ++#include "intl.h" ++#include "ggc.h" ++//#include "regs.h" ++#include "timevar.h" ++ ++#include "params.h" ++#include "pointer-set.h" ++#include "emit-rtl.h" ++//#include "reload.h" ++//#include "ira.h" ++//#include "dwarf2asm.h" ++#include "debug.h" ++#include "target.h" ++#include "langhooks.h" ++#include "cfgloop.h" ++//#include "hosthooks.h" ++#include "cgraph.h" ++#include "opts.h" ++//#include "coverage.h" ++//#include "value-prof.h" ++ ++#if BUILDING_GCC_VERSION >= 4007 ++#include "tree-pretty-print.h" ++#include "gimple-pretty-print.h" ++#include "c-tree.h" ++//#include "alloc-pool.h" ++#endif ++ ++#if BUILDING_GCC_VERSION <= 4008 ++#include "tree-flow.h" ++#else ++#include "tree-cfgcleanup.h" ++#endif ++ ++#include "diagnostic.h" ++//#include "tree-diagnostic.h" ++#include "tree-dump.h" ++#include "tree-pass.h" ++//#include "df.h" ++#include "predict.h" ++#include "ipa-utils.h" ++ ++#if BUILDING_GCC_VERSION >= 4009 ++#include "varasm.h" ++#include "stor-layout.h" ++#include "internal-fn.h" ++#include "gimple-expr.h" ++//#include "diagnostic-color.h" ++#include "context.h" ++#include "tree-ssa-alias.h" ++#include "stringpool.h" ++#include "tree-ssanames.h" ++#include "print-tree.h" ++#include "tree-eh.h" ++#include "stmt.h" ++#endif ++ ++#include "gimple.h" ++ ++#if BUILDING_GCC_VERSION >= 4009 ++#include "tree-ssa-operands.h" ++#include "tree-phinodes.h" ++#include "tree-cfg.h" ++#include "gimple-iterator.h" ++#include "gimple-ssa.h" ++#include "ssa-iterators.h" ++#endif ++ ++//#include "lto/lto.h" ++//#include "data-streamer.h" ++//#include "lto-compress.h" ++ ++//#include "expr.h" where are you... ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++// missing from basic_block.h... ++extern void debug_dominance_info(enum cdi_direction dir); ++extern void debug_dominance_tree(enum cdi_direction dir, basic_block root); ++ ++#define __unused __attribute__((__unused__)) ++ ++#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) ++#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) ++#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node)) ++#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node)) ++ ++#if BUILDING_GCC_VERSION == 4005 ++#define FOR_EACH_LOCAL_DECL(FUN, I, D) for (tree vars = (FUN)->local_decls; vars && (D = TREE_VALUE(vars)); vars = TREE_CHAIN(vars), I) ++#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE))) ++#define FOR_EACH_VEC_ELT(T, V, I, P) for (I = 0; VEC_iterate(T, (V), (I), (P)); ++(I)) ++#define TODO_rebuild_cgraph_edges 0 ++ ++static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) ++{ ++ tree fndecl; ++ ++ if (!is_gimple_call(stmt)) ++ return false; ++ fndecl = gimple_call_fndecl(stmt); ++ if (!fndecl || DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) ++ return false; ++// print_node(stderr, "pax", fndecl, 4); ++ return DECL_FUNCTION_CODE(fndecl) == code; ++} ++ ++static inline bool is_simple_builtin(tree decl) ++{ ++ if (decl && DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL) ++ return false; ++ ++ switch (DECL_FUNCTION_CODE(decl)) { ++ /* Builtins that expand to constants. */ ++ case BUILT_IN_CONSTANT_P: ++ case BUILT_IN_EXPECT: ++ case BUILT_IN_OBJECT_SIZE: ++ case BUILT_IN_UNREACHABLE: ++ /* Simple register moves or loads from stack. 
*/ ++ case BUILT_IN_RETURN_ADDRESS: ++ case BUILT_IN_EXTRACT_RETURN_ADDR: ++ case BUILT_IN_FROB_RETURN_ADDR: ++ case BUILT_IN_RETURN: ++ case BUILT_IN_AGGREGATE_INCOMING_ADDRESS: ++ case BUILT_IN_FRAME_ADDRESS: ++ case BUILT_IN_VA_END: ++ case BUILT_IN_STACK_SAVE: ++ case BUILT_IN_STACK_RESTORE: ++ /* Exception state returns or moves registers around. */ ++ case BUILT_IN_EH_FILTER: ++ case BUILT_IN_EH_POINTER: ++ case BUILT_IN_EH_COPY_VALUES: ++ return true; ++ ++ default: ++ return false; ++ } ++} ++#endif ++ ++#if BUILDING_GCC_VERSION <= 4006 ++#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN) ++#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP) ++ ++// should come from c-tree.h if only it were installed for gcc 4.5... ++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) ++ ++#define get_random_seed(noinit) ({ \ ++ unsigned HOST_WIDE_INT seed; \ ++ sscanf(get_random_seed(noinit), "%" HOST_WIDE_INT_PRINT "x", &seed); \ ++ seed * seed; }) ++ ++static inline bool gimple_clobber_p(gimple s) ++{ ++ return false; ++} ++ ++static inline tree builtin_decl_implicit(enum built_in_function fncode) ++{ ++ return implicit_built_in_decls[fncode]; ++} ++ ++static inline struct cgraph_node *cgraph_get_create_node(tree decl) ++{ ++ struct cgraph_node *node = cgraph_get_node(decl); ++ ++ return node ? node : cgraph_node(decl); ++} ++ ++static inline bool cgraph_function_with_gimple_body_p(struct cgraph_node *node) ++{ ++ return node->analyzed && !node->thunk.thunk_p && !node->alias; ++} ++ ++static inline struct cgraph_node *cgraph_first_function_with_gimple_body(void) ++{ ++ struct cgraph_node *node; ++ ++ for (node = cgraph_nodes; node; node = node->next) ++ if (cgraph_function_with_gimple_body_p(node)) ++ return node; ++ return NULL; ++} ++ ++static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node) ++{ ++ for (node = node->next; node; node = node->next) ++ if (cgraph_function_with_gimple_body_p(node)) ++ return node; ++ return NULL; ++} ++ ++#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \ ++ for ((node) = cgraph_first_function_with_gimple_body(); (node); \ ++ (node) = cgraph_next_function_with_gimple_body(node)) ++#endif ++ ++#if BUILDING_GCC_VERSION == 4006 ++extern void debug_gimple_stmt(gimple); ++extern void debug_gimple_seq(gimple_seq); ++extern void print_gimple_seq(FILE *, gimple_seq, int, int); ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern void print_gimple_expr(FILE *, gimple, int, int); ++extern void dump_gimple_stmt(pretty_printer *, gimple, int, int); ++#endif ++ ++#if BUILDING_GCC_VERSION <= 4007 ++#define FOR_EACH_VARIABLE(node) for (node = varpool_nodes; node; node = node->next) ++#define PROP_loops 0 ++ ++static inline int bb_loop_depth(const_basic_block bb) ++{ ++ return bb->loop_father ? 
loop_depth(bb->loop_father) : 0; ++} ++ ++static inline bool gimple_store_p(gimple gs) ++{ ++ tree lhs = gimple_get_lhs(gs); ++ return lhs && !is_gimple_reg(lhs); ++} ++#endif ++ ++#if BUILDING_GCC_VERSION >= 4007 ++#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \ ++ cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq)) ++#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \ ++ cgraph_create_edge_including_clones((caller), (callee), (old_call_stmt), (call_stmt), (count), (freq), (reason)) ++#endif ++ ++#if BUILDING_GCC_VERSION <= 4008 ++#define ENTRY_BLOCK_PTR_FOR_FN(FN) ENTRY_BLOCK_PTR_FOR_FUNCTION(FN) ++#define EXIT_BLOCK_PTR_FOR_FN(FN) EXIT_BLOCK_PTR_FOR_FUNCTION(FN) ++#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info) ++#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks) ++#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges) ++#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block) ++#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map) ++#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status) ++ ++static inline const char *get_tree_code_name(enum tree_code code) ++{ ++ gcc_assert(code < MAX_TREE_CODES); ++ return tree_code_name[code]; ++} ++ ++#define ipa_remove_stmt_references(cnode, stmt) ++#endif ++ ++#if BUILDING_GCC_VERSION == 4008 ++#define NODE_DECL(node) node->symbol.decl ++#else ++#define NODE_DECL(node) node->decl ++#endif ++ ++#if BUILDING_GCC_VERSION >= 4008 ++#define add_referenced_var(var) ++#define mark_sym_for_renaming(var) ++#define varpool_mark_needed_node(node) ++#define TODO_dump_func 0 ++#define TODO_dump_cgraph 0 ++#endif ++ ++#if BUILDING_GCC_VERSION >= 4009 ++#define TODO_ggc_collect 0 ++#endif ++ ++#endif +diff --git a/tools/gcc/gen-random-seed.sh b/tools/gcc/gen-random-seed.sh +new file mode 100644 +index 0000000..7514850 +--- /dev/null ++++ b/tools/gcc/gen-random-seed.sh +@@ -0,0 +1,8 @@ ++#!/bin/sh ++ ++if [ ! 
-f "$1" ]; then ++ SEED=`od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n'` ++ echo "const char *randstruct_seed = \"$SEED\";" > "$1" ++ HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'` ++ echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2" ++fi +diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c +new file mode 100644 +index 0000000..d81c094 +--- /dev/null ++++ b/tools/gcc/kallocstat_plugin.c +@@ -0,0 +1,183 @@ ++/* ++ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to find the distribution of k*alloc sizes ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++ ++#include "gcc-common.h" ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info kallocstat_plugin_info = { ++ .version = "201401260140", ++ .help = NULL ++}; ++ ++static const char * const kalloc_functions[] = { ++ "__kmalloc", ++ "kmalloc", ++ "kmalloc_large", ++ "kmalloc_node", ++ "kmalloc_order", ++ "kmalloc_order_trace", ++ "kmalloc_slab", ++ "kzalloc", ++ "kzalloc_node", ++}; ++ ++static bool is_kalloc(const char *fnname) ++{ ++ size_t i; ++ ++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++) ++ if (!strcmp(fnname, kalloc_functions[i])) ++ return true; ++ return false; ++} ++ ++static unsigned int execute_kallocstat(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: ++ tree fndecl, size; ++ gimple stmt; ++ const char *fnname; ++ ++ // is it a call ++ stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(stmt)) ++ continue; ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (TREE_CODE(fndecl) != FUNCTION_DECL) ++ continue; ++ ++ // is it a call to k*alloc ++ fnname = DECL_NAME_POINTER(fndecl); ++ if (!is_kalloc(fnname)) ++ continue; ++ ++ // is the size arg const or the result of a simple const assignment ++ size = gimple_call_arg(stmt, 0); ++ while (true) { ++ expanded_location xloc; ++ size_t size_val; ++ ++ if (TREE_CONSTANT(size)) { ++ xloc = expand_location(gimple_location(stmt)); ++ if (!xloc.file) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ size_val = TREE_INT_CST_LOW(size); ++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line); ++ break; ++ } ++ ++ if (TREE_CODE(size) != SSA_NAME) ++ break; ++ stmt = SSA_NAME_DEF_STMT(size); ++//debug_gimple_stmt(stmt); ++//debug_tree(size); ++ if (!stmt || !is_gimple_assign(stmt)) ++ break; ++ if (gimple_num_ops(stmt) != 2) ++ break; ++ size = gimple_assign_rhs1(stmt); ++ } ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_node(stderr, "pax", fndecl, 4); ++ } ++ } ++ ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data kallocstat_pass_data = { ++#else ++static struct gimple_opt_pass kallocstat_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "kallocstat", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = 
NULL, ++ .execute = execute_kallocstat, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class kallocstat_pass : public gimple_opt_pass { ++public: ++ kallocstat_pass() : gimple_opt_pass(kallocstat_pass_data, g) {} ++ unsigned int execute() { return execute_kallocstat(); } ++}; ++} ++ ++static opt_pass *make_kallocstat_pass(void) ++{ ++ return new kallocstat_pass(); ++} ++#else ++static struct opt_pass *make_kallocstat_pass(void) ++{ ++ return &kallocstat_pass.pass; ++} ++#endif ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info kallocstat_pass_info; ++ ++ kallocstat_pass_info.pass = make_kallocstat_pass(); ++ kallocstat_pass_info.reference_pass_name = "ssa"; ++ kallocstat_pass_info.ref_pass_instance_number = 1; ++ kallocstat_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info); ++ ++ return 0; ++} +diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c +new file mode 100644 +index 0000000..89f256d +--- /dev/null ++++ b/tools/gcc/kernexec_plugin.c +@@ -0,0 +1,522 @@ ++/* ++ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386 ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++ ++#include "gcc-common.h" ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info kernexec_plugin_info = { ++ .version = "201401260140", ++ .help = "method=[bts|or]\tinstrumentation method\n" ++}; ++ ++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *); ++static void (*kernexec_instrument_retaddr)(rtx); ++ ++/* ++ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered ++ */ ++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_movabs_stmt; ++ ++ // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : ); ++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL); ++ gimple_asm_set_volatile(asm_movabs_stmt, true); ++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(asm_movabs_stmt); ++} ++ ++/* ++ * find all asm() stmts that clobber r12 and add a reload of r12 ++ */ ++static unsigned int execute_kernexec_reload(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: __asm__ ("" : : : "r12"); ++ gimple asm_stmt; ++ size_t nclobbers; ++ ++ // is it an asm ... 
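++			// (illustrative, hypothetical example: an inline asm statement
++			// in kernel code such as
++			//	asm volatile("" : : : "r12");
++			// declares r12 clobbered, so it is matched below and followed
++			// by a reload of the mask value)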
++ asm_stmt = gsi_stmt(gsi); ++ if (gimple_code(asm_stmt) != GIMPLE_ASM) ++ continue; ++ ++ // ... clobbering r12 ++ nclobbers = gimple_asm_nclobbers(asm_stmt); ++ while (nclobbers--) { ++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers); ++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12")) ++ continue; ++ kernexec_reload_fptr_mask(&gsi); ++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO); ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce ++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference ++ */ ++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi) ++{ ++ gimple assign_intptr, assign_new_fptr, call_stmt; ++ tree intptr, orptr, old_fptr, new_fptr, kernexec_mask; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary unsigned long variable used for bitops and cast fptr to it ++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts"); ++ add_referenced_var(intptr); ++ intptr = make_ssa_name(intptr, NULL); ++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); ++ SSA_NAME_DEF_STMT(intptr) = assign_intptr; ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // apply logical or to temporary unsigned long and bitmask ++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); ++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); ++ orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask); ++ intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL); ++ assign_intptr = gimple_build_assign(intptr, orptr); ++ SSA_NAME_DEF_STMT(intptr) = assign_intptr; ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // cast temporary unsigned long back to a temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr"); ++ add_referenced_var(new_fptr); ++ new_fptr = make_ssa_name(new_fptr, NULL); ++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); ++ SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr; ++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT); ++ update_stmt(assign_new_fptr); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_or_stmt, call_stmt; ++ tree old_fptr, new_fptr, input, output; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *inputs = NULL; ++ VEC(tree, gc) *outputs = NULL; ++#else ++ vec<tree, va_gc> *inputs = NULL; ++ vec<tree, va_gc> *outputs = NULL; ++#endif ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or"); ++ add_referenced_var(new_fptr); ++ new_fptr = make_ssa_name(new_fptr, NULL); ++ ++ // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); ++ input = build_tree_list(NULL_TREE, build_string(1, "0")); ++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr)); ++ output = build_tree_list(NULL_TREE, build_string(2, "=r")); ++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr)); ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_safe_push(tree, gc, inputs, input); ++ 
VEC_safe_push(tree, gc, outputs, output); ++#else ++ vec_safe_push(inputs, input); ++ vec_safe_push(outputs, output); ++#endif ++ asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL); ++ SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt; ++ gimple_asm_set_volatile(asm_or_stmt, true); ++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT); ++ update_stmt(asm_or_stmt); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++/* ++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer ++ */ ++static unsigned int execute_kernexec_fptr(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); ++ tree fn; ++ gimple call_stmt; ++ ++ // is it a call ... ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fn = gimple_call_fn(call_stmt); ++ if (TREE_CODE(fn) == ADDR_EXPR) ++ continue; ++ if (TREE_CODE(fn) != SSA_NAME) ++ gcc_unreachable(); ++ ++ // ... through a function pointer ++ if (SSA_NAME_VAR(fn) != NULL_TREE) { ++ fn = SSA_NAME_VAR(fn); ++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) { ++ debug_tree(fn); ++ gcc_unreachable(); ++ } ++ } ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != POINTER_TYPE) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != FUNCTION_TYPE) ++ continue; ++ ++ kernexec_instrument_fptr(&gsi); ++ ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++ } ++ } ++ ++ return 0; ++} ++ ++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn ++static void kernexec_instrument_retaddr_bts(rtx insn) ++{ ++ rtx btsq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("btsq $63,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(btsq) = 1; ++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(btsq, insn); ++} ++ ++// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn ++static void kernexec_instrument_retaddr_or(rtx insn) ++{ ++ rtx orq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("orq %%r12,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(orq) = 1; ++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(orq, insn); ++} ++ ++/* ++ * find all asm level function returns and forcibly set the highest bit of the return address ++ */ ++static unsigned int execute_kernexec_retaddr(void) ++{ ++ rtx insn; ++ ++// if (stack_realign_drap) ++// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl))); ++ ++ // 1. 
find function returns ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { ++ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil)) ++ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil)) ++ // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return) ++ rtx body; ++ ++ // is it a retn ++ if (!JUMP_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) == PARALLEL) ++ body = XVECEXP(body, 0, 0); ++ if (!ANY_RETURN_P(body)) ++ continue; ++ kernexec_instrument_retaddr(insn); ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++ ++ return 0; ++} ++ ++static bool kernexec_cmodel_check(void) ++{ ++ tree section; ++ ++ if (ix86_cmodel != CM_KERNEL) ++ return false; ++ ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); ++ if (!section || !TREE_VALUE(section)) ++ return true; ++ ++ section = TREE_VALUE(TREE_VALUE(section)); ++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10)) ++ return true; ++ ++ return false; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data kernexec_reload_pass_data = { ++#else ++static struct gimple_opt_pass kernexec_reload_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "kernexec_reload", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = true, ++ .has_execute = true, ++#else ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_reload, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data kernexec_fptr_pass_data = { ++#else ++static struct gimple_opt_pass kernexec_fptr_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "kernexec_fptr", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = true, ++ .has_execute = true, ++#else ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_fptr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data kernexec_retaddr_pass_data = { ++#else ++static struct rtl_opt_pass kernexec_retaddr_pass = { ++ .pass = { ++#endif ++ .type = RTL_PASS, ++ .name = "kernexec_retaddr", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = true, ++ .has_execute = true, ++#else ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_retaddr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ 
.todo_flags_finish = TODO_dump_func | TODO_ggc_collect ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class kernexec_reload_pass : public gimple_opt_pass { ++public: ++ kernexec_reload_pass() : gimple_opt_pass(kernexec_reload_pass_data, g) {} ++ bool gate() { return kernexec_cmodel_check(); } ++ unsigned int execute() { return execute_kernexec_reload(); } ++}; ++ ++class kernexec_fptr_pass : public gimple_opt_pass { ++public: ++ kernexec_fptr_pass() : gimple_opt_pass(kernexec_fptr_pass_data, g) {} ++ bool gate() { return kernexec_cmodel_check(); } ++ unsigned int execute() { return execute_kernexec_fptr(); } ++}; ++ ++class kernexec_retaddr_pass : public rtl_opt_pass { ++public: ++ kernexec_retaddr_pass() : rtl_opt_pass(kernexec_retaddr_pass_data, g) {} ++ bool gate() { return kernexec_cmodel_check(); } ++ unsigned int execute() { return execute_kernexec_retaddr(); } ++}; ++} ++ ++static opt_pass *make_kernexec_reload_pass(void) ++{ ++ return new kernexec_reload_pass(); ++} ++ ++static opt_pass *make_kernexec_fptr_pass(void) ++{ ++ return new kernexec_fptr_pass(); ++} ++ ++static opt_pass *make_kernexec_retaddr_pass(void) ++{ ++ return new kernexec_retaddr_pass(); ++} ++#else ++static struct opt_pass *make_kernexec_reload_pass(void) ++{ ++ return &kernexec_reload_pass.pass; ++} ++ ++static struct opt_pass *make_kernexec_fptr_pass(void) ++{ ++ return &kernexec_fptr_pass.pass; ++} ++ ++static struct opt_pass *make_kernexec_retaddr_pass(void) ++{ ++ return &kernexec_retaddr_pass.pass; ++} ++#endif ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info kernexec_reload_pass_info; ++ struct register_pass_info kernexec_fptr_pass_info; ++ struct register_pass_info kernexec_retaddr_pass_info; ++ ++ kernexec_reload_pass_info.pass = make_kernexec_reload_pass(); ++ kernexec_reload_pass_info.reference_pass_name = "ssa"; ++ kernexec_reload_pass_info.ref_pass_instance_number = 1; ++ kernexec_reload_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ kernexec_fptr_pass_info.pass = make_kernexec_fptr_pass(); ++ kernexec_fptr_pass_info.reference_pass_name = "ssa"; ++ kernexec_fptr_pass_info.ref_pass_instance_number = 1; ++ kernexec_fptr_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ kernexec_retaddr_pass_info.pass = make_kernexec_retaddr_pass(); ++ kernexec_retaddr_pass_info.reference_pass_name = "pro_and_epilogue"; ++ kernexec_retaddr_pass_info.ref_pass_instance_number = 1; ++ kernexec_retaddr_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "method")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ if (!strcmp(argv[i].value, "bts")) { ++ kernexec_instrument_fptr = kernexec_instrument_fptr_bts; ++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts; ++ } else if (!strcmp(argv[i].value, "or")) { ++ kernexec_instrument_fptr = kernexec_instrument_fptr_or; ++ kernexec_instrument_retaddr = 
kernexec_instrument_retaddr_or;
++ fix_register("r12", 1, 1);
++ } else
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
++ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
++
++ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
++
++ return 0;
++}
+diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
+new file mode 100644
+index 0000000..39d7cc7
+--- /dev/null
++++ b/tools/gcc/latent_entropy_plugin.c
+@@ -0,0 +1,462 @@
++/*
++ * Copyright 2012-2014 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help generate a little bit of entropy from program state,
++ * used throughout the uptime of the kernel
++ *
++ * TODO:
++ * - add ipa pass to identify not explicitly marked candidate functions
++ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - more instrumentation control via attribute parameters
++ *
++ * BUGS:
++ * - LTO needs -flto-partition=none for now
++ */
++
++#include "gcc-common.h"
++
++int plugin_is_GPL_compatible;
++
++static tree latent_entropy_decl;
++
++static struct plugin_info latent_entropy_plugin_info = {
++ .version = "201403280150",
++ .help = NULL
++};
++
++static unsigned HOST_WIDE_INT seed;
++static unsigned HOST_WIDE_INT get_random_const(void)
++{
++ unsigned int i;
++ unsigned HOST_WIDE_INT ret = 0;
++
++ for (i = 0; i < 8 * sizeof ret; i++) {
++ ret = (ret << 1) | (seed & 1);
++ seed >>= 1;
++ if (ret & 1)
++ seed ^= 0xD800000000000000ULL;
++ }
++
++ return ret;
++}
++
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ tree type;
++ unsigned long long mask;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(constructor_elt, gc) *vals;
++#else
++ vec<constructor_elt, va_gc> *vals;
++#endif
++
++ switch (TREE_CODE(*node)) {
++ default:
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions and variables", name);
++ break;
++
++ case VAR_DECL:
++ if (DECL_INITIAL(*node)) {
++ *no_add_attrs = true;
++ error("variable %qD with %qE attribute must not be initialized", *node, name);
++ break;
++ }
++
++ if (!TREE_STATIC(*node)) {
++ *no_add_attrs = true;
++ error("variable %qD with %qE attribute must not be local", *node, name);
++ break;
++ }
++
++ type = TREE_TYPE(*node);
++ switch (TREE_CODE(type)) {
++ default:
++ *no_add_attrs = true;
++ error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields", *node, name);
++ break;
++
++ case RECORD_TYPE: {
++ tree field;
++ unsigned int nelt = 0;
++
++ for (field = TYPE_FIELDS(type); field; nelt++, field = TREE_CHAIN(field)) {
++ tree fieldtype;
++
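++ // a single non-integer field disqualifies the whole structure,
++ // since only integer members can be given a random initializer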
++ fieldtype = TREE_TYPE(field); ++ if (TREE_CODE(fieldtype) != INTEGER_TYPE) { ++ *no_add_attrs = true; ++ error("structure variable %qD with %qE attribute has a non-integer field %qE", *node, name, field); ++ break; ++ } ++ } ++ ++ if (field) ++ break; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ vals = VEC_alloc(constructor_elt, gc, nelt); ++#else ++ vec_alloc(vals, nelt); ++#endif ++ ++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) { ++ tree fieldtype; ++ ++ fieldtype = TREE_TYPE(field); ++ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(fieldtype)) - 1); ++ mask = 2 * (mask - 1) + 1; ++ ++ if (TYPE_UNSIGNED(fieldtype)) ++ CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cstu(fieldtype, mask & get_random_const())); ++ else ++ CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cst(fieldtype, mask & get_random_const())); ++ } ++ ++ DECL_INITIAL(*node) = build_constructor(type, vals); ++//debug_tree(DECL_INITIAL(*node)); ++ break; ++ } ++ ++ case INTEGER_TYPE: ++ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1); ++ mask = 2 * (mask - 1) + 1; ++ ++ if (TYPE_UNSIGNED(type)) ++ DECL_INITIAL(*node) = build_int_cstu(type, mask & get_random_const()); ++ else ++ DECL_INITIAL(*node) = build_int_cst(type, mask & get_random_const()); ++ break; ++ ++ case ARRAY_TYPE: { ++ tree elt_type, array_size, elt_size; ++ unsigned int i, nelt; ++ ++ elt_type = TREE_TYPE(type); ++ elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type)); ++ array_size = TYPE_SIZE_UNIT(type); ++ ++ if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size || TREE_CODE(array_size) != INTEGER_CST) { ++ *no_add_attrs = true; ++ error("array variable %qD with %qE attribute must be a fixed length integer array type", *node, name); ++ break; ++ } ++ ++ nelt = TREE_INT_CST_LOW(array_size) / TREE_INT_CST_LOW(elt_size); ++#if BUILDING_GCC_VERSION <= 4007 ++ vals = VEC_alloc(constructor_elt, gc, nelt); ++#else ++ vec_alloc(vals, nelt); ++#endif ++ ++ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(elt_type)) - 1); ++ mask = 2 * (mask - 1) + 1; ++ ++ for (i = 0; i < nelt; i++) ++ if (TYPE_UNSIGNED(elt_type)) ++ CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cstu(elt_type, mask & get_random_const())); ++ else ++ CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cst(elt_type, mask & get_random_const())); ++ ++ DECL_INITIAL(*node) = build_constructor(type, vals); ++//debug_tree(DECL_INITIAL(*node)); ++ break; ++ } ++ } ++ break; ++ ++ case FUNCTION_DECL: ++ break; ++ } ++ ++ return NULL_TREE; ++} ++ ++static struct attribute_spec latent_entropy_attr = { ++ .name = "latent_entropy", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_latent_entropy_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&latent_entropy_attr); ++} ++ ++static bool gate_latent_entropy(void) ++{ ++ // don't bother with noreturn functions for now ++ if (TREE_THIS_VOLATILE(current_function_decl)) ++ return false; ++ ++ return lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)) != NULL_TREE; ++} ++ ++static enum tree_code get_op(tree *rhs) ++{ ++ static enum tree_code op; ++ unsigned HOST_WIDE_INT random_const; ++ ++ random_const = get_random_const(); ++ ++ switch (op) { ++ case BIT_XOR_EXPR: ++ op = PLUS_EXPR; ++ break; ++ ++ case PLUS_EXPR: ++ if (rhs) { ++ op = LROTATE_EXPR; ++ random_const &= HOST_BITS_PER_WIDE_INT - 1; 
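++ // the ops cycle xor -> add -> rol; mask the rotate count so it
++ // always stays below HOST_BITS_PER_WIDE_INT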
++ break; ++ } ++ ++ case LROTATE_EXPR: ++ default: ++ op = BIT_XOR_EXPR; ++ break; ++ } ++ if (rhs) ++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const); ++ return op; ++} ++ ++static void perturb_local_entropy(basic_block bb, tree local_entropy) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ tree addxorrol, rhs; ++ enum tree_code op; ++ ++ op = get_op(&rhs); ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs); ++ assign = gimple_build_assign(local_entropy, addxorrol); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++//debug_bb(bb); ++} ++ ++static void perturb_latent_entropy(basic_block bb, tree rhs) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ tree addxorrol, temp; ++ ++ // 1. create temporary copy of latent_entropy ++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy"); ++ add_referenced_var(temp); ++ ++ // 2. read... ++ temp = make_ssa_name(temp, NULL); ++ assign = gimple_build_assign(temp, latent_entropy_decl); ++ SSA_NAME_DEF_STMT(temp) = assign; ++ add_referenced_var(latent_entropy_decl); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ ++ // 3. ...modify... ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs); ++ temp = make_ssa_name(SSA_NAME_VAR(temp), NULL); ++ assign = gimple_build_assign(temp, addxorrol); ++ SSA_NAME_DEF_STMT(temp) = assign; ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ ++ // 4. ...write latent_entropy ++ assign = gimple_build_assign(latent_entropy_decl, temp); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++} ++ ++static unsigned int execute_latent_entropy(void) ++{ ++ basic_block bb; ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ tree local_entropy; ++ ++ if (!latent_entropy_decl) { ++#if BUILDING_GCC_VERSION >= 4009 ++ varpool_node *node; ++#else ++ struct varpool_node *node; ++#endif ++ ++ FOR_EACH_VARIABLE(node) { ++ tree var = NODE_DECL(node); ++ ++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy")) ++ continue; ++ latent_entropy_decl = var; ++// debug_tree(var); ++ break; ++ } ++ if (!latent_entropy_decl) { ++// debug_tree(current_function_decl); ++ return 0; ++ } ++ } ++ ++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl))); ++ ++ // 1. create local entropy variable ++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy"); ++ add_referenced_var(local_entropy); ++ mark_sym_for_renaming(local_entropy); ++ ++ // 2. initialize local entropy variable ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun)); ++ gsi = gsi_start_bb(bb); ++ ++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const())); ++// gimple_set_location(assign, loc); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++//debug_bb(bb); ++ gcc_assert(single_succ_p(bb)); ++ bb = single_succ(bb); ++ ++ // 3. instrument each BB with an operation on the local entropy variable ++ while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) { ++ perturb_local_entropy(bb, local_entropy); ++//debug_bb(bb); ++ bb = bb->next_bb; ++ }; ++ ++ // 4. 
mix local entropy into the global entropy variable ++ gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun))); ++ perturb_latent_entropy(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)), local_entropy); ++//debug_bb(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun))); ++ return 0; ++} ++ ++static void latent_entropy_start_unit(void *gcc_data, void *user_data) ++{ ++ tree latent_entropy_type; ++ ++ seed = get_random_seed(false); ++ ++ if (in_lto_p) ++ return; ++ ++ // extern volatile u64 latent_entropy ++ gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64); ++ latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE); ++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type); ++ ++ TREE_STATIC(latent_entropy_decl) = 1; ++ TREE_PUBLIC(latent_entropy_decl) = 1; ++ TREE_USED(latent_entropy_decl) = 1; ++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1; ++ DECL_EXTERNAL(latent_entropy_decl) = 1; ++ DECL_ARTIFICIAL(latent_entropy_decl) = 1; ++ lang_hooks.decls.pushdecl(latent_entropy_decl); ++// DECL_ASSEMBLER_NAME(latent_entropy_decl); ++// varpool_finalize_decl(latent_entropy_decl); ++// varpool_mark_needed_node(latent_entropy_decl); ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data latent_entropy_pass_data = { ++#else ++static struct gimple_opt_pass latent_entropy_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "latent_entropy", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = true, ++ .has_execute = true, ++#else ++ .gate = gate_latent_entropy, ++ .execute = execute_latent_entropy, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class latent_entropy_pass : public gimple_opt_pass { ++public: ++ latent_entropy_pass() : gimple_opt_pass(latent_entropy_pass_data, g) {} ++ bool gate() { return gate_latent_entropy(); } ++ unsigned int execute() { return execute_latent_entropy(); } ++}; ++} ++ ++static opt_pass *make_latent_entropy_pass(void) ++{ ++ return new latent_entropy_pass(); ++} ++#else ++static struct opt_pass *make_latent_entropy_pass(void) ++{ ++ return &latent_entropy_pass.pass; ++} ++#endif ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info latent_entropy_pass_info; ++ ++ latent_entropy_pass_info.pass = make_latent_entropy_pass(); ++ latent_entropy_pass_info.reference_pass_name = "optimized"; ++ latent_entropy_pass_info.ref_pass_instance_number = 1; ++ latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = { ++ { ++ .base = &latent_entropy_decl, ++ .nelt = 1, ++ .stride = sizeof(latent_entropy_decl), ++ .cb = >_ggc_mx_tree_node, ++ .pchw = >_pch_nx_tree_node ++ }, ++ LAST_GGC_ROOT_TAB ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ 
return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info); ++ register_callback(plugin_name, PLUGIN_START_UNIT, &latent_entropy_start_unit, NULL); ++ if (!in_lto_p) ++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)>_ggc_r_gt_latent_entropy); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info); ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c +new file mode 100644 +index 0000000..a5cb46b +--- /dev/null ++++ b/tools/gcc/randomize_layout_plugin.c +@@ -0,0 +1,915 @@ ++/* ++ * Copyright 2014 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net> ++ * and PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Usage: ++ * $ # for 4.5/4.6/C based 4.7 ++ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c ++ * $ # for C++ based 4.7/4.8+ ++ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c ++ * $ gcc -fplugin=./randomize_layout_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-common.h" ++#include "randomize_layout_seed.h" ++ ++#if BUILDING_GCC_MAJOR < 4 || BUILDING_GCC_MINOR < 6 || (BUILDING_GCC_MINOR == 6 && BUILDING_GCC_PATCHLEVEL < 4) ++#error "The RANDSTRUCT plugin requires GCC 4.6.4 or newer." ++#endif ++ ++#define ORIG_TYPE_NAME(node) \ ++ (TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous") ++ ++int plugin_is_GPL_compatible; ++ ++static int performance_mode; ++ ++static struct plugin_info randomize_layout_plugin_info = { ++ .version = "201402201816", ++ .help = "disable\t\t\tdo not activate plugin\n" ++ "performance-mode\tenable cacheline-aware layout randomization\n" ++}; ++ ++/* from old Linux dcache.h */ ++static inline unsigned long ++partial_name_hash(unsigned long c, unsigned long prevhash) ++{ ++ return (prevhash + (c << 4) + (c >> 4)) * 11; ++} ++static inline unsigned int ++name_hash(const unsigned char *name) ++{ ++ unsigned long hash = 0; ++ unsigned int len = strlen((const char *)name); ++ while (len--) ++ hash = partial_name_hash(*name++, hash); ++ return (unsigned int)hash; ++} ++ ++static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ tree type; ++ ++ *no_add_attrs = true; ++ if (TREE_CODE(*node) == FUNCTION_DECL) { ++ error("%qE attribute does not apply to functions (%qF)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == PARM_DECL) { ++ error("%qE attribute does not apply to function parameters (%qD)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == VAR_DECL) { ++ error("%qE attribute does not apply to variables (%qD)", name, *node); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ type = *node; ++ } else { ++ gcc_assert(TREE_CODE(*node) == TYPE_DECL); ++ type = TREE_TYPE(*node); ++ } ++ ++ if (TREE_CODE(type) != RECORD_TYPE) { ++ error("%qE attribute used on %qT applies to struct types only", name, type); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { ++ error("%qE attribute is already 
applied to the type %qT", name, type); ++ return NULL_TREE; ++ } ++ ++ *no_add_attrs = false; ++ ++ return NULL_TREE; ++} ++ ++/* set on complete types that we don't need to inspect further at all */ ++static tree handle_randomize_considered_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = false; ++ return NULL_TREE; ++} ++ ++/* ++ * set on types that we've performed a shuffle on, to prevent re-shuffling ++ * this does not preclude us from inspecting its fields for potential shuffles ++ */ ++static tree handle_randomize_performed_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = false; ++ return NULL_TREE; ++} ++ ++/* ++ * 64bit variant of Bob Jenkins' public domain PRNG ++ * 256 bits of internal state ++ */ ++ ++typedef unsigned long long u64; ++ ++typedef struct ranctx { u64 a; u64 b; u64 c; u64 d; } ranctx; ++ ++#define rot(x,k) (((x)<<(k))|((x)>>(64-(k)))) ++static u64 ranval(ranctx *x) { ++ u64 e = x->a - rot(x->b, 7); ++ x->a = x->b ^ rot(x->c, 13); ++ x->b = x->c + rot(x->d, 37); ++ x->c = x->d + e; ++ x->d = e + x->a; ++ return x->d; ++} ++ ++static void raninit(ranctx *x, u64 *seed) { ++ int i; ++ ++ x->a = seed[0]; ++ x->b = seed[1]; ++ x->c = seed[2]; ++ x->d = seed[3]; ++ ++ for (i=0; i < 30; ++i) ++ (void)ranval(x); ++} ++ ++static u64 shuffle_seed[4]; ++ ++struct partition_group { ++ tree tree_start; ++ unsigned long start; ++ unsigned long length; ++}; ++ ++static void partition_struct(tree *fields, unsigned long length, struct partition_group *size_groups, unsigned long *num_groups) ++{ ++ unsigned long i; ++ unsigned long accum_size = 0; ++ unsigned long accum_length = 0; ++ unsigned long group_idx = 0; ++ ++ gcc_assert(length < INT_MAX); ++ ++ memset(size_groups, 0, sizeof(struct partition_group) * length); ++ ++ for (i = 0; i < length; i++) { ++ if (size_groups[group_idx].tree_start == NULL_TREE) { ++ size_groups[group_idx].tree_start = fields[i]; ++ size_groups[group_idx].start = i; ++ accum_length = 0; ++ accum_size = 0; ++ } ++ accum_size += (unsigned long)int_size_in_bytes(TREE_TYPE(fields[i])); ++ accum_length++; ++ if (accum_size >= 64) { ++ size_groups[group_idx].length = accum_length; ++ accum_length = 0; ++ group_idx++; ++ } ++ } ++ ++ if (size_groups[group_idx].tree_start != NULL_TREE && ++ !size_groups[group_idx].length) { ++ size_groups[group_idx].length = accum_length; ++ group_idx++; ++ } ++ ++ *num_groups = group_idx; ++} ++ ++static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state) ++{ ++ unsigned long i, x; ++ struct partition_group size_group[length]; ++ unsigned long num_groups = 0; ++ unsigned long randnum; ++ ++ partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups); ++ for (i = num_groups - 1; i > 0; i--) { ++ struct partition_group tmp; ++ randnum = ranval(prng_state) % (i + 1); ++ tmp = size_group[i]; ++ size_group[i] = size_group[randnum]; ++ size_group[randnum] = tmp; ++ } ++ ++ for (x = 0; x < num_groups; x++) { ++ for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) { ++ tree tmp; ++ if (DECL_BIT_FIELD_TYPE(newtree[i])) ++ continue; ++ randnum = ranval(prng_state) % (i + 1); ++ // we could handle this case differently if desired ++ if (DECL_BIT_FIELD_TYPE(newtree[randnum])) ++ continue; ++ tmp = newtree[i]; ++ newtree[i] = newtree[randnum]; ++ newtree[randnum] = tmp; ++ } ++ } ++} ++ ++static void full_shuffle(tree *newtree, unsigned long length, ranctx 
*prng_state) ++{ ++ unsigned long i, randnum; ++ ++ for (i = length - 1; i > 0; i--) { ++ tree tmp; ++ randnum = ranval(prng_state) % (i + 1); ++ tmp = newtree[i]; ++ newtree[i] = newtree[randnum]; ++ newtree[randnum] = tmp; ++ } ++} ++ ++/* modern in-place Fisher-Yates shuffle */ ++static void shuffle(const_tree type, tree *newtree, unsigned long length) ++{ ++ unsigned long i; ++ u64 seed[4]; ++ ranctx prng_state; ++ const unsigned char *structname; ++ ++ if (length == 0) ++ return; ++ ++ gcc_assert(TREE_CODE(type) == RECORD_TYPE); ++ ++ structname = ORIG_TYPE_NAME(type); ++ ++#ifdef __DEBUG_PLUGIN ++ fprintf(stderr, "Shuffling struct %s %p\n", (const char *)structname, type); ++#ifdef __DEBUG_VERBOSE ++ debug_tree((tree)type); ++#endif ++#endif ++ ++ for (i = 0; i < 4; i++) { ++ seed[i] = shuffle_seed[i]; ++ seed[i] ^= name_hash(structname); ++ } ++ ++ raninit(&prng_state, (u64 *)&seed); ++ ++ if (performance_mode) ++ performance_shuffle(newtree, length, &prng_state); ++ else ++ full_shuffle(newtree, length, &prng_state); ++} ++ ++static bool is_flexible_array(const_tree field) ++{ ++ const_tree fieldtype; ++ const_tree typesize; ++ const_tree elemtype; ++ const_tree elemsize; ++ ++ fieldtype = TREE_TYPE(field); ++ typesize = TYPE_SIZE(fieldtype); ++ ++ if (TREE_CODE(fieldtype) != ARRAY_TYPE) ++ return false; ++ ++ elemtype = TREE_TYPE(fieldtype); ++ elemsize = TYPE_SIZE(elemtype); ++ ++ /* size of type is represented in bits */ ++ ++ if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE && ++ TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE) ++ return true; ++ ++ if (typesize != NULL_TREE && ++ (TREE_CONSTANT(typesize) && (!TREE_INT_CST_LOW(typesize) || ++ TREE_INT_CST_LOW(typesize) == TREE_INT_CST_LOW(elemsize)))) ++ return true; ++ ++ return false; ++} ++ ++static int relayout_struct(tree type) ++{ ++ unsigned long num_fields = (unsigned long)list_length(TYPE_FIELDS(type)); ++ unsigned long shuffle_length = num_fields; ++ tree field; ++ tree newtree[num_fields]; ++ unsigned long i; ++ tree list; ++ tree variant; ++ expanded_location xloc; ++ ++ if (TYPE_FIELDS(type) == NULL_TREE) ++ return 0; ++ ++ if (num_fields < 2) ++ return 0; ++ ++ gcc_assert(TREE_CODE(type) == RECORD_TYPE); ++ ++ gcc_assert(num_fields < INT_MAX); ++ ++ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)) || ++ lookup_attribute("no_randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type)))) ++ return 0; ++ ++ /* Workaround for 3rd-party VirtualBox source that we can't modify ourselves */ ++ if (!strcmp((const char *)ORIG_TYPE_NAME(type), "INTNETTRUNKFACTORY") || ++ !strcmp((const char *)ORIG_TYPE_NAME(type), "RAWPCIFACTORY")) ++ return 0; ++ ++ /* throw out any structs in uapi */ ++ xloc = expand_location(DECL_SOURCE_LOCATION(TYPE_FIELDS(type))); ++ ++ if (strstr(xloc.file, "/uapi/")) ++ error(G_("attempted to randomize userland API struct %s"), ORIG_TYPE_NAME(type)); ++ ++ for (field = TYPE_FIELDS(type), i = 0; field; field = TREE_CHAIN(field), i++) { ++ gcc_assert(TREE_CODE(field) == FIELD_DECL); ++ newtree[i] = field; ++ } ++ ++ /* ++ * enforce that we don't randomize the layout of the last ++ * element of a struct if it's a 0 or 1-length array ++ * or a proper flexible array ++ */ ++ if (is_flexible_array(newtree[num_fields - 1])) ++ shuffle_length--; ++ ++ shuffle(type, (tree *)newtree, shuffle_length); ++ ++ /* ++ * set up a bogus anonymous struct field designed to error out on unnamed struct initializers ++ * as gcc provides no other way to detect such code ++ */ ++ list = 
make_node(FIELD_DECL); ++ TREE_CHAIN(list) = newtree[0]; ++ TREE_TYPE(list) = void_type_node; ++ DECL_SIZE(list) = bitsize_zero_node; ++ DECL_NONADDRESSABLE_P(list) = 1; ++ DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node; ++ DECL_SIZE_UNIT(list) = size_zero_node; ++ DECL_FIELD_OFFSET(list) = size_zero_node; ++ // to satisfy the constify plugin ++ TREE_READONLY(list) = 1; ++ ++ for (i = 0; i < num_fields - 1; i++) ++ TREE_CHAIN(newtree[i]) = newtree[i+1]; ++ TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE; ++ ++ for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) { ++ TYPE_FIELDS(variant) = list; ++ TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant)); ++ TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant)); ++ // force a re-layout ++ TYPE_SIZE(variant) = NULL_TREE; ++ layout_type(variant); ++ } ++ ++ return 1; ++} ++ ++/* from constify plugin */ ++static const_tree get_field_type(const_tree field) ++{ ++ return strip_array_types(TREE_TYPE(field)); ++} ++ ++/* from constify plugin */ ++static bool is_fptr(const_tree fieldtype) ++{ ++ if (TREE_CODE(fieldtype) != POINTER_TYPE) ++ return false; ++ ++ return TREE_CODE(TREE_TYPE(fieldtype)) == FUNCTION_TYPE; ++} ++ ++/* derived from constify plugin */ ++static int is_pure_ops_struct(const_tree node) ++{ ++ const_tree field; ++ ++ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE); ++ ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { ++ const_tree fieldtype = get_field_type(field); ++ enum tree_code code = TREE_CODE(fieldtype); ++ ++ if (node == fieldtype) ++ continue; ++ ++ if (!is_fptr(fieldtype)) ++ return 0; ++ ++ if (code != RECORD_TYPE && code != UNION_TYPE) ++ continue; ++ ++ if (!is_pure_ops_struct(fieldtype)) ++ return 0; ++ } ++ ++ return 1; ++} ++ ++static void randomize_type(tree type) ++{ ++ tree variant; ++ ++ gcc_assert(TREE_CODE(type) == RECORD_TYPE); ++ ++ if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type))) ++ return; ++ ++ if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type)) ++ relayout_struct(type); ++ ++ for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) { ++ TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type)); ++ TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type)); ++ } ++#ifdef __DEBUG_PLUGIN ++ fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type)); ++#ifdef __DEBUG_VERBOSE ++ debug_tree(type); ++#endif ++#endif ++} ++ ++static void randomize_layout_finish_decl(void *event_data, void *data) ++{ ++ tree decl = (tree)event_data; ++ tree type; ++ ++ if (decl == NULL_TREE || decl == error_mark_node) ++ return; ++ ++ type = TREE_TYPE(decl); ++ ++ if (TREE_CODE(decl) != VAR_DECL) ++ return; ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ return; ++ ++ if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type))) ++ return; ++ ++ relayout_decl(decl); ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ ++ if (type == NULL_TREE || type == error_mark_node) ++ return; ++ ++ if (TREE_CODE(type) != RECORD_TYPE) ++ return; ++ ++ if (TYPE_FIELDS(type) == NULL_TREE) ++ return; ++ ++ if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type))) ++ return; ++ ++#ifdef __DEBUG_PLUGIN ++ fprintf(stderr, 
"Calling randomize_type on %s\n", ORIG_TYPE_NAME(type)); ++#endif ++#ifdef __DEBUG_VERBOSE ++ debug_tree(type); ++#endif ++ randomize_type(type); ++ ++ return; ++} ++ ++static struct attribute_spec randomize_layout_attr = { ++ .name = "randomize_layout", ++ // related to args ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ // need type declaration ++ .type_required = true, ++ .function_type_required = false, ++ .handler = handle_randomize_layout_attr, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec no_randomize_layout_attr = { ++ .name = "no_randomize_layout", ++ // related to args ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ // need type declaration ++ .type_required = true, ++ .function_type_required = false, ++ .handler = handle_randomize_layout_attr, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec randomize_considered_attr = { ++ .name = "randomize_considered", ++ // related to args ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ // need type declaration ++ .type_required = true, ++ .function_type_required = false, ++ .handler = handle_randomize_considered_attr, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static struct attribute_spec randomize_performed_attr = { ++ .name = "randomize_performed", ++ // related to args ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ // need type declaration ++ .type_required = true, ++ .function_type_required = false, ++ .handler = handle_randomize_performed_attr, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&randomize_layout_attr); ++ register_attribute(&no_randomize_layout_attr); ++ register_attribute(&randomize_considered_attr); ++ register_attribute(&randomize_performed_attr); ++} ++ ++static void check_bad_casts_in_constructor(tree var, tree init) ++{ ++ unsigned HOST_WIDE_INT idx; ++ tree field, val; ++ tree field_type, val_type; ++ ++ FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(init), idx, field, val) { ++ if (TREE_CODE(val) == CONSTRUCTOR) { ++ check_bad_casts_in_constructor(var, val); ++ continue; ++ } ++ ++ /* pipacs' plugin creates franken-arrays that differ from those produced by ++ normal code which all have valid 'field' trees. 
work around this */ ++ if (field == NULL_TREE) ++ continue; ++ field_type = TREE_TYPE(field); ++ val_type = TREE_TYPE(val); ++ ++ if (TREE_CODE(field_type) != POINTER_TYPE || TREE_CODE(val_type) != POINTER_TYPE) ++ continue; ++ ++ if (field_type == val_type) ++ continue; ++ ++ field_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(field_type)))); ++ val_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(val_type)))); ++ ++ if (field_type == void_type_node) ++ continue; ++ if (field_type == val_type) ++ continue; ++ if (TREE_CODE(val_type) != RECORD_TYPE) ++ continue; ++ ++ if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(val_type))) ++ continue; ++ inform(DECL_SOURCE_LOCATION(var), "found mismatched struct pointer types: %qT and %qT\n", TYPE_MAIN_VARIANT(field_type), TYPE_MAIN_VARIANT(val_type)); ++ } ++} ++ ++/* derived from the constify plugin */ ++static void check_global_variables(void *event_data, void *data) ++{ ++ struct varpool_node *node; ++ tree init; ++ ++ FOR_EACH_VARIABLE(node) { ++ tree var = NODE_DECL(node); ++ init = DECL_INITIAL(var); ++ if (init == NULL_TREE) ++ continue; ++ ++ if (TREE_CODE(init) != CONSTRUCTOR) ++ continue; ++ ++ check_bad_casts_in_constructor(var, init); ++ } ++} ++ ++static bool dominated_by_is_err(const_tree rhs, basic_block bb) ++{ ++ basic_block dom; ++ gimple dom_stmt; ++ gimple call_stmt; ++ const_tree dom_lhs; ++ const_tree poss_is_err_cond; ++ const_tree poss_is_err_func; ++ const_tree is_err_arg; ++ ++ dom = get_immediate_dominator(CDI_DOMINATORS, bb); ++ if (!dom) ++ return false; ++ ++ dom_stmt = last_stmt(dom); ++ if (!dom_stmt) ++ return false; ++ ++ if (gimple_code(dom_stmt) != GIMPLE_COND) ++ return false; ++ ++ if (gimple_cond_code(dom_stmt) != NE_EXPR) ++ return false; ++ ++ if (!integer_zerop(gimple_cond_rhs(dom_stmt))) ++ return false; ++ ++ poss_is_err_cond = gimple_cond_lhs(dom_stmt); ++ ++ if (TREE_CODE(poss_is_err_cond) != SSA_NAME) ++ return false; ++ ++ call_stmt = SSA_NAME_DEF_STMT(poss_is_err_cond); ++ ++ if (gimple_code(call_stmt) != GIMPLE_CALL) ++ return false; ++ ++ dom_lhs = gimple_get_lhs(call_stmt); ++ poss_is_err_func = gimple_call_fndecl(call_stmt); ++ if (!poss_is_err_func) ++ return false; ++ if (dom_lhs != poss_is_err_cond) ++ return false; ++ if (strcmp(DECL_NAME_POINTER(poss_is_err_func), "IS_ERR")) ++ return false; ++ ++ is_err_arg = gimple_call_arg(call_stmt, 0); ++ if (!is_err_arg) ++ return false; ++ ++ if (is_err_arg != rhs) ++ return false; ++ ++ return true; ++} ++ ++static void handle_local_var_initializers(void) ++{ ++ tree var; ++ unsigned int i; ++ ++ FOR_EACH_LOCAL_DECL(cfun, i, var) { ++ tree init = DECL_INITIAL(var); ++ if (!init) ++ continue; ++ if (TREE_CODE(init) != CONSTRUCTOR) ++ continue; ++ check_bad_casts_in_constructor(var, init); ++ } ++} ++ ++/* ++ * iterate over all statements to find "bad" casts: ++ * those where the address of the start of a structure is cast ++ * to a pointer of a structure of a different type, or a ++ * structure pointer type is cast to a different structure pointer type ++ */ ++static unsigned int find_bad_casts(void) ++{ ++ basic_block bb; ++ ++ handle_local_var_initializers(); ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt; ++ const_tree lhs; ++ const_tree lhs_type; ++ const_tree rhs1; ++ const_tree rhs_type; ++ const_tree ptr_lhs_type; ++ const_tree ptr_rhs_type; ++ const_tree op0; ++ const_tree op0_type; ++ 
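++ // only plain pointer copies (SSA_NAME) and address-taking (ADDR_EXPR)
++ // can turn one struct pointer type into another, so every other
++ // rhs code is skipped below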
enum tree_code rhs_code; ++ ++ stmt = gsi_stmt(gsi); ++ ++#ifdef __DEBUG_PLUGIN ++#ifdef __DEBUG_VERBOSE ++ debug_gimple_stmt(stmt); ++ debug_tree(gimple_get_lhs(stmt)); ++#endif ++#endif ++ ++ if (gimple_code(stmt) != GIMPLE_ASSIGN) ++ continue; ++ ++#ifdef __DEBUG_PLUGIN ++#ifdef __DEBUG_VERBOSE ++ debug_tree(gimple_assign_rhs1(stmt)); ++#endif ++#endif ++ ++ rhs_code = gimple_assign_rhs_code(stmt); ++ ++ if (rhs_code != ADDR_EXPR && rhs_code != SSA_NAME) ++ continue; ++ ++ lhs = gimple_get_lhs(stmt); ++ lhs_type = TREE_TYPE(lhs); ++ rhs1 = gimple_assign_rhs1(stmt); ++ rhs_type = TREE_TYPE(rhs1); ++ ++ if (TREE_CODE(rhs_type) != POINTER_TYPE || ++ TREE_CODE(lhs_type) != POINTER_TYPE) ++ continue; ++ ++ ptr_lhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(lhs_type)))); ++ ptr_rhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(rhs_type)))); ++ ++ if (ptr_rhs_type == void_type_node) ++ continue; ++ ++ if (ptr_lhs_type == void_type_node) ++ continue; ++ ++ if (dominated_by_is_err(rhs1, bb)) ++ continue; ++ ++ if (TREE_CODE(ptr_rhs_type) != RECORD_TYPE) { ++#ifndef __DEBUG_PLUGIN ++ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_lhs_type))) ++#endif ++ inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type); ++ continue; ++ } ++ ++ if (rhs_code == SSA_NAME && ptr_lhs_type == ptr_rhs_type) ++ continue; ++ ++ if (rhs_code == ADDR_EXPR) { ++ op0 = TREE_OPERAND(rhs1, 0); ++ ++ if (op0 == NULL_TREE) ++ continue; ++ ++ if (TREE_CODE(op0) != VAR_DECL) ++ continue; ++ ++ op0_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(op0)))); ++ if (op0_type == ptr_lhs_type) ++ continue; ++ ++#ifndef __DEBUG_PLUGIN ++ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(op0_type))) ++#endif ++ inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, op0_type); ++ } else { ++ const_tree ssa_name_var = SSA_NAME_VAR(rhs1); ++ /* skip bogus type casts introduced by container_of */ ++ if (ssa_name_var != NULL_TREE && DECL_NAME(ssa_name_var) && ++ !strcmp((const char *)DECL_NAME_POINTER(ssa_name_var), "__mptr")) ++ continue; ++#ifndef __DEBUG_PLUGIN ++ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_rhs_type))) ++#endif ++ inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type); ++ } ++ ++ } ++ } ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data randomize_layout_bad_cast_data = { ++#else ++static struct gimple_opt_pass randomize_layout_bad_cast = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "randomize_layout_bad_cast", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = find_bad_casts, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class randomize_layout_bad_cast : public gimple_opt_pass { ++public: ++ randomize_layout_bad_cast() : gimple_opt_pass(randomize_layout_bad_cast_data, g) {} ++ unsigned int execute() { return find_bad_casts(); } ++}; ++} 
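++// on gcc 4.9+ the pass becomes a C++ class; .has_gate is false above and
++// no gate method is defined, so find_bad_casts runs for every function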
++#endif
++
++static struct opt_pass *make_randomize_layout_bad_cast(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new randomize_layout_bad_cast();
++#else
++ return &randomize_layout_bad_cast.pass;
++#endif
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ int i;
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ bool enable = true;
++ int obtained_seed = 0;
++ struct register_pass_info randomize_layout_bad_cast_info;
++
++ randomize_layout_bad_cast_info.pass = make_randomize_layout_bad_cast();
++ randomize_layout_bad_cast_info.reference_pass_name = "ssa";
++ randomize_layout_bad_cast_info.ref_pass_instance_number = 1;
++ randomize_layout_bad_cast_info.pos_op = PASS_POS_INSERT_AFTER;
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ if (strcmp(lang_hooks.name, "GNU C")) {
++ inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
++ enable = false;
++ }
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "disable")) {
++ enable = false;
++ continue;
++ }
++ if (!strcmp(argv[i].key, "performance-mode")) {
++ performance_mode = 1;
++ continue;
++ }
++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ if (strlen(randstruct_seed) != 64) {
++ error(G_("invalid seed value supplied for %s plugin"), plugin_name);
++ return 1;
++ }
++ obtained_seed = sscanf(randstruct_seed, "%016llx%016llx%016llx%016llx",
++ &shuffle_seed[0], &shuffle_seed[1], &shuffle_seed[2], &shuffle_seed[3]);
++ if (obtained_seed != 4) {
++ error(G_("Invalid seed supplied for %s plugin"), plugin_name);
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &randomize_layout_plugin_info);
++ if (enable) {
++ register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &randomize_layout_bad_cast_info);
++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
++ register_callback(plugin_name, PLUGIN_FINISH_DECL, randomize_layout_finish_decl, NULL);
++ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
+diff --git a/tools/gcc/size_overflow_plugin/.gitignore b/tools/gcc/size_overflow_plugin/.gitignore
+new file mode 100644
+index 0000000..92d3b0c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/.gitignore
+@@ -0,0 +1,2 @@
++size_overflow_hash.h
++size_overflow_hash_aux.h
+diff --git a/tools/gcc/size_overflow_plugin/Makefile b/tools/gcc/size_overflow_plugin/Makefile
+new file mode 100644
+index 0000000..1ae2ed5
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/Makefile
+@@ -0,0 +1,20 @@
++$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++always := $($(HOSTLIBS)-y)
++
++size_overflow_plugin-objs := $(patsubst $(srctree)/$(src)/%.c,%.o,$(wildcard $(srctree)/$(src)/*.c))
++
++$(patsubst $(srctree)/$(src)/%.c,$(obj)/%.o,$(wildcard $(srctree)/$(src)/*.c)): $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
++
++quiet_cmd_build_size_overflow_hash = GENHASH $@
++ cmd_build_size_overflow_hash = \
++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
++ 
$(call if_changed,build_size_overflow_hash) ++ ++quiet_cmd_build_size_overflow_hash_aux = GENHASH $@ ++ cmd_build_size_overflow_hash_aux = \ ++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@ ++$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE ++ $(call if_changed,build_size_overflow_hash_aux) ++ ++targets += size_overflow_hash.h size_overflow_hash_aux.h +diff --git a/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh +new file mode 100644 +index 0000000..12b1e3b +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh +@@ -0,0 +1,102 @@ ++#!/bin/bash ++ ++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c). ++ ++header1="size_overflow_hash.h" ++database="size_overflow_hash.data" ++n=65536 ++hashtable_name="size_overflow_hash" ++ ++usage() { ++cat <<EOF ++usage: $0 options ++OPTIONS: ++ -h|--help help ++ -o header file ++ -d database file ++ -n hash array size ++ -s name of the hash table ++EOF ++ return 0 ++} ++ ++while true ++do ++ case "$1" in ++ -h|--help) usage && exit 0;; ++ -n) n=$2; shift 2;; ++ -o) header1="$2"; shift 2;; ++ -d) database="$2"; shift 2;; ++ -s) hashtable_name="$2"; shift 2;; ++ --) shift 1; break ;; ++ *) break ;; ++ esac ++done ++ ++create_defines() { ++ for i in `seq 0 31` ++ do ++ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1" ++ done ++ echo >> "$header1" ++} ++ ++create_structs() { ++ rm -f "$header1" ++ ++ create_defines ++ ++ cat "$database" | while read data ++ do ++ data_array=($data) ++ struct_hash_name="${data_array[0]}" ++ funcn="${data_array[1]}" ++ params="${data_array[2]}" ++ next="${data_array[4]}" ++ ++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1" ++ ++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1" ++ echo -en "\t.param\t= " >> "$header1" ++ line= ++ for param_num in ${params//-/ }; ++ do ++ line="${line}PARAM"$param_num"|" ++ done ++ ++ echo -e "${line%?},\n};\n" >> "$header1" ++ done ++} ++ ++create_headers() { ++ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1" ++} ++ ++create_array_elements() { ++ index=0 ++ grep -v "nohasharray" $database | sort -n -k 4 | while read data ++ do ++ data_array=($data) ++ i="${data_array[3]}" ++ hash="${data_array[0]}" ++ while [[ $index -lt $i ]] ++ do ++ echo -e "\t["$index"]\t= NULL," >> "$header1" ++ index=$(($index + 1)) ++ done ++ index=$(($index + 1)) ++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1" ++ done ++ echo '};' >> $header1 ++} ++ ++size_overflow_plugin_dir=`dirname $header1` ++if [ "$size_overflow_plugin_dir" != '.' 
]; then ++ mkdir -p "$size_overflow_plugin_dir" 2> /dev/null ++fi ++ ++create_structs ++create_headers ++create_array_elements ++ ++exit 0 +diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c +new file mode 100644 +index 0000000..c43901f +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c +@@ -0,0 +1,748 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs); ++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs); ++ ++// data for the size_overflow asm stmt ++struct asm_data { ++ gimple def_stmt; ++ tree input; ++ tree output; ++}; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++static VEC(tree, gc) *create_asm_io_list(tree string, tree io) ++#else ++static vec<tree, va_gc> *create_asm_io_list(tree string, tree io) ++#endif ++{ ++ tree list; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *vec_list = NULL; ++#else ++ vec<tree, va_gc> *vec_list = NULL; ++#endif ++ ++ list = build_tree_list(NULL_TREE, string); ++ list = chainon(NULL_TREE, build_tree_list(list, io)); ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_safe_push(tree, gc, vec_list, list); ++#else ++ vec_safe_push(vec_list, list); ++#endif ++ return vec_list; ++} ++ ++static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data) ++{ ++ gimple asm_stmt; ++ gimple_stmt_iterator gsi; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *input, *output = NULL; ++#else ++ vec<tree, va_gc> *input, *output = NULL; ++#endif ++ ++ input = create_asm_io_list(str_input, asm_data->input); ++ ++ if (asm_data->output) ++ output = create_asm_io_list(str_output, asm_data->output); ++ ++ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL); ++ gsi = gsi_for_stmt(asm_data->def_stmt); ++ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT); ++ ++ if (asm_data->output) ++ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt; ++} ++ ++static void replace_call_lhs(const struct asm_data *asm_data) ++{ ++ gimple_set_lhs(asm_data->def_stmt, asm_data->input); ++ update_stmt(asm_data->def_stmt); ++ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt; ++} ++ ++static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result) ++{ ++ enum mark cur_fndecl_attr; ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(phi, i); ++ ++ cur_fndecl_attr = search_intentional(visited, arg); ++ if (cur_fndecl_attr != MARK_NO) ++ return cur_fndecl_attr; ++ } ++ return MARK_NO; ++} ++ ++static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs) ++{ ++ enum mark cur_fndecl_attr; ++ const_tree 
rhs1, rhs2; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ cur_fndecl_attr = search_intentional(visited, rhs1); ++ if (cur_fndecl_attr != MARK_NO) ++ return cur_fndecl_attr; ++ return search_intentional(visited, rhs2); ++} ++ ++// Look up the intentional_overflow attribute on the caller and the callee functions. ++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(lhs) != SSA_NAME) ++ return get_intentional_attr_type(lhs); ++ ++ def_stmt = get_def_stmt(lhs); ++ if (!def_stmt) ++ return MARK_NO; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return MARK_NO; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ return search_intentional(visited, SSA_NAME_VAR(lhs)); ++ case GIMPLE_ASM: ++ if (is_size_overflow_intentional_asm_turn_off(def_stmt)) ++ return MARK_TURN_OFF; ++ return MARK_NO; ++ case GIMPLE_CALL: ++ return MARK_NO; ++ case GIMPLE_PHI: ++ return search_intentional_phi(visited, lhs); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return search_intentional(visited, gimple_assign_rhs1(def_stmt)); ++ case 3: ++ return search_intentional_binary(visited, lhs); ++ } ++ case GIMPLE_RETURN: ++ return MARK_NO; ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt. ++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum) ++{ ++ const_tree fndecl; ++ struct pointer_set_t *visited; ++ enum mark cur_fndecl_attr, decl_attr = MARK_NO; ++ ++ fndecl = get_interesting_orig_fndecl(stmt, argnum); ++ if (is_end_intentional_intentional_attr(fndecl, argnum)) ++ decl_attr = MARK_NOT_INTENTIONAL; ++ else if (is_yes_intentional_attr(fndecl, argnum)) ++ decl_attr = MARK_YES; ++ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) { ++ return MARK_TURN_OFF; ++ } ++ ++ visited = pointer_set_create(); ++ cur_fndecl_attr = search_intentional(visited, arg); ++ pointer_set_destroy(visited); ++ ++ switch (cur_fndecl_attr) { ++ case MARK_NO: ++ case MARK_TURN_OFF: ++ return cur_fndecl_attr; ++ default: ++ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum); ++ return MARK_YES; ++ } ++} ++ ++static void check_missing_size_overflow_attribute(tree var) ++{ ++ tree orig_fndecl; ++ unsigned int num; ++ ++ if (is_a_return_check(var)) ++ orig_fndecl = DECL_ORIGIN(var); ++ else ++ orig_fndecl = DECL_ORIGIN(current_function_decl); ++ ++ num = get_function_num(var, orig_fndecl); ++ if (num == CANNOT_FIND_ARG) ++ return; ++ ++ is_missing_function(orig_fndecl, num); ++} ++ ++static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(phi, i); ++ ++ search_size_overflow_attribute(visited, arg); ++ } ++} ++ ++static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs) ++{ ++ const_gimple def_stmt = get_def_stmt(lhs); ++ tree rhs1, rhs2; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ 
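++ // walk both operands: either side may lead back to a parameter or
++ // a call result that is missing from the hash table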
search_size_overflow_attribute(visited, rhs1); ++ search_size_overflow_attribute(visited, rhs2); ++} ++ ++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(lhs) == PARM_DECL) { ++ check_missing_size_overflow_attribute(lhs); ++ return; ++ } ++ ++ def_stmt = get_def_stmt(lhs); ++ if (!def_stmt) ++ return; ++ ++ if (pointer_set_insert(visited, def_stmt)) ++ return; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs)); ++ case GIMPLE_ASM: ++ return; ++ case GIMPLE_CALL: { ++ tree fndecl = gimple_call_fndecl(def_stmt); ++ ++ if (fndecl == NULL_TREE) ++ return; ++ check_missing_size_overflow_attribute(fndecl); ++ return; ++ } ++ case GIMPLE_PHI: ++ return search_size_overflow_attribute_phi(visited, lhs); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt)); ++ case 3: ++ return search_size_overflow_attribute_binary(visited, lhs); ++ } ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++// Search missing entries in the hash table (invoked from the gimple pass) ++static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num) ++{ ++ tree fndecl = NULL_TREE; ++ tree lhs; ++ struct pointer_set_t *visited; ++ ++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) ++ return; ++ ++ if (num == 0) { ++ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN); ++ lhs = gimple_return_retval(stmt); ++ } else { ++ gcc_assert(is_gimple_call(stmt)); ++ lhs = gimple_call_arg(stmt, num - 1); ++ fndecl = gimple_call_fndecl(stmt); ++ } ++ ++ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl))) ++ return; ++ ++ visited = pointer_set_create(); ++ search_size_overflow_attribute(visited, lhs); ++ pointer_set_destroy(visited); ++} ++ ++static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ ++ assign = gimple_build_assign(asm_data->input, asm_data->output); ++ gsi = gsi_for_stmt(stmt); ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ asm_data->def_stmt = assign; ++ ++ asm_data->output = create_new_var(TREE_TYPE(asm_data->output)); ++ asm_data->output = make_ssa_name(asm_data->output, stmt); ++ if (gimple_code(stmt) == GIMPLE_RETURN) ++ gimple_return_set_retval(stmt, asm_data->output); ++ else ++ gimple_call_set_arg(stmt, argnum - 1, asm_data->output); ++ update_stmt(stmt); ++} ++ ++static char *create_asm_comment(unsigned int argnum, const_gimple stmt , const char *mark_str) ++{ ++ const char *fn_name; ++ char *asm_comment; ++ unsigned int len; ++ ++ if (argnum == 0) ++ fn_name = DECL_NAME_POINTER(current_function_decl); ++ else ++ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt)); ++ ++ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum); ++ gcc_assert(len > 0); ++ ++ return asm_comment; ++} ++ ++static const char *convert_mark_to_str(enum mark mark) ++{ ++ switch (mark) { ++ case MARK_NO: ++ return OK_ASM_STR; ++ case MARK_YES: ++ case MARK_NOT_INTENTIONAL: ++ return YES_ASM_STR; ++ case MARK_TURN_OFF: ++ return TURN_OFF_ASM_STR; ++ } ++ ++ gcc_unreachable(); ++} ++ ++/* Create the input of the size_overflow asm stmt. 
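++ * The asm template itself is only a comment, so no real code is emitted;
++ * the later ipa pass finds these stmts and reads the mark from the string.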
++ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt: ++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D)); ++ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion. ++ * otherwise create the input (for a phi stmt the output too) of the asm stmt. ++ */ ++static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data) ++{ ++ if (!asm_data->def_stmt) { ++ asm_data->input = NULL_TREE; ++ return; ++ } ++ ++ asm_data->input = create_new_var(TREE_TYPE(asm_data->output)); ++ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt); ++ ++ switch (gimple_code(asm_data->def_stmt)) { ++ case GIMPLE_ASSIGN: ++ case GIMPLE_CALL: ++ replace_call_lhs(asm_data); ++ break; ++ case GIMPLE_PHI: ++ create_output_from_phi(stmt, argnum, asm_data); ++ break; ++ case GIMPLE_NOP: { ++ enum mark mark; ++ const char *mark_str; ++ char *asm_comment; ++ ++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum); ++ ++ asm_data->input = asm_data->output; ++ asm_data->output = NULL; ++ asm_data->def_stmt = stmt; ++ ++ mark_str = convert_mark_to_str(mark); ++ asm_comment = create_asm_comment(argnum, stmt, mark_str); ++ ++ create_asm_stmt(asm_comment, build_string(3, "rm"), NULL, asm_data); ++ free(asm_comment); ++ asm_data->input = NULL_TREE; ++ break; ++ } ++ case GIMPLE_ASM: ++ if (is_size_overflow_asm(asm_data->def_stmt)) { ++ asm_data->input = NULL_TREE; ++ break; ++ } ++ default: ++ debug_gimple_stmt(asm_data->def_stmt); ++ gcc_unreachable(); ++ } ++} ++ ++/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type ++ * is of the right kind create the appropriate size_overflow asm stmts: ++ * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16); ++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D)); ++ */ ++static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum) ++{ ++ struct asm_data asm_data; ++ const char *mark_str; ++ char *asm_comment; ++ enum mark mark; ++ ++ if (is_gimple_constant(output_node)) ++ return; ++ ++ asm_data.output = output_node; ++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum); ++ if (mark != MARK_TURN_OFF) ++ search_missing_size_overflow_attribute_gimple(stmt, argnum); ++ ++ asm_data.def_stmt = get_def_stmt(asm_data.output); ++ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt)) ++ return; ++ ++ create_asm_input(stmt, argnum, &asm_data); ++ if (asm_data.input == NULL_TREE) ++ return; ++ ++ mark_str = convert_mark_to_str(mark); ++ asm_comment = create_asm_comment(argnum, stmt, mark_str); ++ create_asm_stmt(asm_comment, build_string(2, "0"), build_string(4, "=rm"), &asm_data); ++ free(asm_comment); ++} ++ ++// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL". 
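++// e.g., for MARK_TURN_OFF on a call with a lhs this creates something like:
++// __asm__("# size_overflow MARK_TURN_OFF" : "=rm" lhs_2 : "0" lhs_1);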
++static bool create_mark_asm(gimple stmt, enum mark mark) ++{ ++ struct asm_data asm_data; ++ const char *asm_str; ++ ++ switch (mark) { ++ case MARK_TURN_OFF: ++ asm_str = TURN_OFF_ASM_STR; ++ break; ++ case MARK_NOT_INTENTIONAL: ++ case MARK_YES: ++ asm_str = YES_ASM_STR; ++ break; ++ default: ++ gcc_unreachable(); ++ } ++ ++ asm_data.def_stmt = stmt; ++ asm_data.output = gimple_call_lhs(stmt); ++ ++ if (asm_data.output == NULL_TREE) { ++ asm_data.input = gimple_call_arg(stmt, 0); ++ if (is_gimple_constant(asm_data.input)) ++ return false; ++ asm_data.output = NULL; ++ create_asm_stmt(asm_str, build_string(3, "rm"), NULL, &asm_data); ++ return true; ++ } ++ ++ create_asm_input(stmt, 0, &asm_data); ++ gcc_assert(asm_data.input != NULL_TREE); ++ ++ create_asm_stmt(asm_str, build_string(2, "0"), build_string(4, "=rm"), &asm_data); ++ return true; ++} ++ ++static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs) ++{ ++ gimple def_stmt; ++ ++ def_stmt = get_def_stmt(lhs); ++ if (!def_stmt) ++ return; ++ ++ if (pointer_set_insert(visited, def_stmt)) ++ return; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ case GIMPLE_ASM: ++ case GIMPLE_CALL: ++ break; ++ case GIMPLE_PHI: { ++ unsigned int i, n = gimple_phi_num_args(def_stmt); ++ ++ pointer_set_insert(visited, def_stmt); ++ ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(def_stmt, i); ++ ++ walk_use_def_ptr(visited, arg); ++ } ++ } ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt)); ++ return; ++ case 3: ++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt)); ++ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt)); ++ return; ++ default: ++ return; ++ } ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page) ++static void insert_mark_not_intentional_asm_at_ptr(const_tree arg) ++{ ++ struct pointer_set_t *visited; ++ ++ visited = pointer_set_create(); ++ walk_use_def_ptr(visited, arg); ++ pointer_set_destroy(visited); ++} ++ ++// Determine the return value and insert the asm stmt to mark the return stmt. ++static void insert_asm_ret(gimple stmt) ++{ ++ tree ret; ++ ++ ret = gimple_return_retval(stmt); ++ create_size_overflow_asm(stmt, ret, 0); ++} ++ ++// Determine the correct arg index and arg and insert the asm stmt to mark the stmt. ++static void insert_asm_arg(gimple stmt, unsigned int orig_argnum) ++{ ++ tree arg; ++ unsigned int argnum; ++ ++ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt)); ++ gcc_assert(argnum != 0); ++ if (argnum == CANNOT_FIND_ARG) ++ return; ++ ++ arg = gimple_call_arg(stmt, argnum - 1); ++ gcc_assert(arg != NULL_TREE); ++ ++ // skip all ptr - ptr expressions ++ insert_mark_not_intentional_asm_at_ptr(arg); ++ ++ create_size_overflow_asm(stmt, arg, argnum); ++} ++ ++// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array. ++static void set_argnum_attribute(const_tree attr, bool *argnums) ++{ ++ unsigned int argnum; ++ tree attr_value; ++ ++ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) { ++ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value)); ++ argnums[argnum] = true; ++ } ++} ++ ++// If a function arg or the return value is in the hash table then set its index in the array. 
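++// Sketch of the assumed hash table encoding: for an entry such as
++//   { .name = "kmalloc", .param = 1U << 1 },
++// set_argnum_hash() below sets argnums[1], i.e. arg 1 of kmalloc is interesting;
++// bit 0 (argnums[0]) stands for the return value.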
++static void set_argnum_hash(tree fndecl, bool *argnums)
++{
++	unsigned int num;
++	const struct size_overflow_hash *hash;
++
++	hash = get_function_hash(DECL_ORIGIN(fndecl));
++	if (!hash)
++		return;
++
++	for (num = 0; num <= MAX_PARAM; num++) {
++		if (!(hash->param & (1U << num)))
++			continue;
++
++		argnums[num] = true;
++	}
++}
++
++static bool is_all_the_argnums_empty(bool *argnums)
++{
++	unsigned int i;
++
++	for (i = 0; i <= MAX_PARAM; i++)
++		if (argnums[i])
++			return false;
++	return true;
++}
++
++// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
++static void search_interesting_args(tree fndecl, bool *argnums)
++{
++	const_tree attr;
++
++	set_argnum_hash(fndecl, argnums);
++	if (!is_all_the_argnums_empty(argnums))
++		return;
++
++	attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
++	if (attr && TREE_VALUE(attr))
++		set_argnum_attribute(attr, argnums);
++}
++
++/*
++ * Look up the intentional_overflow attribute that turns off ipa based duplication
++ * on the callee function.
++ */
++static bool is_mark_turn_off_attribute(gimple stmt)
++{
++	enum mark mark;
++	const_tree fndecl = gimple_call_fndecl(stmt);
++
++	mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
++	if (mark == MARK_TURN_OFF)
++		return true;
++	return false;
++}
++
++// If an argument of the callee function is in the hash table or is marked by an attribute then mark the call stmt with an asm stmt
++static void handle_interesting_function(gimple stmt)
++{
++	unsigned int argnum;
++	tree fndecl;
++	bool orig_argnums[MAX_PARAM + 1] = {false};
++
++	if (gimple_call_num_args(stmt) == 0)
++		return;
++	fndecl = gimple_call_fndecl(stmt);
++	if (fndecl == NULL_TREE)
++		return;
++	fndecl = DECL_ORIGIN(fndecl);
++
++	if (is_mark_turn_off_attribute(stmt)) {
++		create_mark_asm(stmt, MARK_TURN_OFF);
++		return;
++	}
++
++	search_interesting_args(fndecl, orig_argnums);
++
++	for (argnum = 1; argnum < MAX_PARAM; argnum++)
++		if (orig_argnums[argnum])
++			insert_asm_arg(stmt, argnum);
++}
++
++// If the return value of the caller function is in the hash table (its index is 0) then mark the return stmt with an asm stmt
++static void handle_interesting_ret(gimple stmt)
++{
++	bool orig_argnums[MAX_PARAM + 1] = {false};
++
++	search_interesting_args(current_function_decl, orig_argnums);
++
++	if (orig_argnums[0])
++		insert_asm_ret(stmt);
++}
++
++// Iterate over all the stmts, search for call and return stmts and mark them if they're in the hash table
++static unsigned int search_interesting_functions(void)
++{
++	basic_block bb;
++
++	FOR_ALL_BB_FN(bb, cfun) {
++		gimple_stmt_iterator gsi;
++
++		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++			gimple stmt = gsi_stmt(gsi);
++
++			if (is_size_overflow_asm(stmt))
++				continue;
++
++			if (is_gimple_call(stmt))
++				handle_interesting_function(stmt);
++			else if (gimple_code(stmt) == GIMPLE_RETURN)
++				handle_interesting_ret(stmt);
++		}
++	}
++	return 0;
++}
++
++/*
++ * A lot of functions get inlined before the ipa passes, so after the build_ssa gimple pass
++ * this pass inserts asm stmts to mark the interesting args
++ * that the ipa pass will detect and insert the size overflow checks for.
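++ * For example (hypothetical user code), a function declared as
++ *   void *alloc_buf(size_t len) __attribute__((size_overflow(1)));
++ * gets the use of its len argument marked with a size_overflow asm stmt here,
++ * so the ipa pass can later duplicate the computation of len and check it.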
++ */ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data insert_size_overflow_asm_pass_data = { ++#else ++static struct gimple_opt_pass insert_size_overflow_asm_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "insert_size_overflow_asm", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = search_interesting_functions, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class insert_size_overflow_asm_pass : public gimple_opt_pass { ++public: ++ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {} ++ unsigned int execute() { return search_interesting_functions(); } ++}; ++} ++#endif ++ ++struct opt_pass *make_insert_size_overflow_asm_pass(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return new insert_size_overflow_asm_pass(); ++#else ++ return &insert_size_overflow_asm_pass.pass; ++#endif ++} +diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c +new file mode 100644 +index 0000000..73f0a12 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c +@@ -0,0 +1,943 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++#define MIN_CHECK true ++#define MAX_CHECK false ++ ++static tree get_size_overflow_type(struct visited *visited, const_gimple stmt, const_tree node) ++{ ++ const_tree type; ++ tree new_type; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ type = TREE_TYPE(node); ++ ++ if (pointer_set_contains(visited->my_stmts, stmt)) ++ return TREE_TYPE(node); ++ ++ switch (TYPE_MODE(type)) { ++ case QImode: ++ new_type = size_overflow_type_HI; ++ break; ++ case HImode: ++ new_type = size_overflow_type_SI; ++ break; ++ case SImode: ++ new_type = size_overflow_type_DI; ++ break; ++ case DImode: ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) ++ new_type = TYPE_UNSIGNED(type) ? 
unsigned_intDI_type_node : intDI_type_node; ++ else ++ new_type = size_overflow_type_TI; ++ break; ++ case TImode: ++ gcc_assert(!TYPE_UNSIGNED(type)); ++ new_type = size_overflow_type_TI; ++ break; ++ default: ++ debug_tree((tree)node); ++ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl); ++ gcc_unreachable(); ++ } ++ ++ if (TYPE_QUALS(type) != 0) ++ return build_qualified_type(new_type, TYPE_QUALS(type)); ++ return new_type; ++} ++ ++static tree get_lhs(const_gimple stmt) ++{ ++ switch (gimple_code(stmt)) { ++ case GIMPLE_ASSIGN: ++ case GIMPLE_CALL: ++ return gimple_get_lhs(stmt); ++ case GIMPLE_PHI: ++ return gimple_phi_result(stmt); ++ default: ++ return NULL_TREE; ++ } ++} ++ ++static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before) ++{ ++ gimple_stmt_iterator gsi; ++ tree lhs; ++ gimple new_stmt; ++ ++ if (rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ gsi = gsi_for_stmt(stmt); ++ new_stmt = build_cast_stmt(visited, size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false); ++ pointer_set_insert(visited->my_stmts, new_stmt); ++ ++ lhs = get_lhs(new_stmt); ++ gcc_assert(lhs != NULL_TREE); ++ return lhs; ++} ++ ++tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before) ++{ ++ tree lhs, dst_type; ++ gimple_stmt_iterator gsi; ++ ++ if (rhs1 == NULL_TREE) { ++ debug_gimple_stmt(oldstmt); ++ error("%s: rhs1 is NULL_TREE", __func__); ++ gcc_unreachable(); ++ } ++ ++ switch (gimple_code(oldstmt)) { ++ case GIMPLE_ASM: ++ lhs = rhs1; ++ break; ++ case GIMPLE_CALL: ++ case GIMPLE_ASSIGN: ++ lhs = gimple_get_lhs(oldstmt); ++ break; ++ default: ++ debug_gimple_stmt(oldstmt); ++ gcc_unreachable(); ++ } ++ ++ gsi = gsi_for_stmt(oldstmt); ++ pointer_set_insert(visited->stmts, oldstmt); ++ if (lookup_stmt_eh_lp(oldstmt) != 0) { ++ basic_block next_bb, cur_bb; ++ const_edge e; ++ ++ gcc_assert(before == false); ++ gcc_assert(stmt_can_throw_internal(oldstmt)); ++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ cur_bb = gimple_bb(oldstmt); ++ next_bb = cur_bb->next_bb; ++ e = find_edge(cur_bb, next_bb); ++ gcc_assert(e != NULL); ++ gcc_assert(e->flags & EDGE_FALLTHRU); ++ ++ gsi = gsi_after_labels(next_bb); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ before = true; ++ oldstmt = gsi_stmt(gsi); ++ } ++ ++ dst_type = get_size_overflow_type(visited, oldstmt, lhs); ++ ++ if (is_gimple_constant(rhs1)) ++ return cast_a_tree(dst_type, rhs1); ++ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before); ++} ++ ++tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3) ++{ ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt); ++ ++ if (pointer_set_contains(visited->my_stmts, oldstmt)) ++ return lhs; ++ ++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { ++ rhs1 = gimple_assign_rhs1(oldstmt); ++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT); ++ } ++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { ++ rhs2 = gimple_assign_rhs2(oldstmt); ++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT); ++ } ++ ++ stmt = gimple_copy(oldstmt); ++ gimple_set_location(stmt, gimple_location(oldstmt)); ++ pointer_set_insert(visited->my_stmts, stmt); ++ ++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) ++ gimple_assign_set_rhs_code(stmt, MULT_EXPR); ++ ++ size_overflow_type = 
get_size_overflow_type(visited, oldstmt, node); ++ ++ new_var = create_new_var(size_overflow_type); ++ new_var = make_ssa_name(new_var, stmt); ++ gimple_assign_set_lhs(stmt, new_var); ++ ++ if (rhs1 != NULL_TREE) ++ gimple_assign_set_rhs1(stmt, rhs1); ++ ++ if (rhs2 != NULL_TREE) ++ gimple_assign_set_rhs2(stmt, rhs2); ++#if BUILDING_GCC_VERSION >= 4006 ++ if (rhs3 != NULL_TREE) ++ gimple_assign_set_rhs3(stmt, rhs3); ++#endif ++ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); ++ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited->stmts, oldstmt); ++ return gimple_assign_lhs(stmt); ++} ++ ++static tree cast_parm_decl(struct visited *visited, tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ basic_block first_bb; ++ ++ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg)); ++ ++ if (bb->index == 0) { ++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest; ++ gcc_assert(dom_info_available_p(CDI_DOMINATORS)); ++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun)); ++ bb = first_bb; ++ } ++ ++ gsi = gsi_after_labels(bb); ++ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ ++ return gimple_assign_lhs(assign); ++} ++ ++static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign, def_stmt = get_def_stmt(new_arg); ++ ++ if (gimple_code(def_stmt) == GIMPLE_PHI) { ++ gsi = gsi_after_labels(gimple_bb(def_stmt)); ++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true); ++ } else { ++ gsi = gsi_for_stmt(def_stmt); ++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true); ++ } ++ ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++} ++ ++static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type) ++{ ++ basic_block bb; ++ gimple_stmt_iterator gsi; ++ const_gimple def_stmt; ++ gimple assign; ++ ++ def_stmt = get_def_stmt(arg); ++ bb = gimple_bb(def_stmt); ++ gcc_assert(bb->index != 0); ++ gsi = gsi_after_labels(bb); ++ ++ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++} ++ ++static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i) ++{ ++ tree size_overflow_type; ++ tree arg; ++ const_gimple def_stmt; ++ ++ if (new_arg != NULL_TREE && is_gimple_constant(new_arg)) ++ return new_arg; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ def_stmt = get_def_stmt(arg); ++ gcc_assert(def_stmt != NULL); ++ size_overflow_type = get_size_overflow_type(visited, oldstmt, arg); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_PHI: ++ return cast_visited_phi_arg(visited, ssa_name_var, arg, size_overflow_type); ++ case GIMPLE_NOP: { ++ basic_block bb; ++ ++ bb = gimple_phi_arg_edge(oldstmt, i)->src; ++ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb); ++ } ++ case GIMPLE_ASM: { ++ gimple_stmt_iterator gsi; ++ gimple assign, stmt = get_def_stmt(arg); ++ ++ gsi = gsi_for_stmt(stmt); ++ assign = 
build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++ } ++ default: ++ gcc_assert(new_arg != NULL_TREE); ++ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type)); ++ return use_phi_ssa_name(visited, ssa_name_var, new_arg); ++ } ++} ++ ++static gimple overflow_create_phi_node(struct visited *visited, gimple oldstmt, tree result) ++{ ++ basic_block bb; ++ gimple phi; ++ gimple_seq seq; ++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); ++ ++ bb = gsi_bb(gsi); ++ ++ if (result == NULL_TREE) { ++ tree old_result = gimple_phi_result(oldstmt); ++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, old_result); ++ ++ result = create_new_var(size_overflow_type); ++ } ++ ++ phi = create_phi_node(result, bb); ++ gimple_phi_set_result(phi, make_ssa_name(result, phi)); ++ seq = phi_nodes(bb); ++ gsi = gsi_last(seq); ++ gsi_remove(&gsi, false); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT); ++ gimple_set_bb(phi, bb); ++ return phi; ++} ++ ++#if BUILDING_GCC_VERSION <= 4007 ++static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt) ++#else ++static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt) ++#endif ++{ ++ gimple new_phi; ++ unsigned int i; ++ tree arg, result; ++ location_t loc = gimple_location(oldstmt); ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ gcc_assert(!VEC_empty(tree, *args)); ++#else ++ gcc_assert(!args->is_empty()); ++#endif ++ ++ new_phi = overflow_create_phi_node(visited, oldstmt, ssa_name_var); ++ result = gimple_phi_result(new_phi); ++ ssa_name_var = SSA_NAME_VAR(result); ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, *args, i, arg) { ++#else ++ FOR_EACH_VEC_SAFE_ELT(args, i, arg) { ++#endif ++ arg = create_new_phi_arg(visited, ssa_name_var, arg, oldstmt, i); ++ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc); ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_free(tree, heap, *args); ++#else ++ vec_free(args); ++#endif ++ update_stmt(new_phi); ++ pointer_set_insert(visited->my_stmts, new_phi); ++ return result; ++} ++ ++static tree handle_phi(struct visited *visited, struct cgraph_node *caller_node, tree orig_result) ++{ ++ tree ssa_name_var = NULL_TREE; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, heap) *args = NULL; ++#else ++ vec<tree, va_heap, vl_embed> *args = NULL; ++#endif ++ gimple oldstmt = get_def_stmt(orig_result); ++ unsigned int i, len = gimple_phi_num_args(oldstmt); ++ ++ pointer_set_insert(visited->stmts, oldstmt); ++ for (i = 0; i < len; i++) { ++ tree arg, new_arg; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ new_arg = expand(visited, caller_node, arg); ++ ++ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE) ++ ssa_name_var = SSA_NAME_VAR(new_arg); ++ ++ if (is_gimple_constant(arg)) { ++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, arg); ++ ++ new_arg = cast_a_tree(size_overflow_type, arg); ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_safe_push(tree, heap, args, new_arg); ++#else ++ vec_safe_push(args, new_arg); ++#endif ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ return create_new_phi_node(visited, &args, ssa_name_var, oldstmt); ++#else ++ return create_new_phi_node(visited, args, ssa_name_var, oldstmt); ++#endif ++} ++ ++static tree create_cast_assign(struct visited *visited, 
gimple stmt) ++{ ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree lhs = gimple_assign_lhs(stmt); ++ const_tree rhs1_type = TREE_TYPE(rhs1); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ ++ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type)) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ return create_assign(visited, stmt, rhs1, AFTER_STMT); ++} ++ ++static bool skip_lhs_cast_check(const_gimple stmt) ++{ ++ const_tree rhs = gimple_assign_rhs1(stmt); ++ const_gimple def_stmt = get_def_stmt(rhs); ++ ++ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max ++ if (gimple_code(def_stmt) == GIMPLE_ASM) ++ return true; ++ ++ if (is_const_plus_unsigned_signed_truncation(rhs)) ++ return true; ++ ++ return false; ++} ++ ++static tree create_string_param(tree string) ++{ ++ tree i_type, a_type; ++ const int length = TREE_STRING_LENGTH(string); ++ ++ gcc_assert(length > 0); ++ ++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1)); ++ a_type = build_array_type(char_type_node, i_type); ++ ++ TREE_TYPE(string) = a_type; ++ TREE_CONSTANT(string) = 1; ++ TREE_READONLY(string) = 1; ++ ++ return build1(ADDR_EXPR, ptr_type_node, string); ++} ++ ++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value) ++{ ++ gimple cond_stmt; ++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); ++ ++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE); ++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(cond_stmt); ++} ++ ++static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min) ++{ ++ gimple func_stmt; ++ const_gimple def_stmt; ++ const_tree loc_line; ++ tree loc_file, ssa_name, current_func; ++ expanded_location xloc; ++ char *ssa_name_buf; ++ int len; ++ struct cgraph_edge *edge; ++ struct cgraph_node *callee_node; ++ int frequency; ++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); ++ ++ def_stmt = get_def_stmt(arg); ++ xloc = expand_location(gimple_location(def_stmt)); ++ ++ if (!gimple_has_location(def_stmt)) { ++ xloc = expand_location(gimple_location(stmt)); ++ if (!gimple_has_location(stmt)) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ } ++ ++ loc_line = build_int_cstu(unsigned_type_node, xloc.line); ++ ++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file); ++ loc_file = create_string_param(loc_file); ++ ++ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl)); ++ current_func = create_string_param(current_func); ++ ++ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL); ++ call_count++; ++ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? 
"min" : "max", call_count); ++ gcc_assert(len > 0); ++ ssa_name = build_string(len + 1, ssa_name_buf); ++ free(ssa_name_buf); ++ ssa_name = create_string_param(ssa_name); ++ ++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name) ++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name); ++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); ++ ++ callee_node = cgraph_get_create_node(report_size_overflow_decl); ++ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true); ++ ++ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth); ++ gcc_assert(edge != NULL); ++} ++ ++static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min) ++{ ++ basic_block cond_bb, join_bb, bb_true; ++ edge e; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ cond_bb = gimple_bb(stmt); ++ if (before) ++ gsi_prev(&gsi); ++ if (gsi_end_p(gsi)) ++ e = split_block_after_labels(cond_bb); ++ else ++ e = split_block(cond_bb, gsi_stmt(gsi)); ++ cond_bb = e->src; ++ join_bb = e->dest; ++ e->flags = EDGE_FALSE_VALUE; ++ e->probability = REG_BR_PROB_BASE; ++ ++ bb_true = create_empty_bb(cond_bb); ++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); ++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE); ++ make_edge(bb_true, join_bb, EDGE_FALLTHRU); ++ ++ gcc_assert(dom_info_available_p(CDI_DOMINATORS)); ++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); ++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); ++ ++ if (current_loops != NULL) { ++ gcc_assert(cond_bb->loop_father == join_bb->loop_father); ++ add_bb_to_loop(bb_true, cond_bb->loop_father); ++ } ++ ++ insert_cond(cond_bb, arg, cond_code, type_value); ++ insert_cond_result(caller_node, bb_true, stmt, arg, min); ++ ++// print_the_code_insertions(stmt); ++} ++ ++void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before) ++{ ++ const_tree rhs_type = TREE_TYPE(rhs); ++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min; ++ ++ gcc_assert(rhs_type != NULL_TREE); ++ if (TREE_CODE(rhs_type) == POINTER_TYPE) ++ return; ++ ++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE); ++ ++ if (is_const_plus_unsigned_signed_truncation(rhs)) ++ return; ++ ++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type)); ++ // typemax (-1) < typemin (0) ++ if (TREE_OVERFLOW(type_max)) ++ return; ++ ++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type)); ++ ++ cast_rhs_type = TREE_TYPE(cast_rhs); ++ type_max_type = TREE_TYPE(type_max); ++ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type)); ++ ++ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK); ++ ++ // special case: get_size_overflow_type(), 32, u64->s ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type)) ++ return; ++ ++ type_min_type = TREE_TYPE(type_min); ++ gcc_assert(types_compatible_p(type_max_type, type_min_type)); ++ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK); ++} ++ ++static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt) ++{ ++ bool cast_lhs, cast_rhs; 
++ tree lhs = gimple_assign_lhs(stmt); ++ tree rhs = gimple_assign_rhs1(stmt); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ const_tree rhs_type = TREE_TYPE(rhs); ++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type); ++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type); ++ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode); ++ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode); ++ ++ static bool check_lhs[3][4] = { ++ // ss su us uu ++ { false, true, true, false }, // lhs > rhs ++ { false, false, false, false }, // lhs = rhs ++ { true, true, true, true }, // lhs < rhs ++ }; ++ ++ static bool check_rhs[3][4] = { ++ // ss su us uu ++ { true, false, true, true }, // lhs > rhs ++ { true, false, true, true }, // lhs = rhs ++ { true, false, true, true }, // lhs < rhs ++ }; ++ ++ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!! ++ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode)) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (lhs_size > rhs_size) { ++ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ } else if (lhs_size == rhs_size) { ++ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ } else { ++ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ } ++ ++ if (!cast_lhs && !cast_rhs) ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ if (cast_lhs && !skip_lhs_cast_check(stmt)) ++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT); ++ ++ if (cast_rhs) ++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT); ++ ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++} ++ ++static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gimple stmt) ++{ ++ enum tree_code rhs_code; ++ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt); ++ ++ if (pointer_set_contains(visited->my_stmts, stmt)) ++ return lhs; ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ new_rhs1 = expand(visited, caller_node, rhs1); ++ ++ if (new_rhs1 == NULL_TREE) ++ return create_cast_assign(visited, stmt); ++ ++ if (pointer_set_contains(visited->no_cast_check, stmt)) ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ rhs_code = gimple_assign_rhs_code(stmt); ++ if (rhs_code == BIT_NOT_EXPR || rhs_code == NEGATE_EXPR) { ++ tree size_overflow_type = get_size_overflow_type(visited, stmt, rhs1); ++ ++ new_rhs1 = cast_to_new_size_overflow_type(visited, stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ } ++ ++ if (!gimple_assign_cast_p(stmt)) ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt); ++} ++ ++static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gimple stmt) ++{ ++ tree rhs1, lhs = gimple_assign_lhs(stmt); ++ gimple def_stmt = get_def_stmt(lhs); ++ 
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP); ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ if (is_gimple_constant(rhs1)) ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ switch (TREE_CODE(rhs1)) { ++ case SSA_NAME: { ++ tree ret = handle_unary_rhs(visited, caller_node, def_stmt); ++ ++ if (gimple_assign_cast_p(stmt)) ++ unsigned_signed_cast_intentional_overflow(visited, stmt); ++ return ret; ++ } ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case ADDR_EXPR: ++ case COMPONENT_REF: ++ case INDIRECT_REF: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case TARGET_MEM_REF: ++ case VIEW_CONVERT_EXPR: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ case PARM_DECL: ++ case VAR_DECL: ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ default: ++ debug_gimple_stmt(def_stmt); ++ debug_tree(rhs1); ++ gcc_unreachable(); ++ } ++} ++ ++static void __unused print_the_code_insertions(const_gimple stmt) ++{ ++ location_t loc = gimple_location(stmt); ++ ++ inform(loc, "Integer size_overflow check applied here."); ++} ++ ++static bool is_from_cast(const_tree node) ++{ ++ gimple def_stmt = get_def_stmt(node); ++ ++ if (!def_stmt) ++ return false; ++ ++ if (gimple_assign_cast_p(def_stmt)) ++ return true; ++ ++ return false; ++} ++ ++// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type. ++static bool is_a_ptr_minus(gimple stmt) ++{ ++ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs; ++ ++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ if (!is_from_cast(rhs1)) ++ return false; ++ ++ rhs2 = gimple_assign_rhs2(stmt); ++ if (!is_from_cast(rhs2)) ++ return false; ++ ++ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1)); ++ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2)); ++ ++ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE) ++ return false; ++ ++ return true; ++} ++ ++static tree handle_binary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs) ++{ ++ enum intentional_overflow_type res; ++ tree rhs1, rhs2, new_lhs; ++ gimple def_stmt = get_def_stmt(lhs); ++ tree new_rhs1 = NULL_TREE; ++ tree new_rhs2 = NULL_TREE; ++ ++ if (is_a_ptr_minus(def_stmt)) ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ /* no DImode/TImode division in the 32/64 bit kernel */ ++ switch (gimple_assign_rhs_code(def_stmt)) { ++ case RDIV_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ case POINTER_PLUS_EXPR: ++ case BIT_AND_EXPR: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ default: ++ break; ++ } ++ ++ new_lhs = handle_integer_truncation(visited, caller_node, lhs); ++ if (new_lhs != NULL_TREE) ++ return new_lhs; ++ ++ if (TREE_CODE(rhs1) == SSA_NAME) ++ new_rhs1 = expand(visited, caller_node, rhs1); ++ if (TREE_CODE(rhs2) == SSA_NAME) ++ new_rhs2 = expand(visited, caller_node, rhs2); ++ ++ res = add_mul_intentional_overflow(def_stmt); ++ if (res != NO_INTENTIONAL_OVERFLOW) { ++ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++ insert_cast_expr(visited, get_def_stmt(new_lhs), res); ++ return new_lhs; ++ } ++ ++ if (skip_expr_on_double_type(def_stmt)) { ++ new_lhs = dup_assign(visited, 
def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++ insert_cast_expr(visited, get_def_stmt(new_lhs), NO_INTENTIONAL_OVERFLOW); ++ return new_lhs; ++ } ++ ++ if (is_a_neg_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE); ++ if (is_a_neg_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2); ++ ++ ++ if (is_a_constant_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE); ++ if (is_a_constant_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2); ++ ++ // the const is between 0 and (signed) MAX ++ if (is_gimple_constant(rhs1)) ++ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT); ++ if (is_gimple_constant(rhs2)) ++ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT); ++ ++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++#if BUILDING_GCC_VERSION >= 4006 ++static tree get_new_rhs(struct visited *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs) ++{ ++ if (is_gimple_constant(rhs)) ++ return cast_a_tree(size_overflow_type, rhs); ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return NULL_TREE; ++ return expand(visited, caller_node, rhs); ++} ++ ++static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs) ++{ ++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs3 = gimple_assign_rhs3(def_stmt); ++ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1); ++ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2); ++ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3); ++ ++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3); ++} ++#endif ++ ++static tree get_my_stmt_lhs(struct visited *visited, gimple stmt) ++{ ++ gimple_stmt_iterator gsi; ++ gimple next_stmt = NULL; ++ ++ gsi = gsi_for_stmt(stmt); ++ ++ do { ++ gsi_next(&gsi); ++ next_stmt = gsi_stmt(gsi); ++ ++ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt)) ++ return NULL_TREE; ++ ++ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt)) ++ break; ++ ++ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt)); ++ } while (!gsi_end_p(gsi)); ++ ++ gcc_assert(next_stmt); ++ return get_lhs(next_stmt); ++} ++ ++static tree expand_visited(struct visited *visited, gimple def_stmt) ++{ ++ gimple_stmt_iterator gsi; ++ enum gimple_code code = gimple_code(def_stmt); ++ ++ if (code == GIMPLE_ASM) ++ return NULL_TREE; ++ ++ gsi = gsi_for_stmt(def_stmt); ++ gsi_next(&gsi); ++ ++ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi)) ++ return NULL_TREE; ++ return get_my_stmt_lhs(visited, def_stmt); ++} ++ ++tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs) ++{ ++ gimple def_stmt; ++ ++ def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP) ++ return NULL_TREE; ++ ++ if (pointer_set_contains(visited->my_stmts, def_stmt)) ++ return lhs; ++ ++ 
if (pointer_set_contains(visited->stmts, def_stmt)) ++ return expand_visited(visited, def_stmt); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_PHI: ++ return handle_phi(visited, caller_node, lhs); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return handle_unary_ops(visited, caller_node, def_stmt); ++ case 3: ++ return handle_binary_ops(visited, caller_node, lhs); ++#if BUILDING_GCC_VERSION >= 4006 ++ case 4: ++ return handle_ternary_ops(visited, caller_node, lhs); ++#endif ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c +new file mode 100644 +index 0000000..df50164 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c +@@ -0,0 +1,1141 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++#define VEC_LEN 128 ++#define RET_CHECK NULL_TREE ++#define WRONG_NODE 32 ++#define NOT_INTENTIONAL_ASM NULL ++ ++unsigned int call_count; ++ ++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs); ++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs); ++ ++struct visited_fns { ++ struct visited_fns *next; ++ const_tree fndecl; ++ unsigned int num; ++ const_gimple first_stmt; ++}; ++ ++struct next_cgraph_node { ++ struct next_cgraph_node *next; ++ struct cgraph_node *current_function; ++ tree callee_fndecl; ++ unsigned int num; ++}; ++ ++// Don't want to duplicate entries in next_cgraph_node ++static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num) ++{ ++ const_tree new_callee_fndecl; ++ struct next_cgraph_node *cur_node; ++ ++ if (fndecl == RET_CHECK) ++ new_callee_fndecl = NODE_DECL(node); ++ else ++ new_callee_fndecl = fndecl; ++ ++ for (cur_node = head; cur_node; cur_node = cur_node->next) { ++ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0)) ++ continue; ++ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0)) ++ continue; ++ if (num == cur_node->num) ++ return true; ++ } ++ return false; ++} ++ ++/* Add a next_cgraph_node into the list for handle_function(). ++ * handle_function() iterates over all the next cgraph nodes and ++ * starts the overflow check insertion process. 
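++ * (Each list node roughly carries { current_function, callee_fndecl, num },
++ * where num selects the checked arg and 0 means the return value.)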
++ */ ++static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num) ++{ ++ struct next_cgraph_node *new_node; ++ ++ if (is_in_next_cgraph_node(head, node, fndecl, num)) ++ return head; ++ ++ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node)); ++ new_node->current_function = node; ++ new_node->next = NULL; ++ new_node->num = num; ++ if (fndecl == RET_CHECK) ++ new_node->callee_fndecl = NODE_DECL(node); ++ else ++ new_node->callee_fndecl = fndecl; ++ ++ if (!head) ++ return new_node; ++ ++ new_node->next = head; ++ return new_node; ++} ++ ++static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num) ++{ ++ struct cgraph_edge *e; ++ ++ if (num == 0) ++ return create_new_next_cgraph_node(head, node, RET_CHECK, num); ++ ++ for (e = node->callers; e; e = e->next_caller) { ++ tree fndecl = gimple_call_fndecl(e->call_stmt); ++ ++ gcc_assert(fndecl != NULL_TREE); ++ head = create_new_next_cgraph_node(head, e->caller, fndecl, num); ++ } ++ ++ return head; ++} ++ ++struct missing_functions { ++ struct missing_functions *next; ++ const_tree node; ++ tree fndecl; ++}; ++ ++static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node) ++{ ++ struct missing_functions *new_function; ++ ++ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function)); ++ new_function->node = node; ++ new_function->next = NULL; ++ ++ if (TREE_CODE(node) == FUNCTION_DECL) ++ new_function->fndecl = node; ++ else ++ new_function->fndecl = current_function_decl; ++ gcc_assert(new_function->fndecl); ++ ++ if (!missing_fn_head) ++ return new_function; ++ ++ new_function->next = missing_fn_head; ++ return new_function; ++} ++ ++/* If the function is missing from the hash table and it is a static function ++ * then create a next_cgraph_node from it for handle_function() ++ */ ++static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head) ++{ ++ unsigned int num; ++ const_tree orig_fndecl; ++ struct cgraph_node *next_node = NULL; ++ ++ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl); ++ ++ num = get_function_num(missing_fn_head->node, orig_fndecl); ++ if (num == CANNOT_FIND_ARG) ++ return cnodes; ++ ++ if (!is_missing_function(orig_fndecl, num)) ++ return cnodes; ++ ++ next_node = cgraph_get_node(missing_fn_head->fndecl); ++ if (next_node && next_node->local.local) ++ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num); ++ return cnodes; ++} ++ ++/* Search for missing size_overflow attributes on the last nodes in ipa and collect them ++ * into the next_cgraph_node list. They will be the next interesting returns or callees. 
++ */ ++static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node) ++{ ++ unsigned int i; ++ tree node; ++ struct missing_functions *cur, *missing_fn_head = NULL; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) { ++#else ++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) { ++#endif ++ switch (TREE_CODE(node)) { ++ case PARM_DECL: ++ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE) ++ break; ++ case FUNCTION_DECL: ++ missing_fn_head = create_new_missing_function(missing_fn_head, node); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ while (missing_fn_head) { ++ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head); ++ ++ cur = missing_fn_head->next; ++ free(missing_fn_head); ++ missing_fn_head = cur; ++ } ++ ++ return cnodes; ++} ++ ++static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ const_tree arg = gimple_phi_arg_def(phi, i); ++ ++ set_conditions(visited, interesting_conditions, arg); ++ } ++} ++ ++enum conditions { ++ FROM_CONST, NOT_UNARY, CAST, RET, PHI ++}; ++ ++// Search for constants, cast assignments and binary/ternary assignments ++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ if (is_gimple_constant(lhs)) { ++ interesting_conditions[FROM_CONST] = true; ++ return; ++ } ++ ++ if (!def_stmt) ++ return; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_CALL: ++ if (lhs == gimple_call_lhs(def_stmt)) ++ interesting_conditions[RET] = true; ++ return; ++ case GIMPLE_NOP: ++ case GIMPLE_ASM: ++ return; ++ case GIMPLE_PHI: ++ interesting_conditions[PHI] = true; ++ return walk_phi_set_conditions(visited, interesting_conditions, lhs); ++ case GIMPLE_ASSIGN: ++ if (gimple_num_ops(def_stmt) == 2) { ++ const_tree rhs = gimple_assign_rhs1(def_stmt); ++ ++ if (gimple_assign_cast_p(def_stmt)) ++ interesting_conditions[CAST] = true; ++ ++ return set_conditions(visited, interesting_conditions, rhs); ++ } else { ++ interesting_conditions[NOT_UNARY] = true; ++ return; ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ gcc_unreachable(); ++ } ++} ++ ++// determine whether duplication will be necessary or not. 
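++/* The five conditions correspond to enum conditions above:
++ *   FROM_CONST: a def-stmt chain ends in a gimple constant
++ *   NOT_UNARY:  a binary/ternary assignment is involved
++ *   CAST:       a cast assignment is involved
++ *   RET:        the value is the lhs of a call
++ *   PHI:        a phi stmt is involved
++ */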
++static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions) ++{ ++ struct pointer_set_t *visited; ++ ++ if (gimple_assign_cast_p(cur_node->first_stmt)) ++ interesting_conditions[CAST] = true; ++ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2) ++ interesting_conditions[NOT_UNARY] = true; ++ ++ visited = pointer_set_create(); ++ set_conditions(visited, interesting_conditions, cur_node->node); ++ pointer_set_destroy(visited); ++} ++ ++// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm ++static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ ++ // already removed ++ if (gimple_bb(asm_stmt) == NULL) ++ return; ++ gsi = gsi_for_stmt(asm_stmt); ++ ++ assign = gimple_build_assign(lhs, rhs); ++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT); ++ SSA_NAME_DEF_STMT(lhs) = assign; ++ ++ gsi_remove(&gsi, true); ++} ++ ++/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting ++ * stmt is a return otherwise it is the callee function. ++ */ ++const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum) ++{ ++ const_tree fndecl; ++ ++ if (argnum == 0) ++ fndecl = current_function_decl; ++ else ++ fndecl = gimple_call_fndecl(stmt); ++ ++ if (fndecl == NULL_TREE) ++ return NULL_TREE; ++ ++ return DECL_ORIGIN(fndecl); ++} ++ ++// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max ++static bool skip_asm(const_tree arg) ++{ ++ gimple def_stmt = get_def_stmt(arg); ++ ++ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt)); ++ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM; ++} ++ ++static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(phi, i); ++ ++ walk_use_def(visited, cur_node, arg); ++ } ++} ++ ++static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ tree rhs1, rhs2; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ walk_use_def(visited, cur_node, rhs1); ++ walk_use_def(visited, cur_node, rhs2); ++} ++ ++static void insert_last_node(struct interesting_node *cur_node, tree node) ++{ ++ unsigned int i; ++ tree element; ++ enum tree_code code; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ if (is_gimple_constant(node)) ++ return; ++ ++ code = TREE_CODE(node); ++ if (code == VAR_DECL) { ++ node = DECL_ORIGIN(node); ++ code = TREE_CODE(node); ++ } ++ ++ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF) ++ return; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) { ++#else ++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) { ++#endif ++ if (operand_equal_p(node, element, 0)) ++ return; ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN); ++ VEC_safe_push(tree, gc, cur_node->last_nodes, node); ++#else ++ gcc_assert(cur_node->last_nodes->length() < VEC_LEN); ++ vec_safe_push(cur_node->last_nodes, node); ++#endif 
++}
++
++// a size_overflow asm stmt in the control flow doesn't stop the recursion
++static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++{
++	if (!is_size_overflow_asm(stmt))
++		walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++}
++
++/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
++ * and component refs (for checking the intentional_overflow attribute).
++ */
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++	const_gimple def_stmt;
++
++	if (TREE_CODE(lhs) != SSA_NAME) {
++		insert_last_node(cur_node, lhs);
++		return;
++	}
++
++	def_stmt = get_def_stmt(lhs);
++	if (!def_stmt)
++		return;
++
++	if (pointer_set_insert(visited, def_stmt))
++		return;
++
++	switch (gimple_code(def_stmt)) {
++	case GIMPLE_NOP:
++		return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++	case GIMPLE_ASM:
++		return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++	case GIMPLE_CALL: {
++		tree fndecl = gimple_call_fndecl(def_stmt);
++
++		if (fndecl == NULL_TREE)
++			return;
++		insert_last_node(cur_node, fndecl);
++		return;
++	}
++	case GIMPLE_PHI:
++		return walk_use_def_phi(visited, cur_node, lhs);
++	case GIMPLE_ASSIGN:
++		switch (gimple_num_ops(def_stmt)) {
++		case 2:
++			return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++		case 3:
++			return walk_use_def_binary(visited, cur_node, lhs);
++		}
++	default:
++		debug_gimple_stmt((gimple)def_stmt);
++		error("%s: unknown gimple code", __func__);
++		gcc_unreachable();
++	}
++}
++
++// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
++static void set_last_nodes(struct interesting_node *cur_node)
++{
++	struct pointer_set_t *visited;
++
++	visited = pointer_set_create();
++	walk_use_def(visited, cur_node, cur_node->node);
++	pointer_set_destroy(visited);
++}
++
++enum precond {
++	NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then no duplication is done and no missing size_overflow attribute check happens anywhere.
++ * Missing size_overflow attribute checking only happens when the intentional_overflow attribute is of the mark_no type.
++ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
++ * It skips the possible error codes too.
++ */
++static enum precond check_preconditions(struct interesting_node *cur_node)
++{
++	bool interesting_conditions[5] = {false, false, false, false, false};
++
++	set_last_nodes(cur_node);
++
++	check_intentional_attribute_ipa(cur_node);
++	if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
++		return NO_ATTRIBUTE_SEARCH;
++
++	search_interesting_conditions(cur_node, interesting_conditions);
++
++	// error code: a phi, unary assign (not cast) and returns only
++	if (!interesting_conditions[NOT_UNARY] && interesting_conditions[PHI] && interesting_conditions[RET] && !interesting_conditions[CAST])
++		return NO_ATTRIBUTE_SEARCH;
++
++	// error code: def_stmts trace back to a constant and there are no binary/ternary assignments
++	if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
++		return NO_ATTRIBUTE_SEARCH;
++
++	// unnecessary overflow check
++	if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++		return NO_CHECK_INSERT;
++
++	if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
++		return NO_CHECK_INSERT;
++
++	return NONE;
++}
++
++static tree cast_to_orig_type(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node)
++{
++	const_gimple assign;
++	tree orig_type = TREE_TYPE(orig_node);
++	gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++	assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++	return gimple_assign_lhs(assign);
++}
++
++static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node)
++{
++	void (*set_rhs)(gimple, tree);
++	gimple stmt = cur_node->first_stmt;
++	const_tree orig_node = cur_node->node;
++
++	switch (gimple_code(stmt)) {
++	case GIMPLE_RETURN:
++		gimple_return_set_retval(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++		break;
++	case GIMPLE_CALL:
++		gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
++		break;
++	case GIMPLE_ASSIGN:
++		switch (cur_node->num) {
++		case 1:
++			set_rhs = &gimple_assign_set_rhs1;
++			break;
++		case 2:
++			set_rhs = &gimple_assign_set_rhs2;
++			break;
++#if BUILDING_GCC_VERSION >= 4006
++		case 3:
++			set_rhs = &gimple_assign_set_rhs3;
++			break;
++#endif
++		default:
++			gcc_unreachable();
++		}
++
++		set_rhs(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++		break;
++	default:
++		debug_gimple_stmt(stmt);
++		gcc_unreachable();
++	}
++
++	update_stmt(stmt);
++}
++
++static struct visited *create_visited(void)
++{
++	struct visited *new_node;
++
++	new_node = (struct visited *)xmalloc(sizeof(*new_node));
++	new_node->stmts = pointer_set_create();
++	new_node->my_stmts = pointer_set_create();
++	new_node->skip_expr_casts = pointer_set_create();
++	new_node->no_cast_check = pointer_set_create();
++	return new_node;
++}
++
++static void free_visited(struct visited *visited)
++{
++	pointer_set_destroy(visited->stmts);
++	pointer_set_destroy(visited->my_stmts);
++	pointer_set_destroy(visited->skip_expr_casts);
++	pointer_set_destroy(visited->no_cast_check);
++
++	free(visited);
++}
++
++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow
++ * attribute and asm stmts, decides whether the duplication is necessary, and searches for missing size_overflow attributes.
++ * After expand() it changes the orig node to the duplicated node in the original stmt (first stmt) and inserts the overflow check
++ * for the arg of the callee or for the return value.
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct visited *visited, struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++	enum precond ret;
++	tree new_node, orig_node = cur_node->node;
++
++	ret = check_preconditions(cur_node);
++	if (ret == NO_ATTRIBUTE_SEARCH)
++		return cnodes;
++
++	cnodes = search_overflow_attribute(cnodes, cur_node);
++
++	if (ret == NO_CHECK_INSERT)
++		return cnodes;
++
++	new_node = expand(visited, caller_node, orig_node);
++	if (new_node == NULL_TREE)
++		return cnodes;
++
++	change_orig_node(visited, cur_node, new_node);
++	check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
++
++	return cnodes;
++}
++
++// Check visited_fns interesting nodes.
++static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
++{
++	struct interesting_node *cur;
++
++	for (cur = head; cur; cur = cur->next) {
++		if (!operand_equal_p(node, cur->node, 0))
++			continue;
++		if (num != cur->num)
++			continue;
++		if (first_stmt == cur->first_stmt)
++			return true;
++	}
++	return false;
++}
++
++/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
++   first_stmt: the call, assignment or ret stmt; change_orig_node() will change the original node (retval or function arg) in it
++   last_nodes: the last stmts in the recursion (they have no def_stmt). They are useful in the missing size_overflow attribute check and
++   the intentional_overflow attribute check. They are collected by set_last_nodes().
++   num: arg count of a call stmt or 0 when it is a ret
++   node: the recursion starts from here, it is a call arg or a return value
++   fndecl: the fndecl of the callee function when the node is an arg, otherwise the fndecl of the caller (current_function_decl) function
++ intentional_attr_decl: intentional_overflow attribute of the callee function ++ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function ++ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists ++ */ ++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt) ++{ ++ struct interesting_node *new_node; ++ tree fndecl; ++ enum gimple_code code; ++ ++ gcc_assert(node != NULL_TREE); ++ code = gimple_code(first_stmt); ++ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN); ++ ++ if (num == CANNOT_FIND_ARG) ++ return head; ++ ++ if (skip_types(node)) ++ return head; ++ ++ if (skip_asm(node)) ++ return head; ++ ++ if (is_gimple_call(first_stmt)) ++ fndecl = gimple_call_fndecl(first_stmt); ++ else ++ fndecl = current_function_decl; ++ ++ if (fndecl == NULL_TREE) ++ return head; ++ ++ if (is_in_interesting_node(head, first_stmt, node, num)) ++ return head; ++ ++ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node)); ++ ++ new_node->next = NULL; ++ new_node->first_stmt = first_stmt; ++#if BUILDING_GCC_VERSION <= 4007 ++ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN); ++#else ++ vec_alloc(new_node->last_nodes, VEC_LEN); ++#endif ++ new_node->num = num; ++ new_node->node = node; ++ new_node->fndecl = fndecl; ++ new_node->intentional_attr_decl = MARK_NO; ++ new_node->intentional_attr_cur_fndecl = MARK_NO; ++ new_node->intentional_mark_from_gimple = asm_stmt; ++ ++ if (!head) ++ return new_node; ++ ++ new_node->next = head; ++ return new_node; ++} ++ ++/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa). ++ * If the ret stmt is in the next cgraph node list then it's an interesting ret. ++ */ ++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node) ++{ ++ struct next_cgraph_node *cur_node; ++ tree ret = gimple_return_retval(stmt); ++ ++ if (ret == NULL_TREE) ++ return head; ++ ++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) { ++ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0)) ++ continue; ++ if (cur_node->num == 0) ++ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM); ++ } ++ ++ return head; ++} ++ ++/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa). ++ * If the call stmt is in the next cgraph node list then it's an interesting call. 
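++ * (Editor's illustration with assumed names: if the list carries an entry {callee_fndecl: my_read, num: 3},
++ * then in the currently walked caller every stmt like
++ *   my_read(fd, buf, _len);
++ * turns _len (arg 3) into a new interesting node, so the duplication is continued one caller up.)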
++ */ ++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node) ++{ ++ unsigned int argnum; ++ tree arg; ++ const_tree fndecl; ++ struct next_cgraph_node *cur_node; ++ ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ return head; ++ ++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) { ++ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0)) ++ continue; ++ argnum = get_correct_arg_count(cur_node->num, fndecl); ++ gcc_assert(argnum != CANNOT_FIND_ARG); ++ if (argnum == 0) ++ continue; ++ ++ arg = gimple_call_arg(stmt, argnum - 1); ++ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM); ++ } ++ ++ return head; ++} ++ ++static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count) ++{ ++ if (!operand_equal_p(orig_node, node, 0)) ++ return WRONG_NODE; ++ if (skip_types(node)) ++ return WRONG_NODE; ++ return ret_count; ++} ++ ++// Get the index of the rhs node in an assignment ++static unsigned int get_assign_ops_count(const_gimple stmt, tree node) ++{ ++ const_tree rhs1, rhs2; ++ unsigned int ret; ++ ++ gcc_assert(stmt); ++ gcc_assert(is_gimple_assign(stmt)); ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ gcc_assert(rhs1 != NULL_TREE); ++ ++ switch (gimple_num_ops(stmt)) { ++ case 2: ++ return check_ops(node, rhs1, 1); ++ case 3: ++ ret = check_ops(node, rhs1, 1); ++ if (ret != WRONG_NODE) ++ return ret; ++ ++ rhs2 = gimple_assign_rhs2(stmt); ++ gcc_assert(rhs2 != NULL_TREE); ++ return check_ops(node, rhs2, 2); ++ default: ++ gcc_unreachable(); ++ } ++} ++ ++// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function. ++static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt) ++{ ++ unsigned int i; ++ ++ if (gimple_call_fndecl(stmt) == NULL_TREE) ++ return CANNOT_FIND_ARG; ++ ++ for (i = 0; i < gimple_call_num_args(stmt); i++) { ++ tree node; ++ ++ node = gimple_call_arg(stmt, i); ++ if (!operand_equal_p(arg, node, 0)) ++ continue; ++ if (!skip_types(node)) ++ return i + 1; ++ } ++ ++ return CANNOT_FIND_ARG; ++} ++ ++/* starting from the size_overflow asm stmt collect interesting stmts. They can be ++ * any of return, call or assignment stmts (because of inlining). 
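++ * (Editor's sketch of the immediate-use walk below, in illustrative gimple:
++ *   _3 = <value marked by the size_overflow asm>;
++ *   _4 = _3 + _5;    <- GIMPLE_ASSIGN: interesting, num is the rhs index of _3
++ *   foo (_3);        <- GIMPLE_CALL: interesting, num is the matching arg number
++ *   return _3;       <- GIMPLE_RETURN: interesting, num is 0
++ * phi results are followed recursively; GIMPLE_COND/GIMPLE_SWITCH uses are skipped.)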
++ */ ++static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm) ++{ ++ use_operand_p use_p; ++ imm_use_iterator imm_iter; ++ unsigned int argnum; ++ ++ gcc_assert(TREE_CODE(node) == SSA_NAME); ++ ++ if (pointer_set_insert(visited, node)) ++ return head; ++ ++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) { ++ gimple stmt = USE_STMT(use_p); ++ ++ if (stmt == NULL) ++ return head; ++ if (is_gimple_debug(stmt)) ++ continue; ++ ++ switch (gimple_code(stmt)) { ++ case GIMPLE_CALL: ++ argnum = find_arg_number_gimple(node, stmt); ++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm); ++ break; ++ case GIMPLE_RETURN: ++ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm); ++ break; ++ case GIMPLE_ASSIGN: ++ argnum = get_assign_ops_count(stmt, node); ++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm); ++ break; ++ case GIMPLE_PHI: { ++ tree result = gimple_phi_result(stmt); ++ head = get_interesting_ret_or_call(visited, head, result, intentional_asm); ++ break; ++ } ++ case GIMPLE_ASM: ++ if (gimple_asm_noutputs(stmt) != 0) ++ break; ++ if (!is_size_overflow_asm(stmt)) ++ break; ++ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm); ++ break; ++ case GIMPLE_COND: ++ case GIMPLE_SWITCH: ++ break; ++ default: ++ debug_gimple_stmt(stmt); ++ gcc_unreachable(); ++ break; ++ } ++ } ++ return head; ++} ++ ++static void remove_size_overflow_asm(gimple stmt) ++{ ++ gimple_stmt_iterator gsi; ++ tree input, output; ++ ++ if (!is_size_overflow_asm(stmt)) ++ return; ++ ++ if (gimple_asm_noutputs(stmt) == 0) { ++ gsi = gsi_for_stmt(stmt); ++ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt); ++ gsi_remove(&gsi, true); ++ return; ++ } ++ ++ input = gimple_asm_input_op(stmt, 0); ++ output = gimple_asm_output_op(stmt, 0); ++ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input)); ++} ++ ++/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts. ++ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it. ++ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output. 
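++ * (Editor's note on the marker shapes, pieced together from the *_ASM_STR templates in
++ * size_overflow.h; the constraint strings here are an assumption, only the comment texts are
++ * taken from the header:
++ *   __asm__("# size_overflow " : "=rm"(x) : "0"(x));            <- simple marker, 1 input, 1 output
++ *   __asm__("# size_overflow MARK_TURN_OFF " : : "rm"(parm));   <- output-less, parm_decl kind
++ * )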
++ */
++static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
++{
++ const_tree output;
++ struct pointer_set_t *visited;
++ gimple intentional_asm = NOT_INTENTIONAL_ASM;
++
++ if (!is_size_overflow_asm(stmt))
++ return head;
++
++ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
++ intentional_asm = stmt;
++
++ gcc_assert(gimple_asm_ninputs(stmt) == 1);
++
++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ const_tree input;
++
++ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ input = gimple_asm_input_op(stmt, 0);
++ remove_size_overflow_asm(stmt);
++ if (is_gimple_constant(TREE_VALUE(input)))
++ return head;
++ visited = pointer_set_create();
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++ }
++
++ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
++ remove_size_overflow_asm(stmt);
++
++ visited = pointer_set_create();
++ output = gimple_asm_output_op(stmt, 0);
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++}
++
++/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
++ * or a call stmt or a return stmt and store them in the interesting_node list
++ */
++static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
++{
++ basic_block bb;
++ struct interesting_node *head = NULL;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ enum gimple_code code;
++ gimple stmt = gsi_stmt(gsi);
++
++ code = gimple_code(stmt);
++
++ if (code == GIMPLE_ASM)
++ head = handle_stmt_by_size_overflow_asm(stmt, head);
++
++ if (!next_node)
++ continue;
++ if (code == GIMPLE_CALL)
++ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
++ if (code == GIMPLE_RETURN)
++ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
++ }
++ }
++ return head;
++}
++
++static void free_interesting_node(struct interesting_node *head)
++{
++ struct interesting_node *cur;
++
++ while (head) {
++ cur = head->next;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, gc, head->last_nodes);
++#else
++ vec_free(head->last_nodes);
++#endif
++ free(head);
++ head = cur;
++ }
++}
++
++static struct visited_fns *insert_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
++{
++ struct visited_fns *new_visited_fns;
++
++ new_visited_fns = (struct visited_fns *)xmalloc(sizeof(*new_visited_fns));
++ new_visited_fns->fndecl = cur_node->fndecl;
++ new_visited_fns->num = cur_node->num;
++ new_visited_fns->first_stmt = cur_node->first_stmt;
++ new_visited_fns->next = NULL;
++
++ if (!head)
++ return new_visited_fns;
++
++ new_visited_fns->next = head;
++ return new_visited_fns;
++}
++
++/* Check whether the function is already on the visited_fns list. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
++ * it is a visited_fns function.
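++ * (Editor's aside: all three fields matter for the dedup because the same fndecl/num pair can
++ * become interesting through different first stmts, e.g. two separate calls to the same callee
++ * inside one caller; only an exact (fndecl, num, first_stmt) match is skipped.)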
++ */ ++static bool is_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node) ++{ ++ struct visited_fns *cur; ++ ++ if (!head) ++ return false; ++ ++ for (cur = head; cur; cur = cur->next) { ++ if (cur_node->first_stmt != cur->first_stmt) ++ continue; ++ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0)) ++ continue; ++ if (cur_node->num == cur->num) ++ return true; ++ } ++ return false; ++} ++ ++static void free_next_cgraph_node(struct next_cgraph_node *head) ++{ ++ struct next_cgraph_node *cur; ++ ++ while (head) { ++ cur = head->next; ++ free(head); ++ head = cur; ++ } ++} ++ ++static void remove_all_size_overflow_asm(void) ++{ ++ basic_block bb; ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator si; ++ ++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) ++ remove_size_overflow_asm(gsi_stmt(si)); ++ } ++} ++ ++/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function ++ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk ++ * the newly collected interesting functions (they are interesting if there is control flow between ++ * the interesting stmts and them). ++ */ ++static struct visited_fns *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited_fns *visited_fns) ++{ ++ struct visited *visited; ++ struct interesting_node *head, *cur_node; ++ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL; ++ ++ set_current_function_decl(NODE_DECL(node)); ++ call_count = 0; ++ ++ head = collect_interesting_stmts(next_node); ++ ++ visited = create_visited(); ++ for (cur_node = head; cur_node; cur_node = cur_node->next) { ++ if (is_visited_fns_function(visited_fns, cur_node)) ++ continue; ++ cnodes_head = handle_interesting_stmt(visited, cnodes_head, cur_node, node); ++ visited_fns = insert_visited_fns_function(visited_fns, cur_node); ++ } ++ ++ free_visited(visited); ++ free_interesting_node(head); ++ remove_all_size_overflow_asm(); ++ unset_current_function_decl(); ++ ++ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next) ++ visited_fns = handle_function(cur_cnodes->current_function, cur_cnodes, visited_fns); ++ ++ free_next_cgraph_node(cnodes_head); ++ return visited_fns; ++} ++ ++static void free_visited_fns(struct visited_fns *head) ++{ ++ struct visited_fns *cur; ++ ++ while (head) { ++ cur = head->next; ++ free(head); ++ head = cur; ++ } ++} ++ ++// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions ++unsigned int search_function(void) ++{ ++ struct cgraph_node *node; ++ struct visited_fns *visited_fns = NULL; ++ ++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { ++ gcc_assert(cgraph_function_flags_ready); ++#if BUILDING_GCC_VERSION <= 4007 ++ gcc_assert(node->reachable); ++#endif ++ ++ visited_fns = handle_function(node, NULL, visited_fns); ++ } ++ ++ free_visited_fns(visited_fns); ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data insert_size_overflow_check_data = { ++#else ++static struct ipa_opt_pass_d insert_size_overflow_check = { ++ .pass = { ++#endif ++ .type = SIMPLE_IPA_PASS, ++ .name = "size_overflow", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = search_function, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ 
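++ /* (Editor's note, not in the original: the twin initializer layouts in this
++  * file exist because GCC 4.9 turned passes into C++ classes; <= 4.8 fills
++  * this ipa_opt_pass_d C struct directly, while >= 4.9 only uses the
++  * pass_data part and wraps it in the insert_size_overflow_check class
++  * defined below.) */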
.properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi, ++#if BUILDING_GCC_VERSION < 4009 ++ }, ++ .generate_summary = NULL, ++ .write_summary = NULL, ++ .read_summary = NULL, ++#if BUILDING_GCC_VERSION >= 4006 ++ .write_optimization_summary = NULL, ++ .read_optimization_summary = NULL, ++#endif ++ .stmt_fixup = NULL, ++ .function_transform_todo_flags_start = 0, ++ .function_transform = NULL, ++ .variable_transform = NULL, ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class insert_size_overflow_check : public ipa_opt_pass_d { ++public: ++ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {} ++ unsigned int execute() { return search_function(); } ++}; ++} ++#endif ++ ++struct opt_pass *make_insert_size_overflow_check(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return new insert_size_overflow_check(); ++#else ++ return &insert_size_overflow_check.pass; ++#endif ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c +new file mode 100644 +index 0000000..d71d72a +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c +@@ -0,0 +1,736 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++/* Get the param of the intentional_overflow attribute. 
++ * * 0: MARK_NOT_INTENTIONAL ++ * * 1..MAX_PARAM: MARK_YES ++ * * -1: MARK_TURN_OFF ++ */ ++static tree get_attribute_param(const_tree decl) ++{ ++ const_tree attr; ++ ++ if (decl == NULL_TREE) ++ return NULL_TREE; ++ ++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl)); ++ if (!attr || !TREE_VALUE(attr)) ++ return NULL_TREE; ++ ++ return TREE_VALUE(attr); ++} ++ ++// MARK_TURN_OFF ++bool is_turn_off_intentional_attr(const_tree decl) ++{ ++ const_tree param_head; ++ ++ param_head = get_attribute_param(decl); ++ if (param_head == NULL_TREE) ++ return false; ++ ++ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1) ++ return true; ++ return false; ++} ++ ++// MARK_NOT_INTENTIONAL ++bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum) ++{ ++ const_tree param_head; ++ ++ if (argnum == 0) ++ return false; ++ ++ param_head = get_attribute_param(decl); ++ if (param_head == NULL_TREE) ++ return false; ++ ++ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head))) ++ return true; ++ return false; ++} ++ ++// MARK_YES ++bool is_yes_intentional_attr(const_tree decl, unsigned int argnum) ++{ ++ tree param, param_head; ++ ++ if (argnum == 0) ++ return false; ++ ++ param_head = get_attribute_param(decl); ++ for (param = param_head; param; param = TREE_CHAIN(param)) ++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param))) ++ return true; ++ return false; ++} ++ ++void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum) ++{ ++ location_t loc; ++ ++ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF) ++ return; ++ ++ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES) ++ return; ++ ++ loc = DECL_SOURCE_LOCATION(decl); ++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum); ++} ++ ++// Get the field decl of a component ref for intentional_overflow checking ++static const_tree search_field_decl(const_tree comp_ref) ++{ ++ const_tree field = NULL_TREE; ++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref); ++ ++ for (i = 0; i < len; i++) { ++ field = TREE_OPERAND(comp_ref, i); ++ if (TREE_CODE(field) == FIELD_DECL) ++ break; ++ } ++ gcc_assert(TREE_CODE(field) == FIELD_DECL); ++ return field; ++} ++ ++/* Get the type of the intentional_overflow attribute of a node ++ * * MARK_TURN_OFF ++ * * MARK_YES ++ * * MARK_NO ++ * * MARK_NOT_INTENTIONAL ++ */ ++enum mark get_intentional_attr_type(const_tree node) ++{ ++ const_tree cur_decl; ++ ++ if (node == NULL_TREE) ++ return MARK_NO; ++ ++ switch (TREE_CODE(node)) { ++ case COMPONENT_REF: ++ cur_decl = search_field_decl(node); ++ if (is_turn_off_intentional_attr(cur_decl)) ++ return MARK_TURN_OFF; ++ if (is_end_intentional_intentional_attr(cur_decl, 1)) ++ return MARK_YES; ++ break; ++ case PARM_DECL: { ++ unsigned int argnum; ++ ++ cur_decl = DECL_ORIGIN(current_function_decl); ++ argnum = find_arg_number_tree(node, cur_decl); ++ if (argnum == CANNOT_FIND_ARG) ++ return MARK_NO; ++ if (is_yes_intentional_attr(cur_decl, argnum)) ++ return MARK_YES; ++ if (is_end_intentional_intentional_attr(cur_decl, argnum)) ++ return MARK_NOT_INTENTIONAL; ++ break; ++ } ++ case FUNCTION_DECL: ++ if (is_turn_off_intentional_attr(DECL_ORIGIN(node))) ++ return MARK_TURN_OFF; ++ break; ++ default: ++ break; ++ } ++ return MARK_NO; ++} ++ ++// Search for the intentional_overflow attribute on the last nodes ++static enum mark search_last_nodes_intentional(struct 
interesting_node *cur_node) ++{ ++ unsigned int i; ++ tree last_node; ++ enum mark mark = MARK_NO; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) { ++#else ++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) { ++#endif ++ mark = get_intentional_attr_type(last_node); ++ if (mark != MARK_NO) ++ break; ++ } ++ return mark; ++} ++ ++/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and ++ * set the appropriate intentional_overflow type. Delete the asm stmt in the end. ++ */ ++static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node) ++{ ++ if (!cur_node->intentional_mark_from_gimple) ++ return false; ++ ++ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple)) ++ cur_node->intentional_attr_cur_fndecl = MARK_YES; ++ else ++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF; ++ ++ // skip param decls ++ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0) ++ return true; ++ return true; ++} ++ ++/* Search intentional_overflow attribute on caller and on callee too. ++ * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes ++ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int) ++ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup ++*/ ++void check_intentional_attribute_ipa(struct interesting_node *cur_node) ++{ ++ const_tree fndecl; ++ ++ if (is_intentional_attribute_from_gimple(cur_node)) ++ return; ++ ++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) { ++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF; ++ return; ++ } ++ ++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) { ++ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL; ++ return; ++ } ++ ++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN) ++ return; ++ ++ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num); ++ if (is_turn_off_intentional_attr(fndecl)) { ++ cur_node->intentional_attr_decl = MARK_TURN_OFF; ++ return; ++ } ++ ++ if (is_end_intentional_intentional_attr(fndecl, cur_node->num)) ++ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL; ++ else if (is_yes_intentional_attr(fndecl, cur_node->num)) ++ cur_node->intentional_attr_decl = MARK_YES; ++ ++ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node); ++ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num); ++} ++ ++bool is_a_cast_and_const_overflow(const_tree no_const_rhs) ++{ ++ const_tree rhs1, lhs, rhs1_type, lhs_type; ++ enum machine_mode lhs_mode, rhs_mode; ++ gimple def_stmt = get_def_stmt(no_const_rhs); ++ ++ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ lhs = gimple_assign_lhs(def_stmt); ++ rhs1_type = TREE_TYPE(rhs1); ++ lhs_type = TREE_TYPE(lhs); ++ rhs_mode = TYPE_MODE(rhs1_type); ++ lhs_mode = TYPE_MODE(lhs_type); ++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode) ++ return false; ++ ++ return true; ++} ++ ++static unsigned int uses_num(tree node) ++{ ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ unsigned int num = 0; ++ ++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) { ++ gimple use_stmt = USE_STMT(use_p); ++ ++ if (use_stmt == NULL) ++ return num; ++ if (is_gimple_debug(use_stmt)) ++ continue; ++ if (gimple_assign_cast_p(use_stmt) && 
is_size_overflow_type(gimple_assign_lhs(use_stmt))) ++ continue; ++ num++; ++ } ++ return num; ++} ++ ++static bool no_uses(tree node) ++{ ++ return !uses_num(node); ++} ++ ++// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max ++bool is_const_plus_unsigned_signed_truncation(const_tree lhs) ++{ ++ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs_type = TREE_TYPE(rhs1); ++ lhs_type = TREE_TYPE(lhs); ++ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type)) ++ return false; ++ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type)) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs1); ++ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3) ++ return false; ++ ++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2)) ++ return false; ++ ++ if (is_gimple_constant(rhs2)) ++ not_const_rhs = rhs1; ++ else ++ not_const_rhs = rhs2; ++ ++ return no_uses(not_const_rhs); ++} ++ ++static bool is_lt_signed_type_max(const_tree rhs) ++{ ++ const_tree new_type, type_max, type = TREE_TYPE(rhs); ++ ++ if (!TYPE_UNSIGNED(type)) ++ return true; ++ ++ switch (TYPE_MODE(type)) { ++ case QImode: ++ new_type = intQI_type_node; ++ break; ++ case HImode: ++ new_type = intHI_type_node; ++ break; ++ case SImode: ++ new_type = intSI_type_node; ++ break; ++ case DImode: ++ new_type = intDI_type_node; ++ break; ++ default: ++ debug_tree((tree)type); ++ gcc_unreachable(); ++ } ++ ++ type_max = TYPE_MAX_VALUE(new_type); ++ if (!tree_int_cst_lt(type_max, rhs)) ++ return true; ++ ++ return false; ++} ++ ++static bool is_gt_zero(const_tree rhs) ++{ ++ const_tree type = TREE_TYPE(rhs); ++ ++ if (TYPE_UNSIGNED(type)) ++ return true; ++ ++ if (!tree_int_cst_lt(rhs, integer_zero_node)) ++ return true; ++ ++ return false; ++} ++ ++bool is_a_constant_overflow(const_gimple stmt, const_tree rhs) ++{ ++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR) ++ return false; ++ if (!is_gimple_constant(rhs)) ++ return false; ++ ++ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow ++ if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs)) ++ return false; ++ ++ return true; ++} ++ ++static tree change_assign_rhs(struct visited *visited, gimple stmt, const_tree orig_rhs, tree new_rhs) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(orig_rhs); ++ ++ gcc_assert(is_gimple_assign(stmt)); ++ ++ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++} ++ ++tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2) ++{ ++ tree new_rhs, orig_rhs; ++ void (*gimple_assign_set_rhs)(gimple, tree); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ tree lhs = gimple_assign_lhs(stmt); ++ ++ if (!check_overflow) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (change_rhs == NULL_TREE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (new_rhs2 == NULL_TREE) { ++ orig_rhs = rhs1; ++ 
gimple_assign_set_rhs = &gimple_assign_set_rhs1; ++ } else { ++ orig_rhs = rhs2; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs2; ++ } ++ ++ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT); ++ ++ new_rhs = change_assign_rhs(visited, stmt, orig_rhs, change_rhs); ++ gimple_assign_set_rhs(stmt, new_rhs); ++ update_stmt(stmt); ++ ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++} ++ ++static bool is_subtraction_special(struct visited *visited, const_gimple stmt) ++{ ++ gimple rhs1_def_stmt, rhs2_def_stmt; ++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs; ++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode; ++ const_tree rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs2 = gimple_assign_rhs2(stmt); ++ ++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2)) ++ return false; ++ ++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME); ++ ++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR) ++ return false; ++ ++ rhs1_def_stmt = get_def_stmt(rhs1); ++ rhs2_def_stmt = get_def_stmt(rhs2); ++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt)) ++ return false; ++ ++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); ++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt); ++ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt); ++ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt); ++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1)); ++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1)); ++ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs)); ++ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs)); ++ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode)) ++ return false; ++ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode)) ++ return false; ++ ++ pointer_set_insert(visited->no_cast_check, rhs1_def_stmt); ++ pointer_set_insert(visited->no_cast_check, rhs2_def_stmt); ++ return true; ++} ++ ++static gimple create_binary_assign(struct visited *visited, enum tree_code code, gimple stmt, tree rhs1, tree rhs2) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree type = TREE_TYPE(rhs1); ++ tree lhs = create_new_var(type); ++ ++ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2))); ++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2); ++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ pointer_set_insert(visited->my_stmts, assign); ++ return assign; ++} ++ ++static tree cast_to_TI_type(struct visited *visited, gimple stmt, tree node) ++{ ++ gimple_stmt_iterator gsi; ++ gimple cast_stmt; ++ tree type = TREE_TYPE(node); ++ ++ if (types_compatible_p(type, intTI_type_node)) ++ return node; ++ ++ gsi = gsi_for_stmt(stmt); ++ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, cast_stmt); ++ return gimple_assign_lhs(cast_stmt); ++} ++ ++static tree get_def_stmt_rhs(struct visited *visited, const_tree var) ++{ ++ tree rhs1, def_stmt_rhs1; ++ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt; ++ ++ def_stmt = get_def_stmt(var); ++ if (!gimple_assign_cast_p(def_stmt)) ++ return NULL_TREE; ++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP 
&& pointer_set_contains(visited->my_stmts, def_stmt) && gimple_assign_cast_p(def_stmt));
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ if (!gimple_assign_cast_p(rhs1_def_stmt))
++ return rhs1;
++
++ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
++
++ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
++ case GIMPLE_CALL:
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ case GIMPLE_PHI:
++ return def_stmt_rhs1;
++ case GIMPLE_ASSIGN:
++ return rhs1;
++ default:
++ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
++ gcc_unreachable();
++ }
++}
++
++tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs)
++{
++ tree new_rhs1, new_rhs2;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_subtraction_special(visited, stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs2);
++
++ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
++ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(visited, MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_assign_lhs(assign);
++ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return false;
++
++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
++ return false;
++
++ def_stmt = get_def_stmt(rhs);
++ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
++ return false;
++
++ return true;
++}
++
++/* e.g., drivers/acpi/acpica/utids.c acpi_ut_execute_CID()
++ * ((count - 1) * sizeof(struct acpi_pnp_device_id_list) -> (count + fffffff) * 16
++ * fffffff * 16 > signed max -> truncate
++ */
++static bool look_for_mult_and_add(const_gimple stmt)
++{
++ const_tree res;
++ tree rhs1, rhs2, def_rhs1, def_rhs2, const_rhs, def_const_rhs;
++ const_gimple def_stmt;
++
++ if (!stmt || gimple_code(stmt) == GIMPLE_NOP)
++ return false;
++ if (!is_gimple_assign(stmt))
++ return false;
++ if (gimple_assign_rhs_code(stmt) != MULT_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (is_gimple_constant(rhs1)) {
++ const_rhs = rhs1;
++ def_stmt = get_def_stmt(rhs2);
++ } else if (is_gimple_constant(rhs2)) {
++ const_rhs = rhs2;
++ def_stmt = get_def_stmt(rhs1);
++ } else
++ return false;
++
++ if (!is_gimple_assign(def_stmt))
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
++ return false;
++
++ def_rhs1 = gimple_assign_rhs1(def_stmt);
++ def_rhs2 = gimple_assign_rhs2(def_stmt);
++ if (is_gimple_constant(def_rhs1))
++ def_const_rhs = def_rhs1;
++ else if
(is_gimple_constant(def_rhs2)) ++ def_const_rhs = def_rhs2; ++ else ++ return false; ++ ++ res = fold_binary_loc(gimple_location(def_stmt), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs); ++ if (is_lt_signed_type_max(res) && is_gt_zero(res)) ++ return false; ++ return true; ++} ++ ++enum intentional_overflow_type add_mul_intentional_overflow(const_gimple stmt) ++{ ++ const_gimple def_stmt_1, def_stmt_2; ++ const_tree rhs1, rhs2; ++ bool add_mul_rhs1, add_mul_rhs2; ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ def_stmt_1 = get_def_stmt(rhs1); ++ add_mul_rhs1 = look_for_mult_and_add(def_stmt_1); ++ ++ rhs2 = gimple_assign_rhs2(stmt); ++ def_stmt_2 = get_def_stmt(rhs2); ++ add_mul_rhs2 = look_for_mult_and_add(def_stmt_2); ++ ++ if (add_mul_rhs1) ++ return RHS1_INTENTIONAL_OVERFLOW; ++ if (add_mul_rhs2) ++ return RHS2_INTENTIONAL_OVERFLOW; ++ return NO_INTENTIONAL_OVERFLOW; ++} ++ ++static gimple get_dup_stmt(struct visited *visited, gimple stmt) ++{ ++ gimple my_stmt; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ gsi_next(&gsi); ++ my_stmt = gsi_stmt(gsi); ++ ++ gcc_assert(pointer_set_contains(visited->my_stmts, my_stmt)); ++ gcc_assert(gimple_assign_rhs_code(stmt) == gimple_assign_rhs_code(my_stmt)); ++ ++ return my_stmt; ++} ++ ++/* unsigned type -> unary or binary assign (rhs1 or rhs2 is constant) ++ * unsigned type cast to signed type, unsigned type: no more uses ++ * e.g., lib/vsprintf.c:simple_strtol() ++ * _10 = (unsigned long int) _9 ++ * _11 = -_10; ++ * _12 = (long int) _11; (_11_ no more uses) ++ */ ++static bool is_call_or_cast(gimple stmt) ++{ ++ return gimple_assign_cast_p(stmt) || is_gimple_call(stmt); ++} ++ ++static bool is_unsigned_cast_or_call_def_stmt(const_tree node) ++{ ++ const_tree rhs; ++ gimple def_stmt; ++ ++ if (node == NULL_TREE) ++ return true; ++ if (is_gimple_constant(node)) ++ return true; ++ ++ def_stmt = get_def_stmt(node); ++ if (!def_stmt) ++ return false; ++ ++ if (is_call_or_cast(def_stmt)) ++ return true; ++ ++ if (!is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 2) ++ return false; ++ rhs = gimple_assign_rhs1(def_stmt); ++ def_stmt = get_def_stmt(rhs); ++ if (!def_stmt) ++ return false; ++ return is_call_or_cast(def_stmt); ++} ++ ++void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt) ++{ ++ unsigned int use_num; ++ gimple so_stmt; ++ const_gimple def_stmt; ++ const_tree rhs1, rhs2; ++ tree rhs = gimple_assign_rhs1(stmt); ++ tree lhs_type = TREE_TYPE(gimple_assign_lhs(stmt)); ++ const_tree rhs_type = TREE_TYPE(rhs); ++ ++ if (!(TYPE_UNSIGNED(rhs_type) && !TYPE_UNSIGNED(lhs_type))) ++ return; ++ if (GET_MODE_BITSIZE(TYPE_MODE(rhs_type)) != GET_MODE_BITSIZE(TYPE_MODE(lhs_type))) ++ return; ++ use_num = uses_num(rhs); ++ if (use_num != 1) ++ return; ++ ++ def_stmt = get_def_stmt(rhs); ++ if (!def_stmt) ++ return; ++ if (!is_gimple_assign(def_stmt)) ++ return; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ if (!is_unsigned_cast_or_call_def_stmt(rhs1)) ++ return; ++ ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ if (!is_unsigned_cast_or_call_def_stmt(rhs2)) ++ return; ++ if (gimple_num_ops(def_stmt) == 3 && !is_gimple_constant(rhs1) && !is_gimple_constant(rhs2)) ++ return; ++ ++ so_stmt = get_dup_stmt(visited, stmt); ++ create_up_and_down_cast(visited, so_stmt, lhs_type, gimple_assign_rhs1(so_stmt)); ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c +new file mode 100644 +index 0000000..4bddad2 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/misc.c 
+@@ -0,0 +1,203 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++void set_current_function_decl(tree fndecl) ++{ ++ gcc_assert(fndecl != NULL_TREE); ++ ++ push_cfun(DECL_STRUCT_FUNCTION(fndecl)); ++ calculate_dominance_info(CDI_DOMINATORS); ++ current_function_decl = fndecl; ++} ++ ++void unset_current_function_decl(void) ++{ ++ free_dominance_info(CDI_DOMINATORS); ++ pop_cfun(); ++ current_function_decl = NULL_TREE; ++} ++ ++static bool is_bool(const_tree node) ++{ ++ const_tree type; ++ ++ if (node == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(node); ++ if (!INTEGRAL_TYPE_P(type)) ++ return false; ++ if (TREE_CODE(type) == BOOLEAN_TYPE) ++ return true; ++ if (TYPE_PRECISION(type) == 1) ++ return true; ++ return false; ++} ++ ++bool skip_types(const_tree var) ++{ ++ tree type; ++ enum tree_code code; ++ ++ if (is_gimple_constant(var)) ++ return true; ++ ++ switch (TREE_CODE(var)) { ++ case ADDR_EXPR: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case INDIRECT_REF: ++ case TARGET_MEM_REF: ++ case COMPONENT_REF: ++ case VAR_DECL: ++ case VIEW_CONVERT_EXPR: ++ return true; ++ default: ++ break; ++ } ++ ++ code = TREE_CODE(var); ++ gcc_assert(code == SSA_NAME || code == PARM_DECL); ++ ++ type = TREE_TYPE(var); ++ switch (TREE_CODE(type)) { ++ case INTEGER_TYPE: ++ case ENUMERAL_TYPE: ++ return false; ++ case BOOLEAN_TYPE: ++ return is_bool(var); ++ default: ++ return true; ++ } ++} ++ ++gimple get_def_stmt(const_tree node) ++{ ++ gcc_assert(node != NULL_TREE); ++ ++ if (skip_types(node)) ++ return NULL; ++ ++ if (TREE_CODE(node) != SSA_NAME) ++ return NULL; ++ return SSA_NAME_DEF_STMT(node); ++} ++ ++tree create_new_var(tree type) ++{ ++ tree new_var = create_tmp_var(type, "cicus"); ++ ++ add_referenced_var(new_var); ++ return new_var; ++} ++ ++static bool skip_cast(tree dst_type, const_tree rhs, bool force) ++{ ++ const_gimple def_stmt = get_def_stmt(rhs); ++ ++ if (force) ++ return false; ++ ++ if (is_gimple_constant(rhs)) ++ return false; ++ ++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP) ++ return false; ++ ++ if (!types_compatible_p(dst_type, TREE_TYPE(rhs))) ++ return false; ++ ++ // DI type can be on 32 bit (from create_assign) but overflow type stays DI ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) ++ return false; ++ ++ return true; ++} ++ ++tree cast_a_tree(tree type, tree var) ++{ ++ gcc_assert(type != NULL_TREE); ++ gcc_assert(var != NULL_TREE); ++ gcc_assert(fold_convertible_p(type, var)); ++ ++ return fold_convert(type, var); ++} ++ ++gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force) ++{ ++ gimple assign, def_stmt; ++ ++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE); ++ gcc_assert(!is_gimple_constant(rhs)); ++ if (gsi_end_p(*gsi) && before == AFTER_STMT) ++ 
gcc_unreachable(); ++ ++ def_stmt = get_def_stmt(rhs); ++ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && pointer_set_contains(visited->my_stmts, def_stmt)) ++ return def_stmt; ++ ++ if (lhs == CREATE_NEW_VAR) ++ lhs = create_new_var(dst_type); ++ ++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs)); ++ ++ if (!gsi_end_p(*gsi)) { ++ location_t loc = gimple_location(gsi_stmt(*gsi)); ++ gimple_set_location(assign, loc); ++ } ++ ++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ if (before) ++ gsi_insert_before(gsi, assign, GSI_NEW_STMT); ++ else ++ gsi_insert_after(gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ return assign; ++} ++ ++bool is_size_overflow_type(const_tree var) ++{ ++ const char *name; ++ const_tree type_name, type; ++ ++ if (var == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(var); ++ type_name = TYPE_NAME(type); ++ if (type_name == NULL_TREE) ++ return false; ++ ++ if (DECL_P(type_name)) ++ name = DECL_NAME_POINTER(type_name); ++ else ++ name = IDENTIFIER_POINTER(type_name); ++ ++ if (!strncmp(name, "size_overflow_type", 18)) ++ return true; ++ return false; ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c +new file mode 100644 +index 0000000..7c9e6d1 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c +@@ -0,0 +1,138 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
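++ * (Editor's aside for this file, not in the original header: it exists because for the operations
++ * listed in skip_expr_on_double_type() below (divisions, modulos, right shifts) recomputing in a
++ * wider type would change the result rather than just avoid wrap-around, e.g. with u32 operands
++ * ((u32)x) / c differs from ((u64)x) / c once x was meant to be truncated; such operands are
++ * therefore cast down to the original width and back up, see create_up_and_down_cast().)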
++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++bool skip_expr_on_double_type(const_gimple stmt) ++{ ++ enum tree_code code = gimple_assign_rhs_code(stmt); ++ ++ switch (code) { ++ case RSHIFT_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case EXACT_DIV_EXPR: ++ case RDIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs) ++{ ++ const_tree orig_rhs1; ++ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs); ++ gimple down_cast, up_cast; ++ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt); ++ ++ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ down_lhs = gimple_assign_lhs(down_cast); ++ ++ gsi = gsi_for_stmt(use_stmt); ++ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ new_lhs = gimple_assign_lhs(up_cast); ++ ++ orig_rhs1 = gimple_assign_rhs1(use_stmt); ++ if (operand_equal_p(orig_rhs1, rhs, 0)) ++ gimple_assign_set_rhs1(use_stmt, new_lhs); ++ else ++ gimple_assign_set_rhs2(use_stmt, new_lhs); ++ update_stmt(use_stmt); ++ ++ pointer_set_insert(visited->my_stmts, up_cast); ++ pointer_set_insert(visited->my_stmts, down_cast); ++ pointer_set_insert(visited->skip_expr_casts, up_cast); ++ pointer_set_insert(visited->skip_expr_casts, down_cast); ++} ++ ++static tree get_proper_unsigned_half_type(const_tree node) ++{ ++ tree new_type, type; ++ ++ gcc_assert(is_size_overflow_type(node)); ++ ++ type = TREE_TYPE(node); ++ switch (TYPE_MODE(type)) { ++ case HImode: ++ new_type = unsigned_intQI_type_node; ++ break; ++ case SImode: ++ new_type = unsigned_intHI_type_node; ++ break; ++ case DImode: ++ new_type = unsigned_intSI_type_node; ++ break; ++ case TImode: ++ new_type = unsigned_intDI_type_node; ++ break; ++ default: ++ gcc_unreachable(); ++ } ++ ++ if (TYPE_QUALS(type) != 0) ++ return build_qualified_type(new_type, TYPE_QUALS(type)); ++ return new_type; ++} ++ ++static void insert_cast_rhs(struct visited *visited, gimple stmt, tree rhs) ++{ ++ tree type; ++ ++ if (rhs == NULL_TREE) ++ return; ++ if (!is_size_overflow_type(rhs)) ++ return; ++ ++ type = get_proper_unsigned_half_type(rhs); ++ if (is_gimple_constant(rhs)) ++ return; ++ create_up_and_down_cast(visited, stmt, type, rhs); ++} ++ ++static void insert_cast(struct visited *visited, gimple stmt, tree rhs) ++{ ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && !is_size_overflow_type(rhs)) ++ return; ++ gcc_assert(is_size_overflow_type(rhs)); ++ insert_cast_rhs(visited, stmt, rhs); ++} ++ ++void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type) ++{ ++ tree rhs1, rhs2; ++ ++ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS1_INTENTIONAL_OVERFLOW) { ++ rhs1 = gimple_assign_rhs1(stmt); ++ insert_cast(visited, stmt, rhs1); ++ } ++ ++ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS2_INTENTIONAL_OVERFLOW) { ++ rhs2 = gimple_assign_rhs2(stmt); ++ insert_cast(visited, stmt, rhs2); ++ } ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h +new file mode 100644 +index 0000000..e5b4e50 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow.h +@@ -0,0 +1,127 @@ ++#ifndef SIZE_OVERFLOW_H ++#define SIZE_OVERFLOW_H 
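++
++/*
++ * (Editor's sketch, not part of the original header.) How the two attributes driven by this
++ * plugin are meant to appear on kernel prototypes; both declarations are illustrative, not
++ * taken from the patch:
++ *
++ *   void *my_alloc(size_t size, gfp_t flags) __attribute__((size_overflow(1)));
++ *   static u32 my_wrapping_delta(u32 a, u32 b) __attribute__((intentional_overflow(-1)));
++ *
++ * size_overflow lists the 1-based arg positions to recompute in a wider type and check;
++ * intentional_overflow takes -1 (MARK_TURN_OFF), 0 (MARK_NOT_INTENTIONAL) or arg numbers
++ * (MARK_YES), matching get_attribute_param() in intentional_overflow.c.
++ */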
++ ++#define CREATE_NEW_VAR NULL_TREE ++#define CANNOT_FIND_ARG 32 ++#define MAX_PARAM 31 ++#define BEFORE_STMT true ++#define AFTER_STMT false ++ ++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF " ++#define YES_ASM_STR "# size_overflow MARK_YES " ++#define OK_ASM_STR "# size_overflow " ++ ++enum mark { ++ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF ++}; ++ ++enum intentional_overflow_type { ++ NO_INTENTIONAL_OVERFLOW, RHS1_INTENTIONAL_OVERFLOW, RHS2_INTENTIONAL_OVERFLOW ++}; ++ ++struct visited { ++ struct pointer_set_t *stmts; ++ struct pointer_set_t *my_stmts; ++ struct pointer_set_t *skip_expr_casts; ++ struct pointer_set_t *no_cast_check; ++}; ++ ++// size_overflow_plugin.c ++extern tree report_size_overflow_decl; ++extern tree size_overflow_type_HI; ++extern tree size_overflow_type_SI; ++extern tree size_overflow_type_DI; ++extern tree size_overflow_type_TI; ++ ++ ++// size_overflow_plugin_hash.c ++struct size_overflow_hash { ++ const struct size_overflow_hash * const next; ++ const char * const name; ++ const unsigned int param; ++}; ++ ++struct interesting_node { ++ struct interesting_node *next; ++ gimple first_stmt; ++ const_tree fndecl; ++ tree node; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *last_nodes; ++#else ++ vec<tree, va_gc> *last_nodes; ++#endif ++ unsigned int num; ++ enum mark intentional_attr_decl; ++ enum mark intentional_attr_cur_fndecl; ++ gimple intentional_mark_from_gimple; ++}; ++ ++extern bool is_size_overflow_asm(const_gimple stmt); ++extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl); ++extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl); ++extern bool is_missing_function(const_tree orig_fndecl, unsigned int num); ++extern bool is_a_return_check(const_tree node); ++extern const struct size_overflow_hash *get_function_hash(const_tree fndecl); ++extern unsigned int find_arg_number_tree(const_tree arg, const_tree func); ++ ++ ++// size_overflow_debug.c ++extern struct opt_pass *make_dump_pass(void); ++ ++ ++// intentional_overflow.c ++extern enum mark get_intentional_attr_type(const_tree node); ++extern bool is_size_overflow_intentional_asm_yes(const_gimple stmt); ++extern bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt); ++extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum); ++extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum); ++extern bool is_turn_off_intentional_attr(const_tree decl); ++extern void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum); ++extern void check_intentional_attribute_ipa(struct interesting_node *cur_node); ++extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs); ++extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs); ++extern bool is_a_constant_overflow(const_gimple stmt, const_tree rhs); ++extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2); ++extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs); ++extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs); ++extern enum intentional_overflow_type add_mul_intentional_overflow(const_gimple def_stmt); ++extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt); ++ ++ ++// insert_size_overflow_check_ipa.c ++extern unsigned int 
search_function(void); ++extern unsigned int call_count; ++extern struct opt_pass *make_insert_size_overflow_check(void); ++extern const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum); ++ ++ ++// insert_size_overflow_asm.c ++extern struct opt_pass *make_insert_size_overflow_asm_pass(void); ++ ++ ++// misc.c ++extern void set_current_function_decl(tree fndecl); ++extern void unset_current_function_decl(void); ++extern gimple get_def_stmt(const_tree node); ++extern tree create_new_var(tree type); ++extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force); ++extern bool skip_types(const_tree var); ++extern tree cast_a_tree(tree type, tree var); ++extern bool is_size_overflow_type(const_tree var); ++ ++ ++// insert_size_overflow_check_core.c ++extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs); ++extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before); ++extern tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3); ++extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before); ++ ++ ++// remove_unnecessary_dup.c ++extern struct opt_pass *make_remove_unnecessary_dup_pass(void); ++extern void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type); ++extern bool skip_expr_on_double_type(const_gimple stmt); ++extern void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs); ++ ++#endif +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c +new file mode 100644 +index 0000000..4378111 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c +@@ -0,0 +1,116 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
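++ * (Editor's aside: this file is a debugging aid only; dump_functions() below prints every gimple
++ * body to stderr, one banner-delimited block per function:
++ *   -----------------------------------------
++ *   some_function
++ *   -----------------------------------------
++ *   <bb 2>:
++ *   ...phis, then the stmts of the block...
++ * so the effect of the instrumentation passes can be inspected.)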
++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++ ++static unsigned int dump_functions(void) ++{ ++ struct cgraph_node *node; ++ ++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { ++ basic_block bb; ++ ++ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node))); ++ current_function_decl = NODE_DECL(node); ++ ++ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl)); ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator si; ++ ++ fprintf(stderr, "<bb %u>:\n", bb->index); ++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si)) ++ debug_gimple_stmt(gsi_stmt(si)); ++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) ++ debug_gimple_stmt(gsi_stmt(si)); ++ fprintf(stderr, "\n"); ++ } ++ ++ fprintf(stderr, "-------------------------------------------------------------------------\n"); ++ ++ pop_cfun(); ++ current_function_decl = NULL_TREE; ++ } ++ ++ fprintf(stderr, "###############################################################################\n"); ++ ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data dump_pass_data = { ++#else ++static struct ipa_opt_pass_d dump_pass = { ++ .pass = { ++#endif ++ .type = SIMPLE_IPA_PASS, ++ .name = "dump", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = dump_functions, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0, ++#if BUILDING_GCC_VERSION < 4009 ++ }, ++ .generate_summary = NULL, ++ .write_summary = NULL, ++ .read_summary = NULL, ++#if BUILDING_GCC_VERSION >= 4006 ++ .write_optimization_summary = NULL, ++ .read_optimization_summary = NULL, ++#endif ++ .stmt_fixup = NULL, ++ .function_transform_todo_flags_start = 0, ++ .function_transform = NULL, ++ .variable_transform = NULL, ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class dump_pass : public ipa_opt_pass_d { ++public: ++ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {} ++ unsigned int execute() { return dump_functions(); } ++}; ++} ++#endif ++ ++struct opt_pass *make_dump_pass(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return new dump_pass(); ++#else ++ return &dump_pass.pass; ++#endif ++} +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data +new file mode 100644 +index 0000000..4077712 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data +@@ -0,0 +1,5988 @@ ++intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL ++ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL ++storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL ++compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL ++carl9170_alloc_27 carl9170_alloc 1 27 NULL ++sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray ++padzero_55 padzero 1 55 &sel_read_policyvers_55 ++cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL ++__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL ++snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL ++load_msg_95 load_msg 2 95 NULL ++ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL ++init_q_132 init_q 4 
132 NULL ++memstick_alloc_host_142 memstick_alloc_host 1 142 NULL ++gfs2_glock_get_147 gfs2_glock_get 0 147 NULL ++hva_to_gfn_memslot_149 hva_to_gfn_memslot 0-1 149 NULL ++ping_v6_sendmsg_152 ping_v6_sendmsg 4 152 NULL ++ext4_ext_get_actual_len_153 ext4_ext_get_actual_len 0 153 NULL nohasharray ++tracing_trace_options_write_153 tracing_trace_options_write 3 153 &ext4_ext_get_actual_len_153 ++pci_request_selected_regions_169 pci_request_selected_regions 0 169 NULL ++xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL ++xfs_bmap_btalloc_192 xfs_bmap_btalloc 0 192 NULL ++iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL ++ll_xattr_cache_seq_write_250 ll_xattr_cache_seq_write 3 250 NULL ++br_port_info_size_268 br_port_info_size 0 268 NULL ++generic_file_direct_write_291 generic_file_direct_write 0 291 NULL ++read_file_war_stats_292 read_file_war_stats 3 292 NULL ++xfs_zero_last_block_298 xfs_zero_last_block 0 298 NULL ++SYSC_connect_304 SYSC_connect 3 304 NULL ++syslog_print_307 syslog_print 2 307 NULL ++dn_setsockopt_314 dn_setsockopt 5 314 NULL ++mlx5_core_access_reg_361 mlx5_core_access_reg 3-5 361 NULL ++aio_read_events_ring_410 aio_read_events_ring 3-0 410 NULL ++lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL ++snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL ++cfs_trace_set_debug_mb_usrstr_486 cfs_trace_set_debug_mb_usrstr 2 486 NULL ++nvme_trans_modesel_data_488 nvme_trans_modesel_data 4 488 NULL ++iwl_dbgfs_protection_mode_write_502 iwl_dbgfs_protection_mode_write 3 502 NULL ++rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL ++ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL ++zlib_deflate_workspacesize_537 zlib_deflate_workspacesize 0-1-2 537 NULL ++iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL ++sco_sock_setsockopt_552 sco_sock_setsockopt 5 552 NULL ++lpfc_nlp_state_name_556 lpfc_nlp_state_name 2 556 NULL ++snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL ++start_isoc_chain_565 start_isoc_chain 2 565 NULL nohasharray ++dev_hard_header_565 dev_hard_header 0 565 &start_isoc_chain_565 ++ocfs2_refcounted_xattr_delete_need_584 ocfs2_refcounted_xattr_delete_need 0 584 NULL ++smk_write_load_self2_591 smk_write_load_self2 3 591 NULL ++btrfs_stack_file_extent_offset_607 btrfs_stack_file_extent_offset 0 607 NULL ++ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL ++fuse_request_alloc_nofs_617 fuse_request_alloc_nofs 1 617 NULL ++ptlrpc_lprocfs_nrs_seq_write_621 ptlrpc_lprocfs_nrs_seq_write 3 621 NULL ++viafb_dfpl_proc_write_627 viafb_dfpl_proc_write 3 627 NULL ++ceph_osdc_new_request_635 ceph_osdc_new_request 6 635 NULL ++cfs_hash_bkt_size_643 cfs_hash_bkt_size 0 643 NULL ++unlink_queued_645 unlink_queued 4 645 NULL ++dtim_interval_read_654 dtim_interval_read 3 654 NULL ++mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL ++persistent_ram_vmap_709 persistent_ram_vmap 1-2 709 NULL ++xfs_bmap_eof_728 xfs_bmap_eof 0 728 NULL ++sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL ++dvb_video_write_754 dvb_video_write 3 754 NULL ++cfs_trace_allocate_string_buffer_781 cfs_trace_allocate_string_buffer 2 781 NULL ++ath6kl_disconnect_timeout_write_794 ath6kl_disconnect_timeout_write 3 794 NULL ++if_writecmd_815 if_writecmd 2 815 NULL ++aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL ++error_state_read_859 error_state_read 6 859 NULL ++o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray 
++iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879 ++snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL ++carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL ++__nodes_weight_956 __nodes_weight 2-0 956 NULL ++bnx2x_fill_fw_str_968 bnx2x_fill_fw_str 3 968 NULL ++mnt_want_write_975 mnt_want_write 0 975 NULL ++usnic_ib_qp_grp_dump_hdr_989 usnic_ib_qp_grp_dump_hdr 2 989 NULL ++memcmp_990 memcmp 0 990 NULL ++readreg_1017 readreg 0-1 1017 NULL ++smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL ++gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL ++mce_request_packet_1073 mce_request_packet 3 1073 NULL ++agp_create_memory_1075 agp_create_memory 1 1075 NULL ++_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL ++llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL ++nfs4_init_nonuniform_client_string_1097 nfs4_init_nonuniform_client_string 3 1097 NULL ++utf8s_to_utf16s_1115 utf8s_to_utf16s 0 1115 NULL ++cfg80211_report_obss_beacon_1133 cfg80211_report_obss_beacon 3 1133 NULL ++i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL ++ipc_alloc_1192 ipc_alloc 1 1192 NULL ++ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL ++pstore_ftrace_knob_write_1198 pstore_ftrace_knob_write 3 1198 NULL ++i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL ++dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL ++i2cdev_read_1206 i2cdev_read 3 1206 NULL ++lov_ost_pool_init_1215 lov_ost_pool_init 2 1215 NULL ++fsync_buffers_list_1219 fsync_buffers_list 0 1219 NULL ++kernfs_file_direct_read_1238 kernfs_file_direct_read 3 1238 NULL ++acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL ++ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL ++qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL ++ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL ++SyS_flistxattr_1287 SyS_flistxattr 3 1287 NULL ++tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL ++posix_acl_xattr_set_1301 posix_acl_xattr_set 4 1301 NULL ++tcf_hash_create_1305 tcf_hash_create 4 1305 NULL ++ffs_1322 ffs 0 1322 NULL ++qlcnic_pci_sriov_configure_1327 qlcnic_pci_sriov_configure 2 1327 NULL ++btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL ++snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL ++ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL ++fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL ++ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL ++sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL ++do_msgsnd_1387 do_msgsnd 4 1387 NULL ++SYSC_io_getevents_1392 SYSC_io_getevents 3 1392 NULL ++file_read_actor_1401 file_read_actor 4-0 1401 NULL ++cfs_trace_copyout_string_1416 cfs_trace_copyout_string 2 1416 NULL ++init_rs_internal_1436 init_rs_internal 1 1436 NULL ++stack_max_size_read_1445 stack_max_size_read 3 1445 NULL ++tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL ++xprt_alloc_1475 xprt_alloc 2 1475 NULL ++SYSC_syslog_1477 SYSC_syslog 3 1477 NULL ++sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL ++fpregs_set_1497 fpregs_set 4 1497 NULL ++tomoyo_round2_1518 tomoyo_round2 0 1518 NULL ++alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL ++ath6kl_init_get_fwcaps_1557 ath6kl_init_get_fwcaps 3 1557 NULL ++ffs_mutex_lock_1564 ffs_mutex_lock 0 1564 NULL ++ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL ++ipath_ht_handle_hwerrors_1592 ipath_ht_handle_hwerrors 3 1592 
NULL ++packet_buffer_init_1607 packet_buffer_init 2 1607 NULL ++btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL ++v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL ++ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL ++mei_cl_recv_1665 mei_cl_recv 3 1665 NULL ++rmap_add_1677 rmap_add 3 1677 NULL ++configfs_read_file_1683 configfs_read_file 3 1683 NULL ++pdu_write_u_1710 pdu_write_u 3 1710 NULL ++coda_psdev_write_1711 coda_psdev_write 3 1711 NULL ++btrfs_dir_data_len_1714 btrfs_dir_data_len 0 1714 NULL ++internal_create_group_1733 internal_create_group 0 1733 NULL ++dev_irnet_read_1741 dev_irnet_read 3 1741 NULL ++usb_ep_align_maybe_1743 usb_ep_align_maybe 0-3 1743 NULL ++tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL ++cosa_write_1774 cosa_write 3 1774 NULL ++fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL ++__nodelist_scnprintf_1815 __nodelist_scnprintf 2-0 1815 NULL ++sb_issue_zeroout_1884 sb_issue_zeroout 3 1884 NULL ++rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL ++nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL ++SyS_add_key_1900 SyS_add_key 4 1900 NULL ++uhid_char_read_1920 uhid_char_read 3 1920 NULL ++tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL ++bdev_erase_1933 bdev_erase 3 1933 NULL ++ext3_fiemap_1936 ext3_fiemap 4 1936 NULL ++cyttsp_probe_1940 cyttsp_probe 4 1940 NULL ++ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL ++update_qd_1955 update_qd 0 1955 NULL ++ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL ++sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL ++gpio_power_write_1991 gpio_power_write 3 1991 NULL ++__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL ++rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL ++xfs_mru_cache_insert_2013 xfs_mru_cache_insert 0 2013 NULL ++ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL ++write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL ++BcmCopySection_2035 BcmCopySection 5 2035 NULL ++ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL ++ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL ++__generic_copy_from_user_intel_2073 __generic_copy_from_user_intel 0-3 2073 NULL ++diva_set_driver_dbg_mask_2077 diva_set_driver_dbg_mask 0 2077 NULL ++iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL ++idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL ++audit_expand_2098 audit_expand 0 2098 NULL ++__set_print_fmt_2106 __set_print_fmt 0 2106 NULL ++iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL ++ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL ++btrfs_file_extent_inline_len_2116 btrfs_file_extent_inline_len 0 2116 NULL ++enable_read_2117 enable_read 3 2117 NULL ++pcf50633_write_block_2124 pcf50633_write_block 2-3 2124 NULL ++xfs_recover_inode_owner_change_2132 xfs_recover_inode_owner_change 0 2132 NULL ++check_load_and_stores_2143 check_load_and_stores 2 2143 NULL ++iov_iter_count_2152 iov_iter_count 0 2152 NULL ++__copy_to_user_ll_2157 __copy_to_user_ll 0-3 2157 NULL ++_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL ++bio_integrity_alloc_2194 bio_integrity_alloc 3 2194 NULL ++picolcd_debug_reset_write_2195 picolcd_debug_reset_write 3 2195 NULL ++xfs_inobt_update_2206 xfs_inobt_update 0 2206 NULL ++u32_array_read_2219 u32_array_read 3 2219 NULL nohasharray ++mei_dbgfs_read_meclients_2219 mei_dbgfs_read_meclients 3 2219 
&u32_array_read_2219 ++__ocfs2_journal_access_2241 __ocfs2_journal_access 0 2241 NULL ++ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL ++netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL ++sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL ++do_update_counters_2259 do_update_counters 4 2259 NULL ++ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL ++debug_debug5_read_2291 debug_debug5_read 3 2291 NULL ++sr_read_cmd_2299 sr_read_cmd 5 2299 NULL ++kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL ++intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL ++hfsplus_find_init_2318 hfsplus_find_init 0 2318 NULL nohasharray ++picolcd_fb_write_2318 picolcd_fb_write 3 2318 &hfsplus_find_init_2318 ++dice_hwdep_read_2326 dice_hwdep_read 3 2326 NULL ++__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL ++zr364xx_read_2354 zr364xx_read 3 2354 NULL ++ntfs_file_aio_write_nolock_2360 ntfs_file_aio_write_nolock 0 2360 NULL ++viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL ++xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5-0 2368 NULL nohasharray ++rose_recvmsg_2368 rose_recvmsg 4 2368 &xfs_buf_map_from_irec_2368 ++il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL ++rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL ++isdn_v110_open_2418 isdn_v110_open 3 2418 NULL ++raid1_size_2419 raid1_size 0-2 2419 NULL ++b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL ++wiphy_new_2482 wiphy_new 2 2482 NULL ++bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL ++squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL ++v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL ++ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL ++batadv_tvlv_container_list_size_2524 batadv_tvlv_container_list_size 0 2524 NULL ++smk_write_syslog_2529 smk_write_syslog 3 2529 NULL ++__ceph_setxattr_2532 __ceph_setxattr 4 2532 NULL ++gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL ++pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL ++mdc_max_rpcs_in_flight_seq_write_2594 mdc_max_rpcs_in_flight_seq_write 3 2594 NULL ++slot_bytes_2609 slot_bytes 0 2609 NULL ++smk_write_logging_2618 smk_write_logging 3 2618 NULL ++switch_status_2629 switch_status 5 2629 NULL ++tcp_xmit_size_goal_2661 tcp_xmit_size_goal 2 2661 NULL ++osc_build_ppga_2670 osc_build_ppga 2 2670 NULL ++ffs_ep0_read_2672 ffs_ep0_read 3 2672 NULL ++oti6858_write_2692 oti6858_write 4 2692 NULL ++nfc_llcp_send_ui_frame_2702 nfc_llcp_send_ui_frame 5 2702 NULL ++memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL ++lprocfs_stats_counter_size_2708 lprocfs_stats_counter_size 0 2708 NULL ++gfs2_glock_nq_num_2747 gfs2_glock_nq_num 0 2747 NULL ++xfs_readdir_2767 xfs_readdir 3 2767 NULL ++mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL ++set_msr_hyperv_pw_2785 set_msr_hyperv_pw 3 2785 NULL ++device_add_attrs_2789 device_add_attrs 0 2789 NULL ++iwl_dbgfs_clear_ucode_statistics_write_2804 iwl_dbgfs_clear_ucode_statistics_write 3 2804 NULL ++sel_read_enforce_2828 sel_read_enforce 3 2828 NULL ++vb2_dc_get_userptr_2829 vb2_dc_get_userptr 2-3 2829 NULL ++wait_for_avail_2847 wait_for_avail 0 2847 NULL ++sfq_alloc_2861 sfq_alloc 1 2861 NULL ++irnet_ctrl_read_2863 irnet_ctrl_read 4 2863 NULL ++move_addr_to_user_2868 move_addr_to_user 2 2868 NULL ++nla_padlen_2883 nla_padlen 1 2883 NULL ++cmm_write_2896 cmm_write 3 2896 NULL 
++osc_import_seq_write_2923 osc_import_seq_write 3 2923 NULL ++xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL ++nes_read_indexed_2946 nes_read_indexed 0 2946 NULL ++tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL ++i40e_dbg_prep_dump_buf_2951 i40e_dbg_prep_dump_buf 2 2951 NULL ++set_fast_connectable_2952 set_fast_connectable 4 2952 NULL ++free_area_init_core_2962 free_area_init_core 2-3 2962 NULL ++bio_setup_sector_2970 bio_setup_sector 3 2970 NULL ++do_strnlen_user_2976 do_strnlen_user 0-2 2976 NULL ++p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL ++_xfs_filestream_pick_ag_3007 _xfs_filestream_pick_ag 0 3007 NULL ++lov_stripetype_seq_write_3013 lov_stripetype_seq_write 3 3013 NULL ++do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL ++depth_write_3021 depth_write 3 3021 NULL ++snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL ++nvme_split_and_submit_3027 nvme_split_and_submit 3 3027 NULL ++kvm_unmap_hva_3028 kvm_unmap_hva 2 3028 NULL ++xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL ++lpfc_idiag_mbxacc_write_3038 lpfc_idiag_mbxacc_write 3 3038 NULL nohasharray ++iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 &lpfc_idiag_mbxacc_write_3038 ++nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL ++il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL ++qp_alloc_ppn_set_3068 qp_alloc_ppn_set 2-4 3068 NULL ++__blk_end_bidi_request_3070 __blk_end_bidi_request 3-4 3070 NULL ++dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL ++read_file_antenna_diversity_3077 read_file_antenna_diversity 3 3077 NULL ++ttusb2_msg_3100 ttusb2_msg 4 3100 NULL ++rb_alloc_3102 rb_alloc 1 3102 NULL ++simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL ++print_time_3132 print_time 0 3132 NULL ++fill_write_buffer_3142 fill_write_buffer 3 3142 NULL ++CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL ++compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL ++gfs2_rindex_update_3165 gfs2_rindex_update 0 3165 NULL ++uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL ++uvc_video_stats_dump_3181 uvc_video_stats_dump 3 3181 NULL ++compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL ++mempool_create_node_3191 mempool_create_node 1 3191 NULL ++alloc_context_3194 alloc_context 1 3194 NULL ++shmem_pread_slow_3198 shmem_pread_slow 3-2 3198 NULL ++codec_reg_write_file_3204 codec_reg_write_file 3 3204 NULL ++SyS_sendto_3219 SyS_sendto 6 3219 NULL ++btrfs_prealloc_file_range_3227 btrfs_prealloc_file_range 3 3227 NULL ++kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL ++write_adapter_mem_3234 write_adapter_mem 3 3234 NULL ++do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL ++ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL ++key_key_read_3241 key_key_read 3 3241 NULL ++__ilog2_u64_3284 __ilog2_u64 0 3284 NULL ++__set_extent_bit_3305 __set_extent_bit 0 3305 NULL ++__iovec_copy_from_user_inatomic_3314 __iovec_copy_from_user_inatomic 0-4-3 3314 NULL ++_iwl_dbgfs_d3_sram_write_3315 _iwl_dbgfs_d3_sram_write 3 3315 NULL ++dbDiscardAG_3322 dbDiscardAG 3 3322 NULL ++compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL ++read_from_oldmem_3337 read_from_oldmem 2 3337 NULL ++sysfs_create_group_3339 sysfs_create_group 0 3339 NULL ++tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL ++il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL ++gsm_control_rls_3353 gsm_control_rls 3 3353 NULL 
++scnprintf_3360 scnprintf 0-2 3360 NULL ++ReadByteAmd7930_3365 ReadByteAmd7930 0 3365 NULL ++sr_read_3366 sr_read 3 3366 NULL ++mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL ++send_stream_3397 send_stream 4 3397 NULL ++isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL ++mei_io_cb_alloc_resp_buf_3414 mei_io_cb_alloc_resp_buf 2 3414 NULL ++pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL ++crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL ++SyS_msgsnd_3436 SyS_msgsnd 3 3436 NULL ++pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL ++softsynth_write_3455 softsynth_write 3 3455 NULL ++snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 5-4-2 3464 NULL ++security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL ++xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1 3496 NULL ++mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL ++SyS_semtimedop_3532 SyS_semtimedop 3 3532 NULL ++SyS_readv_3539 SyS_readv 3 3539 NULL ++btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL ++alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL ++evtchn_read_3569 evtchn_read 3 3569 NULL ++ll_track_ppid_seq_write_3582 ll_track_ppid_seq_write 3 3582 NULL ++vc_resize_3585 vc_resize 3-2 3585 NULL ++kvm_mmu_notifier_change_pte_3596 kvm_mmu_notifier_change_pte 3 3596 NULL ++sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL ++edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL ++tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL ++aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL ++ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL ++i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL ++xfs_attr3_leaf_list_int_3661 xfs_attr3_leaf_list_int 0 3661 NULL ++_iwl_dbgfs_tx_flush_write_3675 _iwl_dbgfs_tx_flush_write 3 3675 NULL ++snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 NULL ++ci_ll_write_3740 ci_ll_write 4 3740 NULL ++sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL ++ncp_file_write_3813 ncp_file_write 3 3813 NULL ++llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL ++hfsplus_direct_IO_3835 hfsplus_direct_IO 4 3835 NULL ++create_one_cdev_3852 create_one_cdev 2 3852 NULL ++smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL ++get_fd_set_3866 get_fd_set 1 3866 NULL ++apei_res_sub_3873 apei_res_sub 0 3873 NULL ++garp_attr_create_3883 garp_attr_create 3 3883 NULL ++efivarfs_file_read_3893 efivarfs_file_read 3 3893 NULL ++nvram_write_3894 nvram_write 3 3894 NULL ++pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL ++comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL ++vcs_write_3910 vcs_write 3 3910 NULL ++SyS_move_pages_3920 SyS_move_pages 2 3920 NULL ++hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL ++brcmf_debugfs_fws_stats_read_3947 brcmf_debugfs_fws_stats_read 3 3947 NULL ++mite_bytes_written_to_memory_lb_3987 mite_bytes_written_to_memory_lb 0 3987 NULL ++do_add_counters_3992 do_add_counters 3 3992 NULL ++xfs_bmbt_lookup_eq_3997 xfs_bmbt_lookup_eq 0 3997 NULL ++obd_alloc_memmd_4002 obd_alloc_memmd 0 4002 NULL ++userspace_status_4004 userspace_status 4 4004 NULL ++xfs_check_block_4005 xfs_check_block 4 4005 NULL nohasharray ++mei_write_4005 mei_write 3 4005 &xfs_check_block_4005 ++snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL ++blk_end_request_4024 blk_end_request 3 4024 NULL ++ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL ++xfs_free_ag_extent_4036 xfs_free_ag_extent 0 4036 NULL ++mtip_hw_read_registers_4037 
mtip_hw_read_registers 3 4037 NULL ++read_file_queues_4078 read_file_queues 3 4078 NULL ++fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL ++tm6000_read_4151 tm6000_read 3 4151 NULL ++mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL ++msg_bits_4158 msg_bits 0-3-4 4158 NULL ++get_alua_req_4166 get_alua_req 3 4166 NULL ++blk_dropped_read_4168 blk_dropped_read 3 4168 NULL ++read_file_bool_4180 read_file_bool 3 4180 NULL ++ocfs2_find_cpos_for_right_leaf_4194 ocfs2_find_cpos_for_right_leaf 0 4194 NULL ++vring_new_virtqueue_4199 vring_new_virtqueue 2 4199 NULL ++f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL ++_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL ++__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL ++ath6kl_force_roam_write_4282 ath6kl_force_roam_write 3 4282 NULL ++goldfish_audio_write_4284 goldfish_audio_write 3 4284 NULL ++__usbnet_read_cmd_4299 __usbnet_read_cmd 7 4299 NULL ++dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 2-3-5 4303 NULL ++count_strings_4315 count_strings 0 4315 NULL ++nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL ++snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL ++ima_eventdigest_init_common_4338 ima_eventdigest_init_common 2 4338 NULL ++__copy_from_user_inatomic_4365 __copy_from_user_inatomic 0-3 4365 NULL nohasharray ++lookup_string_4365 lookup_string 0 4365 &__copy_from_user_inatomic_4365 ++irda_sendmsg_4388 irda_sendmsg 4 4388 NULL ++access_process_vm_4412 access_process_vm 0 4412 NULL nohasharray ++cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412 ++libfc_vport_create_4415 libfc_vport_create 2 4415 NULL ++rtw_android_get_rssi_4421 rtw_android_get_rssi 0 4421 NULL ++do_pages_stat_4437 do_pages_stat 2 4437 NULL ++at76_set_card_command_4471 at76_set_card_command 4 4471 NULL ++snd_seq_expand_var_event_4481 snd_seq_expand_var_event 5-0 4481 NULL ++vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL ++set_link_security_4502 set_link_security 4 4502 NULL ++xfs_btree_kill_root_4526 xfs_btree_kill_root 0 4526 NULL ++ll_max_readahead_per_file_mb_seq_write_4531 ll_max_readahead_per_file_mb_seq_write 3 4531 NULL ++tty_register_device_4544 tty_register_device 2 4544 NULL ++btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL ++xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL ++bch_alloc_4593 bch_alloc 1 4593 NULL ++ocfs2_refcount_lock_4595 ocfs2_refcount_lock 0 4595 NULL ++ll_rw_extents_stats_seq_write_4633 ll_rw_extents_stats_seq_write 3 4633 NULL ++iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL ++skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL ++cx18_read_pos_4683 cx18_read_pos 3 4683 NULL ++short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL ++kone_receive_4690 kone_receive 4 4690 NULL ++hash_netportnet6_expire_4702 hash_netportnet6_expire 4 4702 NULL ++cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL ++ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray ++show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722 ++bitmap_startwrite_4736 bitmap_startwrite 2 4736 NULL nohasharray ++ll_rw_offset_stats_seq_write_4736 ll_rw_offset_stats_seq_write 3 4736 &bitmap_startwrite_4736 ++lu_buf_alloc_4753 lu_buf_alloc 2 4753 NULL ++pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL ++create_subvol_4791 create_subvol 4 4791 NULL ++ncp__vol2io_4804 ncp__vol2io 5 4804 NULL ++repair_io_failure_4815 
repair_io_failure 4-3 4815 NULL ++comedi_buf_write_free_4847 comedi_buf_write_free 2 4847 NULL ++gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL ++key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL ++ocfs2_defrag_extent_4873 ocfs2_defrag_extent 2 4873 NULL ++hid_register_field_4874 hid_register_field 2-3 4874 NULL ++vga_arb_read_4886 vga_arb_read 3 4886 NULL ++ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL ++ocfs2_should_refresh_lock_res_4958 ocfs2_should_refresh_lock_res 0 4958 NULL ++compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL ++ath10k_read_chip_id_4969 ath10k_read_chip_id 3 4969 NULL ++skb_network_header_len_4971 skb_network_header_len 0 4971 NULL ++ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval_4976 ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval 3 4976 NULL ++compat_SyS_ipc_5000 compat_SyS_ipc 3 5000 NULL ++do_mincore_5018 do_mincore 0-2-1 5018 NULL ++btrfs_punch_hole_5041 btrfs_punch_hole 2 5041 NULL ++cfg80211_rx_mgmt_5056 cfg80211_rx_mgmt 5 5056 NULL ++ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 3-2 5066 NULL ++snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL ++snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL ++kfifo_copy_from_user_5091 kfifo_copy_from_user 3-4-0 5091 NULL nohasharray ++get_random_bytes_5091 get_random_bytes 2 5091 &kfifo_copy_from_user_5091 nohasharray ++blk_rq_sectors_5091 blk_rq_sectors 0 5091 &get_random_bytes_5091 ++sound_write_5102 sound_write 3 5102 NULL ++i40e_dbg_netdev_ops_write_5117 i40e_dbg_netdev_ops_write 3 5117 NULL ++qib_7220_handle_hwerrors_5142 qib_7220_handle_hwerrors 3 5142 NULL ++__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL ++iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL ++acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL ++ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL ++dwc2_hcd_urb_alloc_5217 dwc2_hcd_urb_alloc 2 5217 NULL ++ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL ++usb_descriptor_fillbuf_5302 usb_descriptor_fillbuf 0 5302 NULL ++r592_write_fifo_pio_5315 r592_write_fifo_pio 3 5315 NULL ++sbc_get_write_same_sectors_5317 sbc_get_write_same_sectors 0 5317 NULL ++pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL ++cq_free_res_5355 cq_free_res 5 5355 NULL ++ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL ++cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL ++xfs_free_extent_5411 xfs_free_extent 0 5411 NULL ++xfs_efd_init_5463 xfs_efd_init 3 5463 NULL ++ll_xattr_cache_refill_5468 ll_xattr_cache_refill 0 5468 NULL ++kernfs_fop_write_5471 kernfs_fop_write 3 5471 NULL ++xfs_efi_init_5476 xfs_efi_init 2 5476 NULL ++cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL ++tty_write_5494 tty_write 3 5494 NULL ++tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray ++ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498 ++__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL ++ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL ++get_entry_msg_len_5552 get_entry_msg_len 0 5552 NULL ++le_readq_5557 le_readq 0 5557 NULL ++inw_5558 inw 0 5558 NULL ++gfs2_extent_map_5561 gfs2_extent_map 0 5561 NULL ++bioset_create_5580 bioset_create 1 5580 NULL ++oz_ep_alloc_5587 oz_ep_alloc 1 5587 NULL ++__remove_suid_5618 __remove_suid 0 5618 NULL ++gfs2_unstuffer_page_5620 gfs2_unstuffer_page 0 5620 NULL ++SYSC_fsetxattr_5639 
SYSC_fsetxattr 4 5639 NULL ++ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL ++posix_clock_register_5662 posix_clock_register 2 5662 NULL ++get_arg_5694 get_arg 3 5694 NULL ++subbuf_read_actor_5708 subbuf_read_actor 3 5708 NULL ++vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL ++rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL ++sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL ++__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL ++nvme_trans_bdev_char_page_5797 nvme_trans_bdev_char_page 3 5797 NULL ++skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL ++nv50_disp_pioc_create__5812 nv50_disp_pioc_create_ 5 5812 NULL ++ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL ++ceph_msg_new_5846 ceph_msg_new 2 5846 NULL ++setup_req_5848 setup_req 3 5848 NULL ++ria_page_count_5849 ria_page_count 0 5849 NULL ++rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL ++config_buf_5862 config_buf 0 5862 NULL ++lprocfs_fid_width_seq_write_5889 lprocfs_fid_width_seq_write 3 5889 NULL ++port_show_regs_5904 port_show_regs 3 5904 NULL ++rbd_segment_length_5907 rbd_segment_length 0-3-2 5907 NULL ++uhci_debug_read_5911 uhci_debug_read 3 5911 NULL ++lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL ++ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL ++edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL ++ll_statahead_one_5962 ll_statahead_one 3 5962 NULL ++__apu_get_register_5967 __apu_get_register 0 5967 NULL ++ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL ++alloc_msg_6072 alloc_msg 1 6072 NULL ++sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL ++rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL ++__mnt_want_write_6091 __mnt_want_write 0 6091 NULL ++ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL ++dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL ++nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL ++ieee80211_if_fmt_beacon_timeout_6153 ieee80211_if_fmt_beacon_timeout 3 6153 NULL ++ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL ++maybe_insert_hole_6167 maybe_insert_hole 3 6167 NULL ++wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL ++SyS_setgroups_6182 SyS_setgroups 1 6182 NULL ++mxt_show_instance_6207 mxt_show_instance 2-0 6207 NULL ++v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL ++mqueue_read_file_6228 mqueue_read_file 3 6228 NULL ++f_hidg_read_6238 f_hidg_read 3 6238 NULL ++fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL ++tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL ++snd_hda_override_conn_list_6282 snd_hda_override_conn_list 3-0 6282 NULL nohasharray ++xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282 ++posix_acl_fix_xattr_to_user_6283 posix_acl_fix_xattr_to_user 2 6283 NULL ++serial_port_in_6291 serial_port_in 0 6291 NULL ++qlcnic_sriov_alloc_bc_msg_6309 qlcnic_sriov_alloc_bc_msg 2 6309 NULL ++hfa384x_inw_6329 hfa384x_inw 0 6329 NULL nohasharray ++SyS_mincore_6329 SyS_mincore 2-1 6329 &hfa384x_inw_6329 ++fuse_get_req_for_background_6337 fuse_get_req_for_background 2 6337 NULL ++ucs2_strnlen_6342 ucs2_strnlen 0 6342 NULL ++regcache_sync_block_raw_6350 regcache_sync_block_raw 5-4 6350 NULL ++mei_dbgfs_read_devstate_6352 mei_dbgfs_read_devstate 3 6352 NULL ++_proc_do_string_6376 _proc_do_string 2 6376 NULL 
++osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL ++xfs_bmap_extents_to_btree_6387 xfs_bmap_extents_to_btree 0 6387 NULL ++posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL ++add_transaction_credits_6422 add_transaction_credits 2-3 6422 NULL ++ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL ++__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL ++ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL ++qp_memcpy_from_queue_6479 qp_memcpy_from_queue 5-4 6479 NULL ++cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL ++dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL ++mei_read_6507 mei_read 3 6507 NULL ++rndis_set_oid_6547 rndis_set_oid 4 6547 NULL ++wdm_read_6549 wdm_read 3 6549 NULL ++dm_stats_create_6551 dm_stats_create 4-2-3 6551 NULL ++fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL ++SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL ++xfs_iozero_6573 xfs_iozero 0 6573 NULL ++ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL ++process_rcvd_data_6679 process_rcvd_data 3 6679 NULL ++btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL ++ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL ++bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL ++mpeg_read_6708 mpeg_read 3 6708 NULL ++ibmpex_query_sensor_count_6709 ibmpex_query_sensor_count 0 6709 NULL ++video_proc_write_6724 video_proc_write 3 6724 NULL ++posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL ++kobject_add_varg_6781 kobject_add_varg 0 6781 NULL ++iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL ++ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL ++zone_spanned_pages_in_node_6787 zone_spanned_pages_in_node 0-3-4 6787 NULL ++hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL ++ll_xattr_cache_find_6798 ll_xattr_cache_find 0 6798 NULL ++tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL ++lbs_rdrf_write_6826 lbs_rdrf_write 3 6826 NULL ++calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL ++mon_bin_read_6841 mon_bin_read 3 6841 NULL ++snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL ++xfs_rtany_summary_6851 xfs_rtany_summary 0 6851 NULL ++perf_output_sample_ustack_6868 perf_output_sample_ustack 2 6868 NULL ++dio_complete_6879 dio_complete 0-2-3 6879 NULL ++raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 NULL nohasharray ++ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 &raw_seticmpfilter_6888 ++dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL ++proc_sessionid_read_6911 proc_sessionid_read 3 6911 NULL nohasharray ++spi_show_regs_6911 spi_show_regs 3 6911 &proc_sessionid_read_6911 nohasharray ++acm_alloc_minor_6911 acm_alloc_minor 0 6911 &spi_show_regs_6911 ++__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL ++lops_scan_elements_6916 lops_scan_elements 0 6916 NULL ++cache_do_downcall_6926 cache_do_downcall 3 6926 NULL ++ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL ++qsfp_cks_6945 qsfp_cks 2-0 6945 NULL ++tg3_nvram_write_block_unbuffered_6955 tg3_nvram_write_block_unbuffered 3 6955 NULL ++pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL ++rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL nohasharray ++i40e_dbg_dump_write_6973 i40e_dbg_dump_write 3 6973 &rsa_extract_mpi_6973 ++request_key_async_6990 request_key_async 4 6990 NULL ++tpl_write_6998 tpl_write 3 6998 NULL ++r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL ++cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 
NULL ++tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL ++ld_usb_write_7022 ld_usb_write 3 7022 NULL ++wimax_msg_7030 wimax_msg 4 7030 NULL ++ceph_kvmalloc_7033 ceph_kvmalloc 1 7033 NULL ++ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL ++snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL ++hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL ++event_enable_read_7074 event_enable_read 3 7074 NULL ++beacon_interval_read_7091 beacon_interval_read 3 7091 NULL ++pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL ++osc_resend_count_seq_write_7120 osc_resend_count_seq_write 3 7120 NULL ++qib_format_hwerrors_7133 qib_format_hwerrors 5 7133 NULL ++kvm_mmu_notifier_test_young_7139 kvm_mmu_notifier_test_young 3 7139 NULL ++__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL ++hdlc_loop_7255 hdlc_loop 0 7255 NULL ++f_midi_start_ep_7270 f_midi_start_ep 0 7270 NULL ++rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL ++get_string_7302 get_string 0 7302 NULL ++security_inode_need_killpriv_7322 security_inode_need_killpriv 0 7322 NULL ++pci_vpd_info_field_size_7324 pci_vpd_info_field_size 0 7324 NULL ++mgmt_control_7349 mgmt_control 3 7349 NULL ++at_est2timeout_7365 at_est2timeout 0-1 7365 NULL ++ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL ++xfs_btree_delete_7384 xfs_btree_delete 0 7384 NULL ++ath10k_read_fw_stats_7387 ath10k_read_fw_stats 3 7387 NULL ++hweight_long_7388 hweight_long 1-0 7388 NULL ++sl_change_mtu_7396 sl_change_mtu 2 7396 NULL ++_ore_add_stripe_unit_7399 _ore_add_stripe_unit 6-3 7399 NULL ++readb_7401 readb 0 7401 NULL ++drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL ++__copy_to_user_nocheck_7443 __copy_to_user_nocheck 0-3 7443 NULL ++ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL ++SYSC_setgroups_7454 SYSC_setgroups 1 7454 NULL ++rt2x00debug_read_queue_stats_7455 rt2x00debug_read_queue_stats 3 7455 NULL ++l2tp_ip6_sendmsg_7461 l2tp_ip6_sendmsg 4 7461 NULL ++garp_request_join_7471 garp_request_join 4 7471 NULL nohasharray ++ReadHSCX_7471 ReadHSCX 0 7471 &garp_request_join_7471 ++snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL ++iwl_mvm_power_dbgfs_read_7502 iwl_mvm_power_dbgfs_read 0 7502 NULL ++ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 NULL nohasharray ++sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 &ahash_instance_headroom_7509 ++array_zalloc_7519 array_zalloc 1-2 7519 NULL ++ath10k_read_htt_stats_mask_7557 ath10k_read_htt_stats_mask 3 7557 NULL ++smk_read_mapped_7562 smk_read_mapped 3 7562 NULL ++cfs_cpt_num_estimate_7571 cfs_cpt_num_estimate 0 7571 NULL ++ocfs2_lock_create_7612 ocfs2_lock_create 0 7612 NULL ++groups_alloc_7614 groups_alloc 1 7614 NULL nohasharray ++create_dir_7614 create_dir 0 7614 &groups_alloc_7614 ++_rtw_zmalloc_7636 _rtw_zmalloc 1 7636 NULL ++xfs_bmap_btalloc_nullfb_7654 xfs_bmap_btalloc_nullfb 0 7654 NULL ++fault_inject_write_7662 fault_inject_write 3 7662 NULL ++acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 NULL ++acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL ++dev_write_7708 dev_write 3 7708 NULL ++pci_raw_set_power_state_7729 pci_raw_set_power_state 0 7729 NULL ++vxge_device_register_7752 vxge_device_register 4 7752 NULL ++iwl_dbgfs_bt_cmd_read_7770 iwl_dbgfs_bt_cmd_read 3 7770 NULL ++alloc_candev_7776 alloc_candev 1-2 7776 NULL ++dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL 
++bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL ++diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL nohasharray ++lustre_packed_msg_size_7792 lustre_packed_msg_size 0 7792 &diva_os_copy_from_user_7792 ++xfs_alloc_find_best_extent_7837 xfs_alloc_find_best_extent 0 7837 NULL ++cfs_trace_dump_debug_buffer_usrstr_7861 cfs_trace_dump_debug_buffer_usrstr 2 7861 NULL ++tipc_alloc_entry_7875 tipc_alloc_entry 2 7875 NULL ++config_desc_7878 config_desc 0 7878 NULL ++gfs2_permission_7884 gfs2_permission 0 7884 NULL ++dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL ++xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL ++libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL ++f_hidg_write_7932 f_hidg_write 3 7932 NULL ++integrity_digsig_verify_7956 integrity_digsig_verify 3-0 7956 NULL ++smk_write_load_self_7958 smk_write_load_self 3 7958 NULL ++tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL ++vcs_read_8017 vcs_read 3 8017 NULL ++vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL ++ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL ++dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL ++spi_write_then_read_8073 spi_write_then_read 5-3 8073 NULL ++qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 NULL ++venus_lookup_8121 venus_lookup 4 8121 NULL ++ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL ++__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL ++ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL ++recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL ++__ocfs2_lock_refcount_tree_8207 __ocfs2_lock_refcount_tree 0 8207 NULL ++rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL ++ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL ++play_iframe_8219 play_iframe 3 8219 NULL ++kvm_mmu_page_set_gfn_8225 kvm_mmu_page_set_gfn 2 8225 NULL ++sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL ++check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL ++t3_init_l2t_8261 t3_init_l2t 1 8261 NULL ++init_cdev_8274 init_cdev 1 8274 NULL ++rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL ++qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL ++ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL ++tracing_entries_read_8345 tracing_entries_read 3 8345 NULL ++ieee80211_if_fmt_ht_opmode_8347 ieee80211_if_fmt_ht_opmode 3 8347 NULL ++generic_write_sync_8358 generic_write_sync 0 8358 NULL ++ping_getfrag_8360 ping_getfrag 4-3 8360 NULL ++ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3 8362 NULL ++xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL ++zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL ++smk_write_change_rule_8411 smk_write_change_rule 3 8411 NULL nohasharray ++uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 &smk_write_change_rule_8411 ++roccat_common2_sysfs_read_8431 roccat_common2_sysfs_read 6 8431 NULL ++afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL ++fore200e_chunk_alloc_8501 fore200e_chunk_alloc 4-3 8501 NULL ++batadv_tt_len_8502 batadv_tt_len 0-1 8502 NULL ++dev_config_8506 dev_config 3 8506 NULL ++ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL ++opticon_process_data_packet_8524 opticon_process_data_packet 3 8524 NULL ++user_on_off_8552 user_on_off 2 8552 NULL ++profile_remove_8556 profile_remove 3 8556 NULL ++cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL ++isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL ++tower_write_8580 tower_write 3 
8580 NULL ++cfs_cpt_number_8618 cfs_cpt_number 0 8618 NULL ++shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL ++xfs_qm_dqattach_locked_8625 xfs_qm_dqattach_locked 0 8625 NULL ++it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL ++scsi_dma_map_8632 scsi_dma_map 0 8632 NULL ++fuse_send_write_pages_8636 fuse_send_write_pages 0-5 8636 NULL ++mlx5_vzalloc_8663 mlx5_vzalloc 1 8663 NULL ++dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL ++lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL ++rproc_trace_read_8686 rproc_trace_read 3 8686 NULL ++skb_frag_size_8695 skb_frag_size 0 8695 NULL ++arcfb_write_8702 arcfb_write 3 8702 NULL ++i_size_read_8703 i_size_read 0 8703 NULL nohasharray ++init_header_8703 init_header 0 8703 &i_size_read_8703 ++HDLC_irq_8709 HDLC_irq 2 8709 NULL ++ctrl_out_8712 ctrl_out 3-5 8712 NULL ++tracing_max_lat_write_8728 tracing_max_lat_write 3 8728 NULL ++jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL ++__create_irqs_8733 __create_irqs 2 8733 NULL ++tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL ++compound_order_8750 compound_order 0 8750 NULL ++ocfs2_find_path_8754 ocfs2_find_path 0 8754 NULL ++yurex_write_8761 yurex_write 3 8761 NULL ++joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL ++kstrtoint_from_user_8778 kstrtoint_from_user 2 8778 NULL ++paging32_prefetch_gpte_8783 paging32_prefetch_gpte 4 8783 NULL ++ext4_try_to_write_inline_data_8785 ext4_try_to_write_inline_data 3-4 8785 NULL ++__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL ++gfs2_glock_nq_8808 gfs2_glock_nq 0 8808 NULL ++ntfs_commit_pages_after_write_8809 ntfs_commit_pages_after_write 0 8809 NULL ++metronomefb_write_8823 metronomefb_write 3 8823 NULL ++SyS_llistxattr_8824 SyS_llistxattr 3 8824 NULL ++extent_read_full_page_8826 extent_read_full_page 0 8826 NULL ++ll_xattr_cache_get_8829 ll_xattr_cache_get 0 8829 NULL ++get_queue_depth_8833 get_queue_depth 0 8833 NULL ++dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL ++usb_ep_queue_8839 usb_ep_queue 0 8839 NULL ++iwl_rx_packet_len_8854 iwl_rx_packet_len 0 8854 NULL ++debug_debug1_read_8856 debug_debug1_read 3 8856 NULL ++wa_nep_queue_8858 wa_nep_queue 2 8858 NULL ++radeon_drm_ioctl_8875 radeon_drm_ioctl 2 8875 NULL ++compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL ++ab3100_get_set_reg_8890 ab3100_get_set_reg 3 8890 NULL nohasharray ++tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 &ab3100_get_set_reg_8890 ++sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL ++sysfs_merge_group_8917 sysfs_merge_group 0 8917 NULL ++write_file_ani_8918 write_file_ani 3 8918 NULL ++layout_commit_8926 layout_commit 3 8926 NULL ++adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL ++driver_stats_read_8944 driver_stats_read 3 8944 NULL ++read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL ++usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL ++qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL ++venus_mkdir_8967 venus_mkdir 4 8967 NULL ++seq_open_net_8968 seq_open_net 4 8968 NULL nohasharray ++vol_cdev_read_8968 vol_cdev_read 3 8968 &seq_open_net_8968 ++bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL ++jbd2_journal_blocks_per_page_9004 jbd2_journal_blocks_per_page 0 9004 NULL ++il_dbgfs_clear_ucode_stats_write_9016 il_dbgfs_clear_ucode_stats_write 3 9016 NULL ++xfs_inobt_get_rec_9023 xfs_inobt_get_rec 0 9023 NULL ++snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL ++fd_ioctl_9028 fd_ioctl 3 9028 NULL ++nla_put_9042 nla_put 3 9042 NULL 
++ffs_func_revmap_intf_9043 ffs_func_revmap_intf 0 9043 NULL ++sta_tx_latency_stat_header_9050 sta_tx_latency_stat_header 0-3-4 9050 NULL ++snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL ++snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL ++fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL ++create_queues_9088 create_queues 2-3 9088 NULL ++ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL ++adxl34x_spi_read_block_9108 adxl34x_spi_read_block 3 9108 NULL ++caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL nohasharray ++gfn_to_rmap_9110 gfn_to_rmap 3-2 9110 &caif_stream_sendmsg_9110 nohasharray ++jhead_scan_9110 jhead_scan 0 9110 &gfn_to_rmap_9110 ++udf_direct_IO_9111 udf_direct_IO 4 9111 NULL ++pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL ++apei_resources_merge_9149 apei_resources_merge 0 9149 NULL ++vb2_dma_sg_alloc_9157 vb2_dma_sg_alloc 2 9157 NULL ++dbg_command_buf_9165 dbg_command_buf 2 9165 NULL ++isr_irqs_read_9181 isr_irqs_read 3 9181 NULL ++count_leading_zeros_9183 count_leading_zeros 0 9183 NULL ++xfs_btree_rshift_9187 xfs_btree_rshift 0 9187 NULL ++altera_swap_ir_9194 altera_swap_ir 2 9194 NULL ++snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL ++virtqueue_add_9217 virtqueue_add 4-5 9217 NULL ++tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL ++sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL ++xfs_error_trap_9239 xfs_error_trap 0-1 9239 NULL ++hfsplus_bnode_read_u16_9262 hfsplus_bnode_read_u16 0 9262 NULL ++hdpvr_read_9273 hdpvr_read 3 9273 NULL ++flakey_status_9274 flakey_status 5 9274 NULL ++iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL ++ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL ++ieee80211_if_fmt_txpower_9334 ieee80211_if_fmt_txpower 3 9334 NULL ++nvme_trans_fmt_get_parm_header_9340 nvme_trans_fmt_get_parm_header 2 9340 NULL ++ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL ++ll_direct_rw_pages_9361 ll_direct_rw_pages 0 9361 NULL ++sta_beacon_loss_count_read_9370 sta_beacon_loss_count_read 3 9370 NULL ++get_request_type_9393 get_request_type 0 9393 NULL nohasharray ++mlx4_bitmap_init_9393 mlx4_bitmap_init 5-2 9393 &get_request_type_9393 ++virtqueue_add_outbuf_9395 virtqueue_add_outbuf 3 9395 NULL ++read_9397 read 3 9397 NULL ++hash_ipportip4_expire_9415 hash_ipportip4_expire 4 9415 NULL ++btrfs_drop_extents_9423 btrfs_drop_extents 4 9423 NULL ++bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL ++ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL ++ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL ++agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL nohasharray ++get_registers_9470 get_registers 4 9470 &agp_generic_alloc_user_9470 ++crypt_status_9492 crypt_status 5 9492 NULL ++lbs_threshold_write_9502 lbs_threshold_write 5 9502 NULL ++lp_write_9511 lp_write 3 9511 NULL ++mext_calc_swap_extents_9517 mext_calc_swap_extents 4 9517 NULL ++scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL ++ll_max_read_ahead_whole_mb_seq_write_9528 ll_max_read_ahead_whole_mb_seq_write 3 9528 NULL ++read_file_dma_9530 read_file_dma 3 9530 NULL ++iwl_dbgfs_bf_params_read_9542 iwl_dbgfs_bf_params_read 3 9542 NULL ++xfs_ialloc_read_agi_9545 xfs_ialloc_read_agi 0 9545 NULL ++il_dbgfs_missed_beacon_write_9546 il_dbgfs_missed_beacon_write 3 9546 NULL ++compat_SyS_pwritev64_9548 compat_SyS_pwritev64 3 9548 NULL ++fw_node_create_9559 fw_node_create 2 9559 NULL 
++kobj_map_9566 kobj_map 2-3 9566 NULL ++f2fs_read_data_pages_9574 f2fs_read_data_pages 4 9574 NULL ++snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL ++xfs_qm_dqattach_one_9612 xfs_qm_dqattach_one 0 9612 NULL ++lov_ost_pool_add_9626 lov_ost_pool_add 3 9626 NULL ++saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL ++ceph_copy_user_to_page_vector_9635 ceph_copy_user_to_page_vector 4-3 9635 NULL ++acpi_ex_insert_into_field_9638 acpi_ex_insert_into_field 3 9638 NULL ++compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL ++ll_checksum_seq_write_9648 ll_checksum_seq_write 3 9648 NULL ++ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL ++queue_received_packet_9657 queue_received_packet 5 9657 NULL ++snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL ++dns_query_9676 dns_query 3 9676 NULL ++qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL ++__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL ++vx_transfer_end_9701 vx_transfer_end 0 9701 NULL ++fuse_iter_npages_9705 fuse_iter_npages 0 9705 NULL nohasharray ++ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 &fuse_iter_npages_9705 ++cfg80211_tx_mlme_mgmt_9715 cfg80211_tx_mlme_mgmt 3 9715 NULL ++btrfs_stack_file_extent_num_bytes_9720 btrfs_stack_file_extent_num_bytes 0 9720 NULL ++SYSC_ppoll_9721 SYSC_ppoll 2 9721 NULL ++nla_get_u8_9736 nla_get_u8 0 9736 NULL ++ieee80211_if_fmt_num_mcast_sta_9738 ieee80211_if_fmt_num_mcast_sta 3 9738 NULL ++shmem_replace_page_9740 shmem_replace_page 0 9740 NULL ++ddb_input_read_9743 ddb_input_read 3-0 9743 NULL ++sta_last_ack_signal_read_9751 sta_last_ack_signal_read 3 9751 NULL ++btrfs_super_root_9763 btrfs_super_root 0 9763 NULL ++__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL ++kvm_age_hva_9795 kvm_age_hva 2 9795 NULL ++parse_uac2_sample_rate_range_9801 parse_uac2_sample_rate_range 0 9801 NULL ++tpm_data_in_9802 tpm_data_in 0 9802 NULL ++ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 NULL nohasharray ++udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 &ieee80211_if_read_state_9813 ++pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL ++btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL ++f1x_translate_sysaddr_to_cs_9868 f1x_translate_sysaddr_to_cs 2 9868 NULL ++wil_read_file_ioblob_9878 wil_read_file_ioblob 3 9878 NULL ++snd_midi_event_new_9893 snd_midi_event_new 1 9893 NULL nohasharray ++bm_register_write_9893 bm_register_write 3 9893 &snd_midi_event_new_9893 ++snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 5-3 9895 NULL ++nonpaging_page_fault_9908 nonpaging_page_fault 2 9908 NULL ++root_nfs_parse_options_9937 root_nfs_parse_options 3 9937 NULL ++pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL ++read_file_misc_9948 read_file_misc 3 9948 NULL ++csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL ++xfs_buf_geterror_9959 xfs_buf_geterror 0 9959 NULL ++SyS_gethostname_9964 SyS_gethostname 2 9964 NULL ++get_free_serial_index_9969 get_free_serial_index 0 9969 NULL ++btrfs_add_link_9973 btrfs_add_link 5 9973 NULL ++gameport_read_9983 gameport_read 0 9983 NULL ++SYSC_move_pages_9986 SYSC_move_pages 2 9986 NULL ++ceph_oloc_oid_to_pg_10003 ceph_oloc_oid_to_pg 0 10003 NULL ++aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL ++ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL ++xfs_attr_rmtval_get_10092 xfs_attr_rmtval_get 0 10092 NULL ++xfs_btree_check_lptr_10104 xfs_btree_check_lptr 0 10104 NULL 
++get_elem_size_10110 get_elem_size 0-2 10110 NULL nohasharray ++dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 &get_elem_size_10110 ++gfs2_meta_read_10112 gfs2_meta_read 0 10112 NULL ++SyS_migrate_pages_10134 SyS_migrate_pages 2 10134 NULL ++aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL ++rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL ++hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL ++asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL ++kstrtol_from_user_10168 kstrtol_from_user 2 10168 NULL ++proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL ++jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL ++xfs_attr_rmtval_copyout_10222 xfs_attr_rmtval_copyout 0 10222 NULL nohasharray ++xfs_btree_read_buf_block_10222 xfs_btree_read_buf_block 0 10222 &xfs_attr_rmtval_copyout_10222 ++hdlc_rpr_irq_10240 hdlc_rpr_irq 2 10240 NULL ++cciss_proc_write_10259 cciss_proc_write 3 10259 NULL ++__qlcnic_pci_sriov_enable_10281 __qlcnic_pci_sriov_enable 2 10281 NULL ++snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL ++read_emulate_10310 read_emulate 2-4 10310 NULL ++read_file_spectral_count_10320 read_file_spectral_count 3 10320 NULL ++compat_SyS_writev_10327 compat_SyS_writev 3 10327 NULL ++tun_sendmsg_10337 tun_sendmsg 4 10337 NULL ++ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL ++whci_add_cap_10350 whci_add_cap 0 10350 NULL ++dbAllocAny_10354 dbAllocAny 0 10354 NULL ++ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL ++ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL ++sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL ++ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL ++do_compat_pselect_10398 do_compat_pselect 1 10398 NULL ++fwtty_rx_10434 fwtty_rx 3 10434 NULL ++event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL ++hash_ipportip6_expire_10478 hash_ipportip6_expire 4 10478 NULL ++nouveau_pwr_create__10483 nouveau_pwr_create_ 4 10483 NULL ++ext4_itable_unused_count_10501 ext4_itable_unused_count 0 10501 NULL ++qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL ++sel_write_disable_10511 sel_write_disable 3 10511 NULL ++osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL ++rds_message_alloc_10517 rds_message_alloc 1 10517 NULL ++qlcnic_pci_sriov_enable_10519 qlcnic_pci_sriov_enable 2 10519 NULL ++kstrtouint_from_user_10536 kstrtouint_from_user 2 10536 NULL nohasharray ++snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 &kstrtouint_from_user_10536 ++ext4_write_begin_10576 ext4_write_begin 3-4 10576 NULL ++scrub_remap_extent_10588 scrub_remap_extent 2 10588 NULL ++otp_read_10594 otp_read 2-4-5 10594 NULL ++supply_map_read_file_10608 supply_map_read_file 3 10608 NULL ++ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL ++fq_alloc_node_10633 fq_alloc_node 1 10633 NULL ++nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL ++efx_max_tx_len_10662 efx_max_tx_len 0-2 10662 NULL ++parport_write_10669 parport_write 0 10669 NULL ++edge_write_10692 edge_write 4 10692 NULL ++selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 NULL nohasharray ++inl_10708 inl 0 10708 &selinux_inode_setxattr_10708 ++shash_async_setkey_10720 shash_async_setkey 3 10720 NULL nohasharray ++pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 &shash_async_setkey_10720 ++spi_sync_10731 spi_sync 0 10731 NULL ++sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 
10737 NULL nohasharray
++apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737
++SyS_io_getevents_10756 SyS_io_getevents 3 10756 NULL
++vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
++rd_build_prot_space_10761 rd_build_prot_space 2-3 10761 NULL
++kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
++__qp_memcpy_to_queue_10779 __qp_memcpy_to_queue 2-4 10779 NULL
++diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL
++lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
++ida_get_new_above_10853 ida_get_new_above 0 10853 NULL
++fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
++snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
++wiidebug_drm_write_10879 wiidebug_drm_write 3 10879 NULL
++get_scq_10897 get_scq 2 10897 NULL
++tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
++lprocfs_wr_atomic_10912 lprocfs_wr_atomic 3 10912 NULL
++__copy_from_user_10918 __copy_from_user 0-3 10918 NULL
++kobject_add_10919 kobject_add 0 10919 NULL
++ar9003_dump_modal_eeprom_10959 ar9003_dump_modal_eeprom 3-2-0 10959 NULL
++ci_port_test_write_10962 ci_port_test_write 3 10962 NULL
++bm_entry_read_10976 bm_entry_read 3 10976 NULL
++sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
++xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
++rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
++gfs2_dir_read_11017 gfs2_dir_read 0 11017 NULL
++SetLineNumber_11023 SetLineNumber 0 11023 NULL
++tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
++insert_inline_extent_backref_11063 insert_inline_extent_backref 8 11063 NULL
++tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
++count_argc_11083 count_argc 0 11083 NULL
++kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
++tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
++page_offset_11120 page_offset 0 11120 NULL
++cea_db_payload_len_11124 cea_db_payload_len 0 11124 NULL nohasharray
++tracing_buffers_read_11124 tracing_buffers_read 3 11124 &cea_db_payload_len_11124
++snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 NULL
++il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
++comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL
++hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
++ath6kl_power_params_write_11274 ath6kl_power_params_write 3 11274 NULL
++__proc_daemon_file_11305 __proc_daemon_file 5 11305 NULL
++ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
++bcache_dev_sectors_dirty_add_11315 bcache_dev_sectors_dirty_add 3-4 11315 NULL
++sk_filter_size_11316 sk_filter_size 0 11316 NULL nohasharray
++tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 &sk_filter_size_11316
++shmem_radix_tree_replace_11325 shmem_radix_tree_replace 0 11325 NULL
++construct_key_11329 construct_key 3 11329 NULL nohasharray
++__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
++next_segment_11330 next_segment 0-2-1 11330 NULL
++persistent_ram_buffer_map_11332 persistent_ram_buffer_map 2-1 11332 NULL
++ext4_get_inline_size_11349 ext4_get_inline_size 0 11349 NULL
++sel_write_create_11353 sel_write_create 3 11353 NULL nohasharray
++nl80211_send_mgmt_11353 nl80211_send_mgmt 7 11353 &sel_write_create_11353
++qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
++nft_value_dump_11381 nft_value_dump 3 11381 NULL
++isku_sysfs_read_keys_capslock_11392 isku_sysfs_read_keys_capslock 6 11392 NULL
++dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
++lprocfs_wr_evict_client_11402 lprocfs_wr_evict_client 3 11402 NULL
++___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
++str_to_user_11411 str_to_user 2 11411 NULL
++mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
++ath6kl_wmi_test_rx_11414 ath6kl_wmi_test_rx 3 11414 NULL
++xfs_btree_lookup_11417 xfs_btree_lookup 0 11417 NULL nohasharray
++adis16480_show_firmware_revision_11417 adis16480_show_firmware_revision 3 11417 &xfs_btree_lookup_11417
++trace_options_read_11419 trace_options_read 3 11419 NULL
++i40e_dbg_command_write_11421 i40e_dbg_command_write 3 11421 NULL
++xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
++bttv_read_11432 bttv_read 3 11432 NULL
++create_zero_mask_11453 create_zero_mask 0-1 11453 NULL
++do_blockdev_direct_IO_11455 do_blockdev_direct_IO 0-6 11455 NULL
++pci_set_power_state_11479 pci_set_power_state 0 11479 NULL nohasharray
++sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 &pci_set_power_state_11479
++xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4-0 11492 NULL
++sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
++kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
++ll_direct_IO_26_seg_11518 ll_direct_IO_26_seg 0 11518 NULL
++twl_direction_in_11527 twl_direction_in 2 11527 NULL
++xfs_rtcheck_alloc_range_11553 xfs_rtcheck_alloc_range 0 11553 NULL
++radix_tree_extend_11555 radix_tree_extend 0 11555 NULL
++skb_cow_data_11565 skb_cow_data 0 11565 NULL
++lpfc_idiag_ctlacc_write_11576 lpfc_idiag_ctlacc_write 3 11576 NULL
++oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
++batadv_iv_ogm_orig_add_if_11586 batadv_iv_ogm_orig_add_if 2 11586 NULL
++snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
++fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
++batadv_iv_ogm_orig_del_if_11604 batadv_iv_ogm_orig_del_if 2 11604 NULL
++SYSC_mq_timedsend_11607 SYSC_mq_timedsend 3 11607 NULL
++sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
++nla_total_size_11658 nla_total_size 1-0 11658 NULL
++slab_ksize_11664 slab_ksize 0 11664 NULL
++ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
++compat_SyS_msgsnd_11675 compat_SyS_msgsnd 3 11675 NULL
++btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
++sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
++split_11691 split 2 11691 NULL
++snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
++blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
++dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
++iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
++ieee80211_get_num_supported_channels_11768 ieee80211_get_num_supported_channels 0 11768 NULL
++ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
++btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
++pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
++xfs_ialloc_11819 xfs_ialloc 0 11819 NULL
++umc_device_register_11824 umc_device_register 0 11824 NULL
++zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
++sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
++rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
++unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
++xfs_file_aio_write_checks_11851 xfs_file_aio_write_checks 0 11851 NULL
++ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
++ieee80211_rx_bss_info_11887 ieee80211_rx_bss_info 3 11887 NULL
++mdc_rename_11899 mdc_rename 4-6 11899 NULL
++xstateregs_get_11906 xstateregs_get 4 11906 NULL
++ti_write_11916 ti_write 4 11916 NULL
++fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
++bitmap_remap_11929 bitmap_remap 5 11929 NULL
++atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
++r1_sync_page_io_11963 r1_sync_page_io 3 11963 NULL
++f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
++read_and_add_raw_conns_11987 read_and_add_raw_conns 0 11987 NULL
++i40e_pci_sriov_configure_12011 i40e_pci_sriov_configure 2 12011 NULL
++ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
++ieee80211_if_read_user_power_level_12050 ieee80211_if_read_user_power_level 3 12050 NULL
++il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
++ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
++batadv_tt_global_size_mod_12085 batadv_tt_global_size_mod 3 12085 NULL
++rtw_malloc2d_12102 rtw_malloc2d 1-2-3 12102 NULL
++gfs2_find_jhead_12117 gfs2_find_jhead 0 12117 NULL
++alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
++set_powered_12129 set_powered 4 12129 NULL
++ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
++xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
++rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
++rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
++btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
++vmbus_open_12154 vmbus_open 2-3 12154 NULL
++fnic_reset_stats_write_12177 fnic_reset_stats_write 3 12177 NULL
++LNetEQAlloc_12178 LNetEQAlloc 1 12178 NULL
++ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
++compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
++ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
++snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
++fuse_get_req_12221 fuse_get_req 2 12221 NULL nohasharray
++aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 &fuse_get_req_12221
++xfs_attr3_leaf_read_12222 xfs_attr3_leaf_read 0 12222 NULL
++__alloc_bootmem_low_nopanic_12235 __alloc_bootmem_low_nopanic 1 12235 NULL
++usnic_ib_qp_grp_dump_rows_12239 usnic_ib_qp_grp_dump_rows 3 12239 NULL
++ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
++shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
++add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
++note_last_dentry_12285 note_last_dentry 3 12285 NULL
++roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL nohasharray
++il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 &roundup_to_multiple_of_64_12288
++bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
++usnic_transport_sock_to_str_12322 usnic_transport_sock_to_str 2-0 12322 NULL
++pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
++xfs_bmap_last_extent_12335 xfs_bmap_last_extent 0 12335 NULL
++mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
++__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
++xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
++populate_dir_12391 populate_dir 0 12391 NULL nohasharray
++write_file_dump_12391 write_file_dump 3 12391 &populate_dir_12391
++hbucket_elem_add_12416 hbucket_elem_add 3 12416 NULL
++ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
++cfs_array_alloc_12441 cfs_array_alloc 2 12441 NULL
++skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
++x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
++fnic_trace_ctrl_read_12497 fnic_trace_ctrl_read 3 12497 NULL
++__ceph_osdc_start_request_12502 __ceph_osdc_start_request 0 12502 NULL
++qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
++write_inode_now_12565 write_inode_now 0 12565 NULL
++hvc_alloc_12579 hvc_alloc 4 12579 NULL
++pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
++tlbflush_write_file_12598 tlbflush_write_file 3 12598 NULL
++vhci_put_user_12604 vhci_put_user 4 12604 NULL
++sdhci_pltfm_init_12627 sdhci_pltfm_init 3 12627 NULL
++pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
++pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
++dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
++nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
++rtw_android_get_link_speed_12655 rtw_android_get_link_speed 0 12655 NULL
++ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
++sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
++sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
++iwl_dbgfs_calib_disabled_write_12707 iwl_dbgfs_calib_disabled_write 3 12707 NULL
++ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
++ivtv_write_12721 ivtv_write 3 12721 NULL
++key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
++__mei_cl_async_send_12737 __mei_cl_async_send 3 12737 NULL
++ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
++listxattr_12769 listxattr 3 12769 NULL
++sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
++scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
++xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
++readq_12825 readq 0 12825 NULL
++SyS_add_key_12834 SyS_add_key 4 12834 NULL
++gfs2_log_reserve_12835 gfs2_log_reserve 0 12835 NULL
++TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
++spidev_sync_12842 spidev_sync 0 12842 NULL
++spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
++xfs_rtallocate_extent_exact_12865 xfs_rtallocate_extent_exact 0 12865 NULL
++ath9k_dump_4k_modal_eeprom_12883 ath9k_dump_4k_modal_eeprom 3-2 12883 NULL
++get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
++get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
++rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
++do_inode_permission_12946 do_inode_permission 0 12946 NULL
++bm_status_write_12964 bm_status_write 3 12964 NULL
++raid56_parity_recover_12987 raid56_parity_recover 5 12987 NULL
++TransmitTcb_12989 TransmitTcb 4 12989 NULL
++sk_peek_offset_12991 sk_peek_offset 0 12991 NULL
++bset_prev_bytes_13020 bset_prev_bytes 0 13020 NULL
++subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
++generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
++ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
++__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL nohasharray
++ptlrpc_lprocfs_threads_min_seq_write_13060 ptlrpc_lprocfs_threads_min_seq_write 3 13060 &__dn_setsockopt_13060
++biovec_create_pool_13079 biovec_create_pool 2 13079 NULL
++xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
++ttm_dma_pool_alloc_new_pages_13105 ttm_dma_pool_alloc_new_pages 3 13105 NULL
++snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
++bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
++blk_update_request_13146 blk_update_request 3 13146 NULL
++caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
++pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
++ucs2_strlen_13178 ucs2_strlen 0 13178 NULL
++dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
++create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
++comedi_read_13199 comedi_read 3 13199 NULL
++hash_ipport4_expire_13201 hash_ipport4_expire 4 13201 NULL
++mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
++svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
++fnic_trace_ctrl_write_13229 fnic_trace_ctrl_write 3 13229 NULL
++_iwl_dbgfs_disable_power_off_write_13243 _iwl_dbgfs_disable_power_off_write 3 13243 NULL
++asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
++init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
++bio_integrity_trim_13259 bio_integrity_trim 3-2 13259 NULL
++simple_attr_write_13260 simple_attr_write 3 13260 NULL
++pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
++il4965_stats_flag_13281 il4965_stats_flag 0-3 13281 NULL
++lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
++sd_major_13294 sd_major 0-1 13294 NULL
++module_param_sysfs_setup_13296 module_param_sysfs_setup 0 13296 NULL
++read_file_phy_err_13318 read_file_phy_err 3 13318 NULL
++kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
++round_event_name_len_13348 round_event_name_len 0 13348 NULL
++hscx_empty_fifo_13360 hscx_empty_fifo 2 13360 NULL
++xfs_btree_delrec_13364 xfs_btree_delrec 0 13364 NULL
++iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
++wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
++ext4_meta_trans_blocks_13380 ext4_meta_trans_blocks 0-3-2 13380 NULL
++lov_mds_md_size_13388 lov_mds_md_size 0-1 13388 NULL nohasharray
++dis_bypass_write_13388 dis_bypass_write 3 13388 &lov_mds_md_size_13388
++netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
++sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL
++ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 8-9-7 13443 NULL
++sb_init_dio_done_wq_13482 sb_init_dio_done_wq 0 13482 NULL
++data_read_13494 data_read 3 13494 NULL
++ioat_chansts_32_13506 ioat_chansts_32 0 13506 NULL
++core_status_13515 core_status 4 13515 NULL
++smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
++bm_init_13529 bm_init 2 13529 NULL
++llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
++ieee80211_if_read_ap_power_level_13558 ieee80211_if_read_ap_power_level 3 13558 NULL
++hash_net4_expire_13559 hash_net4_expire 4 13559 NULL
++read_file_antenna_13574 read_file_antenna 3 13574 NULL
++cache_write_13589 cache_write 3 13589 NULL
++Rd_Indx_13602 Rd_Indx 3-2 13602 NULL
++wm8994_bulk_write_13615 wm8994_bulk_write 2-3 13615 NULL
++__ntfs_grab_cache_pages_13617 __ntfs_grab_cache_pages 0 13617 NULL
++pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
++packet_snd_13634 packet_snd 3 13634 NULL
++blk_msg_write_13655 blk_msg_write 3 13655 NULL
++cache_downcall_13666 cache_downcall 3 13666 NULL
++ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0 13682 NULL
++usb_get_string_13693 usb_get_string 0 13693 NULL
++fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
++audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
++ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
++fb_sys_read_13778 fb_sys_read 3 13778 NULL
++ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
++random_read_13815 random_read 3 13815 NULL
++mutex_lock_interruptible_nested_13817 mutex_lock_interruptible_nested 0 13817 NULL
++hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
++___mei_cl_send_13821 ___mei_cl_send 3 13821 NULL
++enc_pools_insert_13849 enc_pools_insert 3 13849 NULL
++evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
++compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL
++qp_memcpy_to_queue_13886 qp_memcpy_to_queue 5-2 13886 NULL
++snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
++cfg80211_inform_bss_width_13933 cfg80211_inform_bss_width 9 13933 NULL
++ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
++ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL nohasharray
++ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 &ieee80211_if_read_dot11MeshForwarding_13940
++iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
++ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
++lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL nohasharray
++pagecache_write_end_13950 pagecache_write_end 0 13950 &lpfc_idiag_queacc_read_13950
++osc_grant_shrink_interval_seq_write_13952 osc_grant_shrink_interval_seq_write 3 13952 NULL
++ocfs2_refresh_slot_info_13957 ocfs2_refresh_slot_info 0 13957 NULL
++snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
++qcam_read_13977 qcam_read 3 13977 NULL
++dsp_read_13980 dsp_read 2 13980 NULL
++dvb_demux_read_13981 dvb_demux_read 3 13981 NULL
++sddr09_write_data_14014 sddr09_write_data 3 14014 NULL
++btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
++dmi_format_ids_14018 dmi_format_ids 2 14018 NULL
++iscsi_create_flashnode_conn_14022 iscsi_create_flashnode_conn 4 14022 NULL
++pci_add_ext_cap_save_buffer_14032 pci_add_ext_cap_save_buffer 3 14032 NULL
++dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
++read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
++xfs_bmap_alloc_14044 xfs_bmap_alloc 0 14044 NULL
++ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
++ovs_nla_alloc_flow_actions_14056 ovs_nla_alloc_flow_actions 1 14056 NULL
++sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
++lov_stripeoffset_seq_write_14078 lov_stripeoffset_seq_write 3 14078 NULL
++do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
++compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
++ext4_journal_blocks_per_page_14127 ext4_journal_blocks_per_page 0 14127 NULL
++isku_sysfs_read_light_14140 isku_sysfs_read_light 6 14140 NULL
++em_canid_change_14150 em_canid_change 3 14150 NULL
++gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
++midi_alloc_ep_req_14159 midi_alloc_ep_req 2 14159 NULL
++print_input_mask_14168 print_input_mask 3-0 14168 NULL
++ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
++datafab_read_data_14186 datafab_read_data 4 14186 NULL
++hfsplus_brec_find_14200 hfsplus_brec_find 0 14200 NULL
++alloc_async_14208 alloc_async 1 14208 NULL
++ath6kl_regread_write_14220 ath6kl_regread_write 3 14220 NULL
++ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
++dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4 14244 NULL
++btrfs_token_file_extent_ram_bytes_14247 btrfs_token_file_extent_ram_bytes 0 14247 NULL
++ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
++rr_status_14293 rr_status 5 14293 NULL
++read_default_ldt_14302 read_default_ldt 2 14302 NULL
++update_rgrp_lvb_14303 update_rgrp_lvb 0 14303 NULL
++xfs_qm_qino_alloc_14309 xfs_qm_qino_alloc 0 14309 NULL
++oo_objects_14319 oo_objects 0 14319 NULL
++ll_get_user_pages_14328 ll_get_user_pages 3-2-0 14328 NULL
++p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
++alloc_tx_struct_14349 alloc_tx_struct 1 14349 NULL
++hash_ipportnet4_expire_14354 hash_ipportnet4_expire 4 14354 NULL
++snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
++lowpan_read_14369 lowpan_read 3 14369 NULL
++ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
++smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
++mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
++get_kcore_size_14425 get_kcore_size 0 14425 NULL
++_iwl_dbgfs_sram_write_14439 _iwl_dbgfs_sram_write 3 14439 NULL
++block_size_14443 block_size 0 14443 NULL
++lmv_user_md_size_14456 lmv_user_md_size 0-1 14456 NULL
++snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
++ath10k_write_htt_stats_mask_14458 ath10k_write_htt_stats_mask 3 14458 NULL
++lustre_msg_size_v2_14470 lustre_msg_size_v2 0 14470 NULL
++dma_transfer_size_14473 dma_transfer_size 0 14473 NULL
++udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
++ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
++split_state_14491 split_state 0 14491 NULL
++ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
++ep0_write_14536 ep0_write 3 14536 NULL nohasharray
++dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 &ep0_write_14536
++picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
++qp_host_alloc_queue_14566 qp_host_alloc_queue 1 14566 NULL
++SyS_setdomainname_14569 SyS_setdomainname 2 14569 NULL
++stuffed_readpage_14581 stuffed_readpage 0 14581 NULL
++idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
++ceph_osdc_alloc_request_14597 ceph_osdc_alloc_request 3 14597 NULL
++dbJoin_14644 dbJoin 0 14644 NULL
++profile_replace_14652 profile_replace 3 14652 NULL
++usnic_vnic_dump_14662 usnic_vnic_dump 3 14662 NULL
++add_to_page_cache_locked_14668 add_to_page_cache_locked 0 14668 NULL
++min_bytes_needed_14675 min_bytes_needed 0 14675 NULL
++nvme_trans_log_info_exceptions_14677 nvme_trans_log_info_exceptions 3 14677 NULL
++pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
++ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
++SyS_fsetxattr_14702 SyS_fsetxattr 4 14702 NULL
++persistent_ram_ecc_string_14704 persistent_ram_ecc_string 0 14704 NULL
++u_audio_playback_14709 u_audio_playback 3 14709 NULL
++rtw_cbuf_alloc_14710 rtw_cbuf_alloc 1 14710 NULL
++cgroup_path_14713 cgroup_path 3 14713 NULL
++vfd_write_14717 vfd_write 3 14717 NULL
++__blk_end_request_14729 __blk_end_request 3 14729 NULL
++raid1_resize_14740 raid1_resize 2 14740 NULL
++i915_error_state_buf_init_14742 i915_error_state_buf_init 2 14742 NULL
++btrfs_inode_extref_name_len_14752 btrfs_inode_extref_name_len 0 14752 NULL
++rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
++regmap_range_read_file_14775 regmap_range_read_file 3 14775 NULL
++sta_dev_read_14782 sta_dev_read 3 14782 NULL
++keys_proc_write_14792 keys_proc_write 3 14792 NULL
++ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
++__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
++hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
++snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
++security_inode_rename_14805 security_inode_rename 0 14805 NULL
++xfs_btree_kill_iroot_14824 xfs_btree_kill_iroot 0 14824 NULL
++mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
++lcd_write_14857 lcd_write 3 14857 NULL
++get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
++xfs_bmapi_convert_unwritten_14886 xfs_bmapi_convert_unwritten 0 14886 NULL
++gmux_index_read8_14890 gmux_index_read8 0 14890 NULL
++acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
++SYSC_readv_14901 SYSC_readv 3 14901 NULL
++__arch_hweight64_14923 __arch_hweight64 0 14923 NULL nohasharray
++qp_memcpy_to_queue_iov_14923 qp_memcpy_to_queue_iov 5-2 14923 &__arch_hweight64_14923
++ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
++queue_cnt_14951 queue_cnt 0 14951 NULL
++unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
++videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
++mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
++setkey_14987 setkey 3 14987 NULL nohasharray
++gpio_twl4030_write_14987 gpio_twl4030_write 1 14987 &setkey_14987
++blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
++read_file_node_aggr_15040 read_file_node_aggr 3 15040 NULL
++cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
++ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
++nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
++ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5-0 15072 NULL
++pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
++smscore_load_firmware_family2_15086 smscore_load_firmware_family2 3 15086 NULL
++xfs_btree_insrec_15090 xfs_btree_insrec 0 15090 NULL
++btrfs_readpage_15094 btrfs_readpage 0 15094 NULL
++hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
++start_port_15124 start_port 0 15124 NULL
++ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
++SYSC_setdomainname_15180 SYSC_setdomainname 2 15180 NULL
++iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
++mtt_alloc_res_15211 mtt_alloc_res 5 15211 NULL
++bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
++iwl_dbgfs_sram_write_15239 iwl_dbgfs_sram_write 3 15239 NULL
++il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
++simple_strtol_15273 simple_strtol 0 15273 NULL
++fw_realloc_buffer_15280 fw_realloc_buffer 2 15280 NULL
++ocfs2_read_refcount_block_15305 ocfs2_read_refcount_block 0 15305 NULL
++kovaplus_sysfs_read_15337 kovaplus_sysfs_read 6 15337 NULL
++ioread16_15342 ioread16 0 15342 NULL
++ept_prefetch_gpte_15348 ept_prefetch_gpte 4 15348 NULL
++acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
++ext4_direct_IO_15369 ext4_direct_IO 4 15369 NULL
++graph_depth_read_15371 graph_depth_read 3 15371 NULL
++compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
++fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
++alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
++pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
++get_modalias_15406 get_modalias 2 15406 NULL
++blockdev_direct_IO_15408 blockdev_direct_IO 5 15408 NULL
++__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4-0 15423 NULL
++tcp_mtu_to_mss_15438 tcp_mtu_to_mss 2-0 15438 NULL
++hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
++memweight_15450 memweight 2 15450 NULL
++zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
++ifx_spi_write_15531 ifx_spi_write 3 15531 NULL
++p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
++xfrm_state_mtu_15548 xfrm_state_mtu 0-2 15548 NULL
++persistent_status_15574 persistent_status 4 15574 NULL
++bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
++vme_user_write_15587 vme_user_write 3 15587 NULL
++compat_fillonedir_15620 compat_fillonedir 3 15620 NULL
++proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
++tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
++sk_memory_allocated_add_15642 sk_memory_allocated_add 2 15642 &tomoyo_scan_bprm_15642 nohasharray
++pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &sk_memory_allocated_add_15642
++joydev_handle_JSIOCSBTNMAP_15643 joydev_handle_JSIOCSBTNMAP 3 15643 NULL
++fs_path_add_15648 fs_path_add 3 15648 NULL
++__do_readpage_15652 __do_readpage 0 15652 NULL
++xsd_read_15653 xsd_read 3 15653 NULL
++unix_bind_15668 unix_bind 3 15668 NULL
++dm_read_15674 dm_read 3 15674 NULL nohasharray
++SyS_connect_15674 SyS_connect 3 15674 &dm_read_15674
++tracing_snapshot_write_15719 tracing_snapshot_write 3 15719 NULL
++HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
++smk_read_direct_15803 smk_read_direct 3 15803 NULL
++nameseq_list_15817 nameseq_list 3-0 15817 NULL nohasharray
++gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
++afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
++table_size_15851 table_size 0-1-2 15851 NULL
++write_file_tx99_15856 write_file_tx99 3 15856 NULL
++media_entity_init_15870 media_entity_init 2-4 15870 NULL
++__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
++native_read_msr_15905 native_read_msr 0 15905 NULL
++parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
++power_read_15939 power_read 3 15939 NULL
++lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
++snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 5-2-4 15952 NULL
++memblock_virt_alloc_try_nid_15954 memblock_virt_alloc_try_nid 1 15954 NULL
++viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
++dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
++read_file_spectral_period_16057 read_file_spectral_period 3 16057 NULL
++si5351_msynth_params_address_16062 si5351_msynth_params_address 0-1 16062 NULL
++isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
++isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 NULL nohasharray
++dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 &isr_hw_pm_mode_changes_read_16110
++gfs2_jdesc_check_16122 gfs2_jdesc_check 0 16122 NULL
++snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
++compat_sys_select_16131 compat_sys_select 1 16131 NULL
++fsm_init_16134 fsm_init 2 16134 NULL
++ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
++optimal_reclaimed_pages_16172 optimal_reclaimed_pages 0 16172 NULL
++mapping_level_16188 mapping_level 2-0 16188 NULL
++i40e_allocate_virt_mem_d_16191 i40e_allocate_virt_mem_d 3 16191 NULL
++ath10k_htt_rx_ring_size_16201 ath10k_htt_rx_ring_size 0 16201 NULL
++cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
++SyS_pselect6_16210 SyS_pselect6 1 16210 NULL
++create_table_16213 create_table 2 16213 NULL
++ath9k_hw_ar9287_dump_eeprom_16224 ath9k_hw_ar9287_dump_eeprom 5-4 16224 NULL
++atomic_read_file_16227 atomic_read_file 3 16227 NULL
++BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
++lov_prep_brw_set_16246 lov_prep_brw_set 3 16246 NULL
++btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 NULL nohasharray
++i40e_dbg_dump_read_16247 i40e_dbg_dump_read 3 16247 &btrfs_dev_extent_chunk_offset_16247
++il_dbgfs_disable_ht40_write_16249 il_dbgfs_disable_ht40_write 3 16249 NULL
++SyS_fgetxattr_16254 SyS_fgetxattr 4 16254 NULL
++reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
++ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
++nand_bch_init_16280 nand_bch_init 3-2 16280 NULL nohasharray
++drbd_setsockopt_16280 drbd_setsockopt 5 16280 &nand_bch_init_16280
++account_16283 account 0-4-2 16283 NULL nohasharray
++mirror_status_16283 mirror_status 5 16283 &account_16283
++jumpshot_read_data_16287 jumpshot_read_data 4 16287 NULL
++mo_xattr_get_16288 mo_xattr_get 0 16288 NULL
++stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
++rbd_segment_offset_16293 rbd_segment_offset 0-2 16293 NULL
++rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
++kvm_handle_hva_range_16312 kvm_handle_hva_range 3-2 16312 NULL
++sysfs_create_groups_16360 sysfs_create_groups 0 16360 NULL
++total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
++xfs_inobt_lookup_16367 xfs_inobt_lookup 0 16367 NULL
++iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
++gfs2_dinode_in_16378 gfs2_dinode_in 0 16378 NULL
++xfs_btree_check_block_16419 xfs_btree_check_block 0 16419 NULL
++ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
++rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
++__bio_add_page_16435 __bio_add_page 0-4 16435 NULL
++cmdline_store_16442 cmdline_store 4 16442 NULL
++btrfs_truncate_inode_items_16452 btrfs_truncate_inode_items 4 16452 NULL
++netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
++req_capsule_get_size_16467 req_capsule_get_size 0 16467 NULL
++tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
++KEY_OFFSET_16504 KEY_OFFSET 0 16504 NULL
++snd_interval_max_16529 snd_interval_max 0 16529 NULL
++raid10_resize_16537 raid10_resize 2 16537 NULL
++lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
++agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
++lustre_msg_hdr_size_v2_16589 lustre_msg_hdr_size_v2 0 16589 NULL
++gmux_index_read32_16604 gmux_index_read32 0 16604 NULL
++rtw_set_wpa_ie_16633 rtw_set_wpa_ie 3 16633 NULL
++btrfs_get_token_32_16651 btrfs_get_token_32 0 16651 NULL
++__wa_populate_dto_urb_16699 __wa_populate_dto_urb 3-4 16699 NULL
++__proc_lnet_buffers_16717 __proc_lnet_buffers 5 16717 NULL
++__copy_to_user_swizzled_16748 __copy_to_user_swizzled 3-4 16748 NULL
++arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
++blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
++i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
++get_server_iovec_16804 get_server_iovec 2 16804 NULL
++drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
++scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
++hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
++xfs_dialloc_ag_16868 xfs_dialloc_ag 0 16868 NULL
++alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL nohasharray
++xfs_iget_16872 xfs_iget 0 16872 &alloc_idx_lebs_16872
++carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
++st_write_16874 st_write 3 16874 NULL
++__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
++transport_init_session_tags_16878 transport_init_session_tags 1-2 16878 NULL
++snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 NULL nohasharray
++psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 &snd_gf1_mem_proc_dump_16926
++_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
++squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
++keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
++ocfs2_read_quota_phys_block_16990 ocfs2_read_quota_phys_block 0 16990 NULL
++ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
++copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
++jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
++__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
++sddr55_read_data_17072 sddr55_read_data 4 17072 NULL
++dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
++simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
++carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
++entry_length_17093 entry_length 0 17093 NULL
++ocfs2_get_refcount_cpos_end_17113 ocfs2_get_refcount_cpos_end 0 17113 NULL
++write_mem_17114 write_mem 3 17114 NULL
++pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
++nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
++jumpshot_write_data_17151 jumpshot_write_data 4 17151 NULL
++sep_read_17161 sep_read 3 17161 NULL
++befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
++tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
++UniStrnlen_17169 UniStrnlen 0 17169 NULL
++access_remote_vm_17189 access_remote_vm 0 17189 NULL nohasharray
++iwl_dbgfs_txfifo_flush_write_17189 iwl_dbgfs_txfifo_flush_write 3 17189 &access_remote_vm_17189 nohasharray
++ocfs2_flock_handle_signal_17189 ocfs2_flock_handle_signal 0 17189 &iwl_dbgfs_txfifo_flush_write_17189
++iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 NULL nohasharray
++driver_state_read_17194 driver_state_read 3 17194 &iscsit_find_cmd_from_itt_or_dump_17194
++sync_request_17208 sync_request 2 17208 NULL
++dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
++lprocfs_read_frac_helper_17261 lprocfs_read_frac_helper 0 17261 NULL
++error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
++alloc_ep_17269 alloc_ep 1 17269 NULL
++pg_read_17276 pg_read 3 17276 NULL
++raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
++hmac_sha256_17278 hmac_sha256 2 17278 NULL
++neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
++minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
++ieee80211_if_fmt_dot11MeshForwarding_17301 ieee80211_if_fmt_dot11MeshForwarding 3 17301 NULL
++mb_cache_create_17307 mb_cache_create 2 17307 NULL
++gnttab_map_frames_v2_17314 gnttab_map_frames_v2 2 17314 NULL
++ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
++ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
++mdc_import_seq_write_17409 mdc_import_seq_write 3 17409 NULL
++lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
++compat_sys_ppoll_17430 compat_sys_ppoll 2 17430 NULL
++sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
++libcfs_ipif_enumerate_17445 libcfs_ipif_enumerate 0 17445 NULL
++xfs_btree_lshift_17448 xfs_btree_lshift 0 17448 NULL
++nla_get_u32_17455 nla_get_u32 0 17455 NULL
++__ref_totlen_17461 __ref_totlen 0 17461 NULL
++probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
++TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
++lbs_highrssi_write_17515 lbs_highrssi_write 3 17515 NULL
++qp_free_res_17541 qp_free_res 5 17541 NULL
++__copy_to_user_17551 __copy_to_user 3-0 17551 NULL
++copy_from_user_17559 copy_from_user 0-3 17559 NULL
++hash_netport4_expire_17573 hash_netport4_expire 4 17573 NULL
++acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
++neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
++osst_execute_17607 osst_execute 7-6 17607 NULL
++ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
++dma_map_page_17628 dma_map_page 0 17628 NULL
++twl4030_set_gpio_direction_17645 twl4030_set_gpio_direction 1 17645 NULL
++SYSC_migrate_pages_17657 SYSC_migrate_pages 2 17657 NULL
++packet_setsockopt_17662 packet_setsockopt 5 17662 NULL
++pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
++filemap_fdatawait_17688 filemap_fdatawait 0 17688 NULL
++venus_rename_17707 venus_rename 4-5 17707 NULL nohasharray
++__einj_error_trigger_17707 __einj_error_trigger 0 17707 &venus_rename_17707
++exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
++sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
++dgap_do_fep_load_17765 dgap_do_fep_load 3 17765 NULL
++shrink_slab_node_17794 shrink_slab_node 3 17794 NULL
++gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
++cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
++em28xx_audio_ep_packet_size_17844 em28xx_audio_ep_packet_size 0 17844 NULL
++dm_stats_message_17863 dm_stats_message 5 17863 NULL
++sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
++alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
++virtio_cread32_17873 virtio_cread32 0 17873 NULL
++ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
++orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL nohasharray
++i40e_align_l2obj_base_17878 i40e_align_l2obj_base 0-1 17878 &orinoco_set_key_17878
++init_per_cpu_17880 init_per_cpu 1 17880 NULL
++ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
++ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
++xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
++xfs_rtallocate_extent_near_17916 xfs_rtallocate_extent_near 0 17916 NULL
++scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
++__mutex_lock_check_stamp_17947 __mutex_lock_check_stamp 0 17947 NULL
++beacon_interval_write_17952 beacon_interval_write 3 17952 NULL
++calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
++ext4_ext_calc_credits_for_single_extent_17983 ext4_ext_calc_credits_for_single_extent 0-2 17983 NULL
++smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
++gnttab_max_grant_frames_17993 gnttab_max_grant_frames 0 17993 NULL
++pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
++alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
++cpufreq_add_dev_symlink_18028 cpufreq_add_dev_symlink 0 18028 NULL
++o2hb_highest_node_18034 o2hb_highest_node 0 18034 NULL
++cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
++__btrfs_drop_extents_18049 __btrfs_drop_extents 5 18049 NULL
++ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
++lua_sysfs_read_18062 lua_sysfs_read 6 18062 NULL
++fpregs_get_18066 fpregs_get 4 18066 NULL
++kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
++SYSC_pselect6_18076 SYSC_pselect6 1 18076 NULL
++SYSC_semtimedop_18091 SYSC_semtimedop 3 18091 NULL
++mpi_alloc_18094 mpi_alloc 1 18094 NULL
++hfs_direct_IO_18104 hfs_direct_IO 4 18104 NULL
++dfs_file_read_18116 dfs_file_read 3 18116 NULL
++svc_getnl_18120 svc_getnl 0 18120 NULL
++paging32_gpte_to_gfn_lvl_18131 paging32_gpte_to_gfn_lvl 0-2-1 18131 NULL
++xfs_zero_eof_18134 xfs_zero_eof 0 18134 NULL
++selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
++pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
++orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
++gsm_control_message_18209 gsm_control_message 4 18209 NULL
++read_rindex_entry_18213 read_rindex_entry 0 18213 NULL
++do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
++gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
++alloc_trace_uprobe_18247 alloc_trace_uprobe 3 18247 NULL
++rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
++qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
++gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
++alloc_ring_18278 alloc_ring 2-4 18278 NULL
++bio_phys_segments_18281 bio_phys_segments 0 18281 NULL nohasharray
++nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 &bio_phys_segments_18281
++ext4_readpages_18283 ext4_readpages 4 18283 NULL
++mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
++um_idi_write_18293 um_idi_write 3 18293 NULL
++nouveau_disp_create__18305 nouveau_disp_create_ 4-7 18305 NULL
++vga_r_18310 vga_r 0 18310 NULL
++class_add_profile_18315 class_add_profile 1-3-5 18315 NULL
++csio_mem_read_18319 csio_mem_read 3 18319 NULL
++alloc_and_copy_string_18321 alloc_and_copy_string 2 18321 NULL
++ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
++bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
++lcd_proc_write_18351 lcd_proc_write 3 18351 NULL
++pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
++SyS_process_vm_readv_18366 SyS_process_vm_readv 3-5 18366 NULL
++ep_io_18367 ep_io 0 18367 NULL
++qib_user_sdma_num_pages_18371 qib_user_sdma_num_pages 0 18371 NULL
++__ceph_getxattr_18386 __ceph_getxattr 0 18386 NULL
++ci_role_write_18388 ci_role_write 3 18388 NULL
++hdlc_empty_fifo_18397 hdlc_empty_fifo 2 18397 NULL
++adis16136_show_serial_18402 adis16136_show_serial 3 18402 NULL
++crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
++memblock_virt_alloc_node_nopanic_18431 memblock_virt_alloc_node_nopanic 1 18431 NULL
++iscsi_create_flashnode_sess_18433 iscsi_create_flashnode_sess 4 18433 NULL
++snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
++flash_dev_cache_miss_18454 flash_dev_cache_miss 4 18454 NULL
++fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
++regset_tls_set_18459 regset_tls_set 4 18459 NULL
++pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL nohasharray
++mite_bytes_in_transit_18479 mite_bytes_in_transit 0 18479 &pci_vpd_lrdt_size_18479
++udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
++btrfs_fiemap_18501 btrfs_fiemap 3 18501 NULL
++__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
++__block_write_begin_18511 __block_write_begin 0 18511 NULL
++snd_vx_inb_18514 snd_vx_inb 0 18514 NULL
++snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
++nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 9 18530 NULL
++seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
++sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
++smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
++debug_output_18575 debug_output 3 18575 NULL
++xfs_btree_read_bufl_18597 xfs_btree_read_bufl 0 18597 NULL
++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL
++iowarrior_write_18604 iowarrior_write 3 18604 NULL
++nvc0_ram_create__18624 nvc0_ram_create_ 4 18624 NULL
++from_buffer_18625 from_buffer 3 18625 NULL
++snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
++ieee80211_if_fmt_rssi_threshold_18664 ieee80211_if_fmt_rssi_threshold 3 18664 NULL
++xfs_iext_insert_18667 xfs_iext_insert 3 18667 NULL
++fnic_stats_debugfs_read_18688 fnic_stats_debugfs_read 3 18688 NULL
++echo_client_prep_commit_18693 echo_client_prep_commit 8 18693 NULL
++iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
++ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
++blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
++snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
++o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
++__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
++wep_packets_read_18751 wep_packets_read 3 18751 NULL
++read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
++ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
++SyS_lsetxattr_18776 SyS_lsetxattr 4 18776 NULL
++alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
++dm_stats_print_18815 dm_stats_print 7 18815 NULL
++sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
++mtf_test_write_18844 mtf_test_write 3 18844 NULL
++sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
++ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
++xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
++ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
++ieee80211_rx_mgmt_disassoc_18927 ieee80211_rx_mgmt_disassoc 3 18927 NULL
++snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
++clean_journal_18955 clean_journal 0 18955 NULL
++__nla_reserve_18974 __nla_reserve 3 18974 NULL
++__blockdev_direct_IO_18977 __blockdev_direct_IO 0-6 18977 NULL
++layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
++huge_page_size_19008 huge_page_size 0 19008 NULL
++hash_netport6_expire_19013 hash_netport6_expire 4 19013 NULL
++sysfs_create_dir_ns_19033 sysfs_create_dir_ns 0 19033 NULL
++revalidate_19043 revalidate 2 19043 NULL
++afs_vnode_store_data_19048 afs_vnode_store_data 2-3-4-5 19048 NULL
++osc_pinger_recov_seq_write_19056 osc_pinger_recov_seq_write 3 19056 NULL
++get_log_header_19063 get_log_header 0 19063 NULL
++create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
++ceph_create_snap_context_19082 ceph_create_snap_context 1 19082 NULL
++sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
++cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
++snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
++alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
++sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
++smk_write_access2_19170 smk_write_access2 3 19170 NULL
++iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
++vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
++__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3-0 19214 NULL
++dev_counters_read_19216 dev_counters_read 3 19216 NULL
++wbcir_tx_19219 wbcir_tx 3 19219 NULL
++snd_mask_max_19224 snd_mask_max 0 19224 NULL
++bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
++ucma_query_19260 ucma_query 4 19260 NULL
++il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
++batadv_tt_save_orig_buffer_19288 batadv_tt_save_orig_buffer 4 19288 NULL nohasharray
++cfg80211_rx_unprot_mlme_mgmt_19288 cfg80211_rx_unprot_mlme_mgmt 3 19288 &batadv_tt_save_orig_buffer_19288
++qc_capture_19298 qc_capture 3 19298 NULL
++ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 4-3 19303 NULL
++event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
++debug_read_19322 debug_read 3 19322 NULL
++lbs_host_sleep_write_19332 lbs_host_sleep_write 3 19332 NULL nohasharray
++cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 &lbs_host_sleep_write_19332
++closure_sub_19359 closure_sub 2 19359 NULL
++firmware_data_write_19360 firmware_data_write 6-5 19360 NULL
++read_zero_19366 read_zero 3 19366 NULL
++interpret_user_input_19393 interpret_user_input 2 19393 NULL
++sync_fill_pt_info_19397 sync_fill_pt_info 0 19397 NULL
++pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
++dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
++SyS_sched_getaffinity_19444 SyS_sched_getaffinity 2 19444 NULL
++xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
++gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
++gp2ap020a00f_get_thresh_reg_19468 gp2ap020a00f_get_thresh_reg 0 19468 NULL
++sky2_read16_19475 sky2_read16 0 19475 NULL
++__read_status_pciv2_19492 __read_status_pciv2 0 19492 NULL
++kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
++ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
++batadv_tvlv_container_register_19520 batadv_tvlv_container_register 5 19520 NULL
++apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 NULL nohasharray
++cfc_write_array_to_buffer_19529 cfc_write_array_to_buffer 3 19529 &apei_exec_pre_map_gars_19529
++nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
++gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
++ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
++nfsd_read_19568 nfsd_read 5 19568 NULL
++ieee80211_key_alloc_19575 ieee80211_key_alloc 3 19575 NULL
++mnt_want_write_file_19579 mnt_want_write_file 0 19579 NULL
++bm_status_read_19583 bm_status_read 3 19583 NULL
++load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
++fallocate_chunk_19610 fallocate_chunk 0 19610 NULL
++__mei_cl_recv_19636 __mei_cl_recv 3 19636 NULL
++LoadBitmap_19658 LoadBitmap 2 19658 NULL
++memblock_virt_alloc_low_nopanic_19714 memblock_virt_alloc_low_nopanic 1 19714 NULL
++ocfs2_control_get_this_node_19721 ocfs2_control_get_this_node 0 19721 NULL
++read_reg_19723 read_reg 0 19723 NULL
++wm8350_block_write_19727 wm8350_block_write 2-3 19727 NULL
++memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
++snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
++p9_client_read_19750 p9_client_read 5-0 19750 NULL
++pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
++ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
++jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
++readhscx_19769 readhscx 0 19769 NULL
++irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
++xfs_bmap_add_extent_hole_real_19828 xfs_bmap_add_extent_hole_real 0 19828 NULL
++vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
++crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
++cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
++__nla_put_19857 __nla_put 3 19857 NULL
++mrp_request_join_19882 mrp_request_join 4 19882 NULL
++aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
++ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
++mangle_name_19923 mangle_name 0 19923 NULL
++cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
++guest_read_tsc_19931 guest_read_tsc 0 19931 NULL
++iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
++cfg80211_rx_assoc_resp_19944 cfg80211_rx_assoc_resp 4 19944 NULL
++ll_xattr_cache_list_19954 ll_xattr_cache_list 0 19954 NULL
++get_jack_mode_name_19976 get_jack_mode_name 4 19976 NULL
++attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
++rtw_set_wps_probe_resp_19989 rtw_set_wps_probe_resp 3 19989 NULL
++diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
++event_trigger_write_20009 event_trigger_write 3 20009 NULL nohasharray
++lov_stripe_md_size_20009 lov_stripe_md_size 0-1 20009 &event_trigger_write_20009
++tree_mod_log_eb_move_20011 tree_mod_log_eb_move 5 20011 NULL
++SYSC_fgetxattr_20027 SYSC_fgetxattr 4 20027 NULL
++split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
++alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
++iwl_mvm_power_mac_dbgfs_read_20067 iwl_mvm_power_mac_dbgfs_read 4 20067 NULL
++target_message_20072 target_message 2 20072 NULL
++rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
++fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
++aat2870_reg_write_file_20086 aat2870_reg_write_file 3 20086 NULL
++team_options_register_20091 team_options_register 3 20091 NULL
++qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
++xfs_qm_dqget_20103 xfs_qm_dqget 0 20103 NULL
++root_nfs_copy_20111 root_nfs_copy 3 20111 NULL
++hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
++tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
++read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
++wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
++crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
++pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
++rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
++tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
++btrfs_header_nritems_20296 btrfs_header_nritems 0 20296 NULL
++r10_sync_page_io_20307 r10_sync_page_io 3 20307 NULL
++dm_get_reserved_bio_based_ios_20315 dm_get_reserved_bio_based_ios 0 20315 NULL
++tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
++vx_send_msg_nolock_20322 vx_send_msg_nolock 0 20322 NULL
++snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
++gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
++handle_arr_calc_size_20355 handle_arr_calc_size 0-1 20355 NULL
++smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
++snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL nohasharray
++read_7220_creg32_20394 read_7220_creg32 0 20394 &snd_nm256_readl_20394
++__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL nohasharray
++SyS_get_mempolicy_20399 SyS_get_mempolicy 3 20399 &__kfifo_from_user_20399
++compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
++read_buf_20469 read_buf 2 20469 NULL
++bio_trim_20472 bio_trim 2 20472 NULL
++btrfs_get_32_20476 btrfs_get_32 0 20476 NULL
++xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
++drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
++amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
++scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
++venus_create_20555 venus_create 4 20555 NULL
++btrfs_super_log_root_20565 btrfs_super_log_root 0 20565 NULL
++crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
++xfs_bmap_isaeof_20570 xfs_bmap_isaeof 0 20570 NULL
++ocfs2_cluster_lock_20588 ocfs2_cluster_lock 0 20588 NULL
++kvm_test_age_hva_20593 kvm_test_age_hva 2 20593 NULL
++sync_timeline_create_20601 sync_timeline_create 2 20601 NULL
++lirc_write_20604 lirc_write 3 20604 NULL
++qib_qsfp_write_20614 qib_qsfp_write 0-2-4 20614 NULL
++snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
++get_extent_skip_holes_20642 get_extent_skip_holes 2 20642 NULL
++kfifo_copy_to_user_20646 kfifo_copy_to_user 3-4 20646 NULL
++cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
++oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
++oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
++snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL nohasharray
++btrfs_qgroup_reserve_20676 btrfs_qgroup_reserve 0 20676 &snd_hdsp_playback_copy_20676
++dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 NULL
++cpumask_size_20683 cpumask_size 0 20683 NULL
++btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
++read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
++__maestro_read_20700 __maestro_read 0 20700 NULL
++cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
++pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
++ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
++security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
++fb_prepare_logo_20743 fb_prepare_logo 0 20743 NULL
++vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
++ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
++brcmf_p2p_escan_20763 brcmf_p2p_escan 2 20763 NULL nohasharray
++mnt_clone_write_20763 mnt_clone_write 0 20763 &brcmf_p2p_escan_20763
++fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
++iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
++lowpan_write_20800 lowpan_write 3 20800 NULL
++strndup_user_20819 strndup_user 2 20819 NULL nohasharray
++do_glock_20819 do_glock 0 20819 &strndup_user_20819
++tipc_msg_build_20825 tipc_msg_build 3 20825 NULL
++wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
++uvc_alloc_entity_20836 uvc_alloc_entity 4-3 20836 NULL
++p9_tag_alloc_20845 p9_tag_alloc 3 20845 NULL
++nvme_trans_supported_vpd_pages_20847 nvme_trans_supported_vpd_pages 4 20847 NULL
++get_name_20855 get_name 4 20855 NULL
++iwl_dbgfs_pm_params_read_20866 iwl_dbgfs_pm_params_read 3 20866 NULL
++snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
++srq_free_res_20868 srq_free_res 5 20868 NULL
++cfs_cpt_table_create_20884 cfs_cpt_table_create 1 20884 NULL
++rb_simple_write_20890 rb_simple_write 3 20890 NULL
++sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
++key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
++vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
++lbs_rdbbp_write_20918 lbs_rdbbp_write 3 20918 NULL
++htable_bits_20933 htable_bits 0 20933 NULL
++altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
++rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
++rsxx_cram_read_20957 rsxx_cram_read 3 20957 NULL
++nfs_map_name_to_uid_20962 nfs_map_name_to_uid 3 20962 NULL
++snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
++alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
++qib_verbs_send_20999 qib_verbs_send 5-3 20999 NULL
++btrfs_dirty_pages_21019 btrfs_dirty_pages 0 21019 NULL
++btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
++xfs_btree_new_root_21028 xfs_btree_new_root 0 21028 NULL
++rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
++srp_change_queue_depth_21038 srp_change_queue_depth 2 21038 NULL
++lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
++reiserfs_direct_IO_21051 reiserfs_direct_IO 4 21051 NULL
++proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
++qdisc_get_default_21072 qdisc_get_default 2 21072 NULL
++event_calibration_read_21083 event_calibration_read 3 21083 NULL
++bl_add_page_to_bio_21094 bl_add_page_to_bio 2 21094 NULL nohasharray
++multipath_status_21094 multipath_status 5 21094 &bl_add_page_to_bio_21094
++rate_control_pid_events_read_21099 rate_control_pid_events_read 3 21099 NULL
++ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
++_efx_mcdi_rpc_async_21119 _efx_mcdi_rpc_async 4-5 21119 NULL
++i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
++cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
++ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
++scsi_execute_req_flags_21215 scsi_execute_req_flags 5 21215 NULL
++get_numpages_21227 get_numpages 0-1-2 21227 NULL
++input_ff_create_21240 input_ff_create 2 21240 NULL
++cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
++use_debug_keys_read_21251 use_debug_keys_read 3 21251 NULL
++fru_length_21257 fru_length 0 21257 NULL
++rtw_set_wps_beacon_21262 rtw_set_wps_beacon 3 21262 NULL
++xfs_alloc_ag_vextent_size_21276 xfs_alloc_ag_vextent_size 0 21276 NULL
++do_msg_fill_21307 do_msg_fill 3 21307 NULL
++add_res_range_21310 add_res_range 4 21310 NULL
++get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
++ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
++read_file_bool_bmps_21344 read_file_bool_bmps 3 21344 NULL
++gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
++alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
++SYSC_rt_sigpending_21379 SYSC_rt_sigpending 2 21379 NULL
++video_ioctl2_21380 video_ioctl2 2 21380 NULL
++insert_ptr_21386 insert_ptr 6 21386 NULL
++diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
++__clone_and_map_simple_bio_21404 __clone_and_map_simple_bio 4 21404 NULL
++snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
++usnic_ib_dump_vf_hdr_21423 usnic_ib_dump_vf_hdr 3 21423 NULL
++snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
++tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
++tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
++aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
++__ertm_hdr_size_21450 __ertm_hdr_size 0 21450 NULL
++ReadISAR_21453 ReadISAR 0 21453 NULL
++mei_nfc_send_21477 mei_nfc_send 3 21477 NULL
++read_file_xmit_21487 read_file_xmit 3 21487 NULL
++mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
++btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
++il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
++cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
++rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
++rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
++xfs_buf_read_uncached_21585 xfs_buf_read_uncached 3 21585 NULL
++snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
++ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
++filemap_get_page_21606 filemap_get_page 2 21606 NULL
++gfs2_glock_nq_init_21624 gfs2_glock_nq_init 0 21624 NULL
++__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
++atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
++ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
++rtllib_alloc_txb_21687 rtllib_alloc_txb 1 21687 NULL
++evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
++xfs_btree_insert_21712 xfs_btree_insert 0 21712 NULL
++update_time_21719 update_time 0 21719 NULL
++unix_skb_len_21722 unix_skb_len 0 21722 NULL
++lprocfs_wr_import_21728 lprocfs_wr_import 3 21728 NULL
++mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
++usbat_flash_read_data_21762 usbat_flash_read_data 4 21762 NULL
++gen_pool_add_21776 gen_pool_add 3 21776 NULL
++xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
++dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
++__ocfs2_cluster_lock_21812 __ocfs2_cluster_lock 0 21812 NULL
++_iwl_dbgfs_sta_drain_write_21837 _iwl_dbgfs_sta_drain_write 3 21837 NULL
++oom_adj_read_21847 oom_adj_read 3 21847 NULL
++lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
++brcms_debugfs_hardware_read_21867 brcms_debugfs_hardware_read 3 21867 NULL
++sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL
++ldlm_lock_create_21888 ldlm_lock_create 7 21888 NULL
++dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
++qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
++__build_xattrs_21979 __build_xattrs 0 21979 NULL
++SYSC_prctl_21980 SYSC_prctl 4 21980 NULL
++compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 0-3 22001 NULL nohasharray
++rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 &compat_rw_copy_check_uvector_22001
++regcache_sync_block_raw_flush_22021 regcache_sync_block_raw_flush 3-4 22021 NULL
++btrfs_get_16_22023 btrfs_get_16 0 22023 NULL
++_sp2d_min_pg_22032 _sp2d_min_pg 0 22032 NULL
++lookup_metapath_22039 lookup_metapath 0 22039 NULL
++zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
++ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
++btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2-3 22077 NULL
++mem_rw_22085 mem_rw 3 22085 NULL
++xfs_bmapi_reserve_delalloc_22086 xfs_bmapi_reserve_delalloc 0 22086 NULL
++kstrtos32_from_user_22087 kstrtos32_from_user 2 22087 NULL
++rt2x00debug_read_crypto_stats_22109 rt2x00debug_read_crypto_stats 3 22109 NULL
++shmem_add_to_page_cache_22121 shmem_add_to_page_cache 0 22121 NULL
++snd_hda_codec_read_22130 snd_hda_codec_read 0 22130 NULL
++SyS_sched_setaffinity_22148 SyS_sched_setaffinity 2 22148 NULL
++do_tcp_sendpages_22155 do_tcp_sendpages 4 22155 NULL
++__kfifo_alloc_22173 __kfifo_alloc 3 22173 NULL
++rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 NULL
++mem_write_22232 mem_write 3 22232 NULL
++p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
++prepare_to_wait_event_22247 prepare_to_wait_event 0 22247 NULL
++compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
++ping_common_sendmsg_22261 ping_common_sendmsg 5 22261 NULL
++add_res_tree_22263 add_res_tree 7 22263 NULL
++__btrfs_direct_write_22273 __btrfs_direct_write 4-0 22273 NULL
++queue_max_sectors_22280 queue_max_sectors 0 22280 NULL
++__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL nohasharray
++pci_vpd_srdt_size_22300 pci_vpd_srdt_size 0 22300 &__tun_chr_ioctl_22300
++mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
++lov_setstripe_22307 lov_setstripe 2 22307 NULL
++udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
++atomic_read_22342 atomic_read 0 22342 NULL
++ll_lazystatfs_seq_write_22353 ll_lazystatfs_seq_write 3 22353 NULL
++snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
++evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
++alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
++zoran_write_22404 zoran_write 3 22404 NULL
++queue_reply_22416 queue_reply 3 22416 NULL
++__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
++queue_max_segments_22441 queue_max_segments 0 22441 NULL
++handle_received_packet_22457 handle_received_packet 3 22457 NULL
++ecryptfs_write_22488 ecryptfs_write 4-3 22488 NULL
++qib_user_sdma_alloc_header_22490 qib_user_sdma_alloc_header 2 22490 NULL
++cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
++mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
++trim_no_bitmap_22524 trim_no_bitmap 4-3 22524 NULL
++ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
++agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
++dbFindCtl_22587 dbFindCtl 0 22587 NULL
++snapshot_read_22601 snapshot_read 3 22601 NULL
++sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
++ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
++wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
++pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
++iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
++ext4_ext_direct_IO_22679 ext4_ext_direct_IO 4 22679 NULL
++l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
++bch_dump_read_22685 bch_dump_read 3 22685 NULL
++reg_umr_22686 reg_umr 5 22686 NULL
++alloc_libipw_22708 alloc_libipw 1 22708 NULL
++cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4-0 22735 NULL
++ceph_decode_32_22738 ceph_decode_32 0 22738 NULL nohasharray
++__mei_cl_send_22738 __mei_cl_send 3 22738 &ceph_decode_32_22738
++__writeback_single_inode_22739 __writeback_single_inode 0 22739 NULL
++iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
++qlcnic_sriov_init_22762 qlcnic_sriov_init 2 22762 NULL
++print_frame_22769 print_frame 0 22769 NULL
++ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
++__break_lease_22777 __break_lease 0 22777 NULL
++vnic_dev_get_res_count_22791 vnic_dev_get_res_count 0 22791 NULL
++pla_ocp_write_22802 pla_ocp_write 4 22802 NULL
++__generic_copy_to_user_intel_22806 __generic_copy_to_user_intel 0-3 22806 NULL
++read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
++create_attr_set_22861 create_attr_set 1 22861 NULL
++hash_ip6_expire_22867 hash_ip6_expire 4 22867 NULL
++vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
++usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
++mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
++ion_handle_test_kernel_22900 ion_handle_test_kernel 4-3 22900 NULL nohasharray
++policy_emit_config_values_22900 policy_emit_config_values 3 22900 &ion_handle_test_kernel_22900
++__set_xattr_22923 __set_xattr 0 22923 NULL
++xstateregs_set_22932 xstateregs_set 4 22932 NULL
++pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
++alloc_sglist_22960 alloc_sglist 2-3 22960 NULL
++caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
++vme_get_size_22964 vme_get_size 0 22964 NULL
++tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
++cached_dev_cache_miss_22979 cached_dev_cache_miss 4 22979 NULL
++usb_get_langid_22983 usb_get_langid 0 22983 NULL
++remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
++viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
++cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
++ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0 23029 NULL
++st_status_23032 st_status 5 23032 NULL
++nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
++comedi_buf_write_n_available_23057 comedi_buf_write_n_available 0 23057 NULL
++security_inode_killpriv_23060 security_inode_killpriv 0 23060 NULL
++reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
++unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
++mei_cl_send_23068 mei_cl_send 3 23068 NULL
++kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
++raw_sendmsg_23078 raw_sendmsg 4 23078 NULL
++get_user_hdr_len_23079 get_user_hdr_len 0 23079 NULL
++isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
++rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
++ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
++xfs_dir_ialloc_23100 xfs_dir_ialloc 0 23100 NULL
++pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
++dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
++mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
++nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL nohasharray
++bset_tree_bytes_23111 bset_tree_bytes 0 23111 &nl80211_send_rx_auth_23111
++__clear_user_23118 __clear_user 0-2 23118 NULL
++drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
++ata_scsi_change_queue_depth_23126 ata_scsi_change_queue_depth 2 23126 NULL
++read_file_ani_23161 read_file_ani 3 23161 NULL
++usblp_write_23178 usblp_write 3 23178 NULL
++gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
++mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
++tty_buffer_request_room_23228 tty_buffer_request_room 2-0 23228 NULL
++xlog_get_bp_23229 xlog_get_bp 2 23229 NULL nohasharray
++__read_status_pci_23229 __read_status_pci 0 23229 &xlog_get_bp_23229
++ft1000_read_dpram_mag_32_23232 ft1000_read_dpram_mag_32 0 23232 NULL
++rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
++__gfn_to_rmap_23240 __gfn_to_rmap 2-1 23240 NULL
++nv50_ram_create__23241 nv50_ram_create_ 4 23241 NULL
++sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL
++uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
++diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
++i2cdev_write_23310 i2cdev_write 3 23310 NULL
++__aa_kvmalloc_23320 __aa_kvmalloc 1 23320 NULL
++page_readlink_23346 page_readlink 3 23346 NULL
++kmem_zalloc_large_23351 kmem_zalloc_large 1 23351 NULL
++get_dst_timing_23358 get_dst_timing 0 23358 NULL nohasharray
++write_inode_23358 write_inode 0 23358 &get_dst_timing_23358
++fd_setup_write_same_buf_23369 fd_setup_write_same_buf 3 23369 NULL
++iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
++vga_mm_r_23419 vga_mm_r 0 23419 NULL
++ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 NULL
++hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
++__ata_change_queue_depth_23484 __ata_change_queue_depth 3 23484 NULL
++linear_conf_23485 linear_conf 2 23485 NULL nohasharray
++sync_mapping_buffers_23485 sync_mapping_buffers 0 23485 &linear_conf_23485
++event_filter_read_23494 event_filter_read 3 23494 NULL
++lustre_acl_xattr_merge2ext_23502 lustre_acl_xattr_merge2ext 2 23502 NULL
++devm_iio_device_alloc_23511 devm_iio_device_alloc 2 23511 NULL
++__proc_cpt_table_23516 __proc_cpt_table 5 23516 NULL
++ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
++tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
++btrfs_super_bytenr_23561 btrfs_super_bytenr 0 23561 NULL
++venus_symlink_23570 venus_symlink 6-4 23570 NULL
++iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
++xfpregs_get_23586 xfpregs_get 4 23586 NULL
++snd_interval_min_23590 snd_interval_min 0 23590 NULL
++islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
++xfs_qm_dqread_23613 xfs_qm_dqread 0 23613 NULL
++ocfs2_journal_access_23616 ocfs2_journal_access 0 23616 NULL
++__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
++sInW_23663 sInW 0 23663 NULL
++SyS_connect_23669 SyS_connect 3 23669 NULL
++cx18_read_23699 cx18_read 3 23699 NULL
++at_get_23708 at_get 0 23708 NULL
++rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
++__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
++__build_packet_message_23778 __build_packet_message 4-10 23778 NULL
++security_inode_getxattr_23781 security_inode_getxattr 0 23781
NULL ++cfg80211_inform_bss_width_frame_23782 cfg80211_inform_bss_width_frame 5 23782 NULL ++mpt_free_res_23793 mpt_free_res 5 23793 NULL ++map_write_23795 map_write 3 23795 NULL ++rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL ++ocfs2_replace_cow_23803 ocfs2_replace_cow 0 23803 NULL ++__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL ++lustre_msg_buflen_23827 lustre_msg_buflen 0 23827 NULL ++ceph_copy_page_vector_to_user_23829 ceph_copy_page_vector_to_user 0-4-3 23829 NULL ++pgdat_end_pfn_23842 pgdat_end_pfn 0 23842 NULL ++iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL ++p54_init_common_23850 p54_init_common 1 23850 NULL ++bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL ++ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL ++ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL ++writeback_single_inode_23881 writeback_single_inode 0 23881 NULL nohasharray ++nouveau_clock_create__23881 nouveau_clock_create_ 5 23881 &writeback_single_inode_23881 ++tipc_snprintf_23893 tipc_snprintf 2-0 23893 NULL ++add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray ++ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911 ++f2fs_getxattr_23917 f2fs_getxattr 0 23917 NULL ++mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 NULL nohasharray ++ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 &mpihelp_mul_karatsuba_case_23918 ++kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL ++uvc_endpoint_max_bpi_23944 uvc_endpoint_max_bpi 0 23944 NULL ++cifs_setxattr_23957 cifs_setxattr 4 23957 NULL ++size_roundup_power2_23958 size_roundup_power2 0-1 23958 NULL ++sddr55_write_data_23983 sddr55_write_data 4 23983 NULL ++zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL ++cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL ++give_pages_24021 give_pages 3 24021 NULL ++adis16400_show_serial_number_24037 adis16400_show_serial_number 3 24037 NULL ++hmac_setkey_24043 hmac_setkey 3 24043 NULL ++afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL ++blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL ++vb2_fop_read_24080 vb2_fop_read 3 24080 NULL ++pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL ++request_key_auth_read_24109 request_key_auth_read 3 24109 NULL ++lov_brw_24122 lov_brw 4 24122 NULL ++mpu401_read_24126 mpu401_read 3-0 24126 NULL ++_picolcd_flash_write_24134 _picolcd_flash_write 4 24134 NULL ++irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL nohasharray ++xfs_btree_increment_24139 xfs_btree_increment 0 24139 &irnet_ctrl_write_24139 ++SyS_sethostname_24150 SyS_sethostname 2 24150 NULL ++trim_bitmaps_24158 trim_bitmaps 3 24158 NULL ++adu_read_24177 adu_read 3 24177 NULL ++safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL ++irq_remapping_setup_msi_irqs_24194 irq_remapping_setup_msi_irqs 2 24194 NULL ++ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL ++tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL ++pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL nohasharray ++mei_amthif_read_24224 mei_amthif_read 4 24224 &pcpu_embed_first_chunk_24224 ++pci_num_vf_24235 pci_num_vf 0 24235 NULL ++sel_read_bool_24236 sel_read_bool 3 24236 NULL ++xfs_bmap_rtalloc_24237 xfs_bmap_rtalloc 0 24237 NULL ++em28xx_alloc_urbs_24260 em28xx_alloc_urbs 4-6 24260 NULL ++calculate_sizes_24273 calculate_sizes 2 24273 NULL 
++thin_status_24278 thin_status 5 24278 NULL ++msg_size_24288 msg_size 0 24288 NULL ++gserial_connect_24302 gserial_connect 0 24302 NULL ++btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL ++reserve_metadata_bytes_24313 reserve_metadata_bytes 0 24313 NULL ++ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL ++si476x_radio_read_acf_blob_24336 si476x_radio_read_acf_blob 3 24336 NULL ++prepare_pages_24349 prepare_pages 0 24349 NULL ++kzalloc_node_24352 kzalloc_node 1 24352 NULL ++qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL ++cfi_read_pri_24366 cfi_read_pri 3 24366 NULL ++btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL ++igetword_24373 igetword 0 24373 NULL nohasharray ++break_deleg_24373 break_deleg 0 24373 &igetword_24373 ++max_io_len_24384 max_io_len 0-1 24384 NULL ++mpt_alloc_res_24387 mpt_alloc_res 5 24387 NULL ++xfs_bmapi_read_24392 xfs_bmapi_read 0 24392 NULL ++osc_cur_grant_bytes_seq_write_24396 osc_cur_grant_bytes_seq_write 3 24396 NULL ++getxattr_24398 getxattr 4 24398 NULL nohasharray ++pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 &getxattr_24398 ++blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL ++nvme_trans_log_supp_pages_24418 nvme_trans_log_supp_pages 3 24418 NULL ++b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL ++xenbus_file_read_24427 xenbus_file_read 3 24427 NULL ++ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL ++copy_and_ioctl_24434 copy_and_ioctl 4 24434 NULL ++ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL ++smk_user_access_24440 smk_user_access 3 24440 NULL nohasharray ++rtw_set_wps_assoc_resp_24440 rtw_set_wps_assoc_resp 3 24440 &smk_user_access_24440 ++evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL ++lbs_highsnr_write_24460 lbs_highsnr_write 3 24460 NULL ++skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL ++dut_mode_read_24489 dut_mode_read 3 24489 NULL ++read_file_spec_scan_ctl_24491 read_file_spec_scan_ctl 3 24491 NULL ++pd_video_read_24510 pd_video_read 3 24510 NULL ++request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL ++xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL ++do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL ++write_cache_pages_24562 write_cache_pages 0 24562 NULL ++SyS_pselect6_24582 SyS_pselect6 1 24582 NULL ++udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL ++sensor_hub_get_physical_device_count_24605 sensor_hub_get_physical_device_count 0 24605 NULL nohasharray ++lov_alloc_memmd_24605 lov_alloc_memmd 2 24605 &sensor_hub_get_physical_device_count_24605 ++SyS_poll_24620 SyS_poll 2 24620 NULL ++context_alloc_24645 context_alloc 3 24645 NULL ++blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL ++xfs_log_commit_cil_24653 xfs_log_commit_cil 0 24653 NULL ++btrfs_check_data_free_space_24692 btrfs_check_data_free_space 0 24692 NULL ++datafab_write_data_24696 datafab_write_data 4 24696 NULL ++intelfbhw_get_p1p2_24703 intelfbhw_get_p1p2 2 24703 NULL ++simple_attr_read_24738 simple_attr_read 3 24738 NULL ++qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL ++get_dma_residue_24749 get_dma_residue 0 24749 NULL ++kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL ++ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL ++datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL ++cache_read_24790 cache_read 3 24790 NULL ++user_regset_copyout_24796 user_regset_copyout 7 24796 NULL ++kvm_read_guest_virt_helper_24804 kvm_read_guest_virt_helper 3-1 24804 
NULL ++ath6kl_fwlog_mask_write_24810 ath6kl_fwlog_mask_write 3 24810 NULL ++net2272_read_24825 net2272_read 0 24825 NULL ++snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL ++snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL ++pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray ++l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869 ++queues_read_24877 queues_read 3 24877 NULL ++__vxge_hw_vp_initialize_24885 __vxge_hw_vp_initialize 2 24885 NULL ++xfs_qm_dqattach_24898 xfs_qm_dqattach 0 24898 NULL ++codec_list_read_file_24910 codec_list_read_file 3 24910 NULL ++v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL nohasharray ++__btrfs_free_extent_24927 __btrfs_free_extent 7 24927 &v4l2_ctrl_new_24927 ++ocfs2_fiemap_24949 ocfs2_fiemap 4-3 24949 NULL ++packet_sendmsg_24954 packet_sendmsg 4 24954 NULL ++twl_i2c_write_u8_24976 twl_i2c_write_u8 3 24976 NULL ++llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL ++slot_get_24999 slot_get 0 24999 NULL ++key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL ++il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL ++ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL ++nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL ++load_unaligned_zeropad_25050 load_unaligned_zeropad 0 25050 NULL ++btrfs_stack_key_blockptr_25058 btrfs_stack_key_blockptr 0 25058 NULL ++gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL ++ll_track_pid_seq_write_25068 ll_track_pid_seq_write 3 25068 NULL ++SYSC_listxattr_25072 SYSC_listxattr 3 25072 NULL ++ima_appraise_measurement_25093 ima_appraise_measurement 6 25093 NULL ++blkg_path_25099 blkg_path 3 25099 NULL ++snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL ++gfs2_quota_check_25130 gfs2_quota_check 0 25130 NULL ++ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL ++kvm_mmu_notifier_change_pte_25169 kvm_mmu_notifier_change_pte 3 25169 NULL ++sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL ++mon_stat_read_25238 mon_stat_read 3 25238 NULL ++stripe_status_25259 stripe_status 5 25259 NULL ++snd_pcm_start_25273 snd_pcm_start 0 25273 NULL ++crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL ++vfs_writev_25278 vfs_writev 3 25278 NULL ++l2tp_session_create_25286 l2tp_session_create 1 25286 NULL ++ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 NULL ++rng_buffer_size_25348 rng_buffer_size 0 25348 NULL ++SYSC_kexec_load_25361 SYSC_kexec_load 2 25361 NULL ++unix_mkname_25368 unix_mkname 0-2 25368 NULL ++sel_read_mls_25369 sel_read_mls 3 25369 NULL ++vsp1_entity_init_25407 vsp1_entity_init 3 25407 NULL ++xfs_ialloc_pagi_init_25411 xfs_ialloc_pagi_init 0 25411 NULL ++dai_list_read_file_25421 dai_list_read_file 3 25421 NULL ++xfs_qm_dqtobp_25448 xfs_qm_dqtobp 0 25448 NULL ++generic_file_buffered_write_25464 generic_file_buffered_write 4-0-7 25464 NULL ++ipath_decode_err_25468 ipath_decode_err 3 25468 NULL ++crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL ++ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4-0 25502 NULL ++snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL ++xfs_alloc_fix_freelist_25514 xfs_alloc_fix_freelist 0 25514 NULL ++sb_permission_25523 sb_permission 0 25523 NULL ++ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL ++ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL ++wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL ++ht_print_chan_25556 ht_print_chan 0-3-4 25556 NULL 
++skb_tailroom_25567 skb_tailroom 0 25567 NULL ++ping_recvmsg_25597 ping_recvmsg 4 25597 NULL ++copy_user_generic_25611 copy_user_generic 0 25611 NULL ++proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL ++befs_utf2nls_25628 befs_utf2nls 3 25628 NULL nohasharray ++__get_user_pages_25628 __get_user_pages 0 25628 &befs_utf2nls_25628 ++__direct_map_25647 __direct_map 5-6 25647 NULL ++aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL ++lpfc_idiag_cmd_get_25672 lpfc_idiag_cmd_get 2 25672 NULL ++sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL ++rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL ++ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL ++__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 NULL nohasharray ++sel_write_context_25726 sel_write_context 3 25726 &__alloc_bootmem_low_node_25726 ++cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL ++event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL ++sg_read_25799 sg_read 3 25799 NULL ++xfs_alloc_ag_vextent_exact_25810 xfs_alloc_ag_vextent_exact 0 25810 NULL ++system_enable_read_25815 system_enable_read 3 25815 NULL ++realloc_buffer_25816 realloc_buffer 2 25816 NULL ++pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL ++parport_read_25855 parport_read 0 25855 NULL ++xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL ++key_attr_size_25865 key_attr_size 0 25865 NULL ++ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL ++run_delalloc_nocow_25896 run_delalloc_nocow 3-4 25896 NULL ++sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL ++lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL ++nvme_trans_mode_page_create_25908 nvme_trans_mode_page_create 7-4 25908 NULL ++do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL ++rcname_read_25919 rcname_read 3 25919 NULL ++snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL ++key_flags_read_25931 key_flags_read 3 25931 NULL ++copy_play_buf_25932 copy_play_buf 3 25932 NULL ++flush_25957 flush 2 25957 NULL ++udp_setsockopt_25985 udp_setsockopt 5 25985 NULL ++lustre_msg_buflen_v2_25997 lustre_msg_buflen_v2 0 25997 NULL ++SyS_process_vm_readv_26019 SyS_process_vm_readv 3-5 26019 NULL ++mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL ++selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL ++keyctl_update_key_26061 keyctl_update_key 3 26061 NULL ++btrfs_wait_ordered_range_26086 btrfs_wait_ordered_range 0 26086 NULL ++rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL ++i8042_pnp_id_to_string_26108 i8042_pnp_id_to_string 3 26108 NULL ++read_sb_page_26119 read_sb_page 5 26119 NULL ++ath9k_hw_name_26146 ath9k_hw_name 3 26146 NULL ++copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL ++gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray ++ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166 ++disk_devt_26180 disk_devt 0 26180 NULL ++cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL ++ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL ++xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL ++mce_write_26201 mce_write 3 26201 NULL ++mwifiex_regrdwr_write_26225 mwifiex_regrdwr_write 3 26225 NULL ++_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL ++rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 NULL ++simple_setattr_26234 
simple_setattr 0 26234 NULL ++genwqe_ffdc_buff_size_26263 genwqe_ffdc_buff_size 0 26263 NULL ++crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL ++apei_resources_request_26279 apei_resources_request 0 26279 NULL ++wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL ++snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL ++pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL ++check_can_nocow_26336 check_can_nocow 2 26336 NULL ++snd_vx_check_reg_bit_26344 snd_vx_check_reg_bit 0 26344 NULL ++ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 6-3 26357 NULL ++cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL ++invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL ++ntty_write_26404 ntty_write 3 26404 NULL ++firmware_store_26408 firmware_store 4 26408 NULL ++pagemap_read_26441 pagemap_read 3 26441 NULL ++tower_read_26461 tower_read 3 26461 NULL nohasharray ++enc_pools_add_pages_26461 enc_pools_add_pages 1 26461 &tower_read_26461 ++ib_alloc_device_26483 ib_alloc_device 1 26483 NULL ++ulong_write_file_26485 ulong_write_file 3 26485 NULL ++dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL ++read_vmcore_26501 read_vmcore 3 26501 NULL ++uhid_char_write_26502 uhid_char_write 3 26502 NULL ++vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 4-3 26507 NULL ++iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL ++alloc_ep_req_26521 alloc_ep_req 3-2 26521 NULL ++SyS_rt_sigpending_26538 SyS_rt_sigpending 2 26538 NULL ++__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL ++dio_new_bio_26562 dio_new_bio 0 26562 NULL ++rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL ++xfs_rtcheck_range_26614 xfs_rtcheck_range 0 26614 NULL ++pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL ++irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray ++inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650 ++nouveau_volt_create__26654 nouveau_volt_create_ 4 26654 NULL ++cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL ++flowinfo_read_26683 flowinfo_read 3 26683 NULL ++sysfs_add_file_26716 sysfs_add_file 0 26716 NULL ++nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL ++pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL ++bos_desc_26752 bos_desc 0 26752 NULL ++snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL ++dma_map_single_attrs_26779 dma_map_single_attrs 0 26779 NULL ++qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL ++cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL ++iwl_trans_read_mem32_26825 iwl_trans_read_mem32 0 26825 NULL ++smk_write_load_26829 smk_write_load 3 26829 NULL ++xfs_alloc_pagf_init_26834 xfs_alloc_pagf_init 0 26834 NULL ++scnprint_id_26842 scnprint_id 3-0 26842 NULL ++ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL ++ss_alloc_ep_req_26848 ss_alloc_ep_req 2 26848 NULL ++tipc_conn_sendmsg_26867 tipc_conn_sendmsg 5 26867 NULL ++ath6kl_create_qos_write_26879 ath6kl_create_qos_write 3 26879 NULL ++svc_print_xprts_26881 svc_print_xprts 0 26881 NULL ++skb_zerocopy_headlen_26910 skb_zerocopy_headlen 0 26910 NULL ++hhf_zalloc_26912 hhf_zalloc 1 26912 NULL ++cfg80211_process_auth_26916 cfg80211_process_auth 3 26916 NULL ++x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL ++scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL ++sctp_setsockopt_adaptation_layer_26935 
sctp_setsockopt_adaptation_layer 3 26935 NULL nohasharray ++pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 &sctp_setsockopt_adaptation_layer_26935 ++hecubafb_write_26942 hecubafb_write 3 26942 NULL ++do_trimming_26952 do_trimming 3 26952 NULL nohasharray ++extract_entropy_user_26952 extract_entropy_user 3 26952 &do_trimming_26952 ++do_direct_IO_26979 do_direct_IO 0 26979 NULL ++xfs_filestream_associate_27030 xfs_filestream_associate 0 27030 NULL ++__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL ++ext4_convert_unwritten_extents_27064 ext4_convert_unwritten_extents 4-3-0 27064 NULL ++snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL ++paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL ++alloc_fdmem_27083 alloc_fdmem 1 27083 NULL ++btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL nohasharray ++ath9k_hw_4k_dump_eeprom_27089 ath9k_hw_4k_dump_eeprom 5-4 27089 &btmrvl_hscmd_write_27089 ++__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL ++get_kernel_page_27133 get_kernel_page 0 27133 NULL ++drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL ++pms_capture_27142 pms_capture 4 27142 NULL ++btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL ++snd_compr_calc_avail_27165 snd_compr_calc_avail 0 27165 NULL ++ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL ++__sg_alloc_table_27198 __sg_alloc_table 0 27198 NULL ++write_kmem_27225 write_kmem 3 27225 NULL ++dbAllocAG_27228 dbAllocAG 0 27228 NULL ++rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL ++ll_track_gid_seq_write_27267 ll_track_gid_seq_write 3 27267 NULL ++comedi_alloc_devpriv_27272 comedi_alloc_devpriv 2 27272 NULL ++copy_from_buf_27308 copy_from_buf 4-2 27308 NULL ++virtqueue_add_inbuf_27312 virtqueue_add_inbuf 3 27312 NULL ++snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL ++afs_cell_create_27346 afs_cell_create 2 27346 NULL ++iwl_dbgfs_csr_write_27363 iwl_dbgfs_csr_write 3 27363 NULL ++pcbit_stat_27364 pcbit_stat 2 27364 NULL ++seq_read_27411 seq_read 3 27411 NULL ++ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL ++ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL ++ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0 27422 NULL ++cypress_write_27423 cypress_write 4 27423 NULL ++sddr09_read_data_27447 sddr09_read_data 3 27447 NULL ++xfs_btree_lookup_get_block_27448 xfs_btree_lookup_get_block 0 27448 NULL ++v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL ++hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL ++ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL ++btrfs_get_64_27499 btrfs_get_64 0 27499 NULL ++garmin_read_process_27509 garmin_read_process 3 27509 NULL ++oti_alloc_cookies_27510 oti_alloc_cookies 2 27510 NULL ++ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL ++snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL ++SyS_fgetxattr_27571 SyS_fgetxattr 4 27571 NULL ++sco_sock_recvmsg_27572 sco_sock_recvmsg 4 27572 NULL ++libipw_alloc_txb_27579 libipw_alloc_txb 1 27579 NULL ++ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 NULL nohasharray ++nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &ocfs2_xattr_ibody_get_27642 nohasharray ++read_flush_procfs_27642 read_flush_procfs 3 27642 &nl80211_send_connect_result_27642 nohasharray ++ocfs2_direct_IO_27642 ocfs2_direct_IO 4 27642 &read_flush_procfs_27642 nohasharray ++xfs_alloc_vextent_27642 xfs_alloc_vextent 0 27642 &ocfs2_direct_IO_27642 
++add_new_gdb_27643 add_new_gdb 3 27643 NULL ++btrfs_fallocate_27647 btrfs_fallocate 3-4 27647 NULL ++qnx6_readpages_27657 qnx6_readpages 4 27657 NULL ++cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL ++ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL ++fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL ++evm_write_key_27715 evm_write_key 3 27715 NULL ++ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL ++xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL ++SyS_setsockopt_27759 SyS_setsockopt 5 27759 NULL ++__lov_setstripe_27782 __lov_setstripe 2 27782 NULL ++twl4030_set_gpio_dataout_27792 twl4030_set_gpio_dataout 1 27792 NULL ++SyS_readv_27804 SyS_readv 3 27804 NULL ++mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL ++hpt374_read_freq_27828 hpt374_read_freq 0 27828 NULL ++init_header_complete_27833 init_header_complete 0 27833 NULL ++read_profile_27859 read_profile 3 27859 NULL ++sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL ++ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL ++unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL ++check_mapped_name_27943 check_mapped_name 3 27943 NULL ++bio_next_split_27961 bio_next_split 2 27961 NULL nohasharray ++tracing_clock_write_27961 tracing_clock_write 3 27961 &bio_next_split_27961 ++security_path_chown_27966 security_path_chown 0 27966 NULL ++tipc_media_addr_printf_27971 tipc_media_addr_printf 2 27971 NULL ++device_register_27972 device_register 0 27972 NULL nohasharray ++mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 &device_register_27972 ++xfs_alloc_fixup_trees_27975 xfs_alloc_fixup_trees 0 27975 NULL ++pci_enable_device_flags_27977 pci_enable_device_flags 0 27977 NULL nohasharray ++__kernfs_setattr_27977 __kernfs_setattr 0 27977 &pci_enable_device_flags_27977 ++edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL ++seq_get_buf_28006 seq_get_buf 0 28006 NULL ++snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL ++powercap_register_zone_28028 powercap_register_zone 6 28028 NULL ++sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL ++rts51x_xd_rw_28046 rts51x_xd_rw 3-4 28046 NULL ++cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2-4 28053 NULL ++pool_status_28055 pool_status 5 28055 NULL ++init_rs_non_canonical_28059 init_rs_non_canonical 1 28059 NULL ++lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL ++tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL ++mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL ++rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL ++vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL ++memblock_virt_alloc_from_nopanic_28146 memblock_virt_alloc_from_nopanic 1 28146 NULL ++video_read_28148 video_read 3 28148 NULL ++snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL ++stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL ++vread_28173 vread 0-3 28173 NULL ++macvtap_get_user_28185 macvtap_get_user 4 28185 NULL ++counter_free_res_28187 counter_free_res 5 28187 NULL ++read_disk_sb_28188 read_disk_sb 2 28188 NULL ++nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL ++__qp_memcpy_from_queue_28220 __qp_memcpy_from_queue 3-4 28220 NULL ++line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL ++amd_nb_num_28228 amd_nb_num 0 28228 NULL 
++fuse_direct_IO_28275 fuse_direct_IO 4 28275 NULL ++usemap_size_28281 usemap_size 0 28281 NULL ++inline_xattr_size_28285 inline_xattr_size 0 28285 NULL ++dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL ++SyS_ppoll_28290 SyS_ppoll 2 28290 NULL ++kstrtos16_from_user_28300 kstrtos16_from_user 2 28300 NULL ++nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 NULL ++snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL ++security_inode_link_28327 security_inode_link 0 28327 NULL ++generic_write_checks_28329 generic_write_checks 0 28329 NULL ++bm_entry_write_28338 bm_entry_write 3 28338 NULL ++tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL ++snapshot_write_28351 snapshot_write 3 28351 NULL ++xfs_iomap_write_unwritten_28365 xfs_iomap_write_unwritten 3-2 28365 NULL ++batadv_handle_tt_response_28370 batadv_handle_tt_response 4 28370 NULL ++dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL ++tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL ++bypass_pwup_write_28416 bypass_pwup_write 3 28416 NULL ++subdev_ioctl_28417 subdev_ioctl 2 28417 NULL ++ksocknal_alloc_tx_28426 ksocknal_alloc_tx 2 28426 NULL ++mpage_readpages_28436 mpage_readpages 3 28436 NULL ++xfs_rtfind_back_28450 xfs_rtfind_back 0 28450 NULL ++snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL ++key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL ++alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL ++__filemap_fdatawrite_28485 __filemap_fdatawrite 0 28485 NULL ++ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL ++i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL ++sel_read_policycap_28544 sel_read_policycap 3 28544 NULL ++mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 NULL nohasharray ++run_delalloc_range_28545 run_delalloc_range 3-4 28545 &mptctl_getiocinfo_28545 nohasharray ++aio_read_events_28545 aio_read_events 3 28545 &run_delalloc_range_28545 ++sysfs_create_bin_file_28551 sysfs_create_bin_file 0 28551 NULL ++b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL ++asymmetric_verify_28567 asymmetric_verify 3 28567 NULL ++gfs2_meta_indirect_buffer_28573 gfs2_meta_indirect_buffer 0 28573 NULL ++oxygen_read32_28582 oxygen_read32 0 28582 NULL ++extract_entropy_28604 extract_entropy 5-3 28604 NULL ++kfifo_unused_28612 kfifo_unused 0 28612 NULL ++snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL ++setup_usemap_28636 setup_usemap 3-4 28636 NULL ++qib_handle_6120_hwerrors_28642 qib_handle_6120_hwerrors 3 28642 NULL ++xfs_bmap_finish_28644 xfs_bmap_finish 0 28644 NULL ++p9_fcall_alloc_28652 p9_fcall_alloc 1 28652 NULL ++read_nic_io_byte_28654 read_nic_io_byte 0 28654 NULL ++blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL ++SyS_setgroups16_28686 SyS_setgroups16 1 28686 NULL ++kvm_mmu_get_page_28692 kvm_mmu_get_page 2 28692 NULL ++drm_plane_init_28731 drm_plane_init 6 28731 NULL ++spi_execute_28736 spi_execute 5 28736 NULL ++snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL ++read_file_btcoex_28743 read_file_btcoex 3 28743 NULL ++max_hw_blocks_28748 max_hw_blocks 0 28748 NULL ++rpc_pipe_generic_upcall_28766 rpc_pipe_generic_upcall 4 28766 NULL ++ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL ++sel_write_member_28800 sel_write_member 3 28800 NULL ++iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL ++vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL ++ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL 
++max_io_len_target_boundary_28879 max_io_len_target_boundary 0-1 28879 NULL ++packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL ++da9055_group_write_28904 da9055_group_write 2-3 28904 NULL ++ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL ++iwl_dbgfs_sleep_level_override_write_28925 iwl_dbgfs_sleep_level_override_write 3 28925 NULL ++push_rx_28939 push_rx 3 28939 NULL ++mxuport_prepare_write_buffer_28943 mxuport_prepare_write_buffer 3 28943 NULL ++btrfs_trim_block_group_28963 btrfs_trim_block_group 3-4 28963 NULL ++alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL ++hash_net6_expire_28979 hash_net6_expire 4 28979 NULL ++xfs_alloc_update_28982 xfs_alloc_update 0 28982 NULL ++hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL ++bin_uuid_28999 bin_uuid 3 28999 NULL ++fd_execute_rw_29004 fd_execute_rw 3 29004 NULL ++ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL ++rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL ++btrfs_root_bytenr_29058 btrfs_root_bytenr 0 29058 NULL ++iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL ++lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL ++sctp_getsockopt_assoc_stats_29074 sctp_getsockopt_assoc_stats 2 29074 NULL ++xfs_alloc_ag_vextent_small_29084 xfs_alloc_ag_vextent_small 0 29084 NULL ++iwl_dbgfs_log_event_write_29088 iwl_dbgfs_log_event_write 3 29088 NULL ++i915_error_object_create_sized_29091 i915_error_object_create_sized 3 29091 NULL ++ccp_init_dm_workarea_29097 ccp_init_dm_workarea 3 29097 NULL ++isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL ++snprintf_29125 snprintf 0 29125 NULL ++iov_shorten_29130 iov_shorten 0 29130 NULL ++proc_scsi_write_29142 proc_scsi_write 3 29142 NULL ++kvm_mmu_notifier_clear_flush_young_29154 kvm_mmu_notifier_clear_flush_young 3 29154 NULL ++drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL ++wusb_prf_256_29203 wusb_prf_256 7 29203 NULL ++iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL ++nvme_trans_copy_from_user_29227 nvme_trans_copy_from_user 3 29227 NULL ++irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL ++evdev_handle_get_val_29242 evdev_handle_get_val 5-6 29242 NULL ++security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL ++prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL ++ext4_fiemap_29296 ext4_fiemap 4 29296 NULL ++xfs_bmap_btree_to_extents_29297 xfs_bmap_btree_to_extents 0 29297 NULL ++sn9c102_read_29305 sn9c102_read 3 29305 NULL ++__fuse_get_req_29315 __fuse_get_req 2 29315 NULL ++lprocfs_write_helper_29323 lprocfs_write_helper 2 29323 NULL ++xfs_bmbt_change_owner_29325 xfs_bmbt_change_owner 0 29325 NULL ++kvm_handle_hva_29326 kvm_handle_hva 2 29326 NULL ++tun_put_user_29337 tun_put_user 5 29337 NULL ++__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL ++l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL ++read_file_tx99_power_29405 read_file_tx99_power 3 29405 NULL ++mempool_create_29437 mempool_create 1 29437 NULL ++crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL ++p9_client_prepare_req_29448 p9_client_prepare_req 3 29448 NULL ++validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL ++SyS_flistxattr_29474 SyS_flistxattr 3 29474 NULL ++do_register_entry_29478 do_register_entry 4 29478 NULL ++simple_strtoul_29480 simple_strtoul 0 29480 NULL ++btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL ++btrfs_file_extent_disk_bytenr_29505 
btrfs_file_extent_disk_bytenr 0 29505 NULL ++usnic_vnic_spec_dump_29508 usnic_vnic_spec_dump 2 29508 NULL ++write_file_regidx_29517 write_file_regidx 3 29517 NULL ++atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL ++add_to_page_cache_lru_29534 add_to_page_cache_lru 0 29534 NULL ++ftrace_write_29551 ftrace_write 3 29551 NULL ++idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL ++leaf_dealloc_29566 leaf_dealloc 3 29566 NULL ++kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL ++lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL ++security_path_chmod_29578 security_path_chmod 0 29578 NULL ++iwl_dbgfs_missed_beacon_write_29586 iwl_dbgfs_missed_beacon_write 3 29586 NULL ++pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4-0 29589 NULL ++dio_set_defer_completion_29599 dio_set_defer_completion 0 29599 NULL ++slots_per_page_29601 slots_per_page 0 29601 NULL ++osc_cached_mb_seq_write_29610 osc_cached_mb_seq_write 3 29610 NULL ++nla_get_u16_29624 nla_get_u16 0 29624 NULL ++gfs2_alloc_blocks_29630 gfs2_alloc_blocks 0 29630 NULL ++tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL ++sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL ++sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL ++lustre_posix_acl_xattr_2ext_29693 lustre_posix_acl_xattr_2ext 2 29693 NULL ++posix_acl_from_xattr_29708 posix_acl_from_xattr 3 29708 NULL ++probes_write_29711 probes_write 3 29711 NULL ++read_cis_cache_29735 read_cis_cache 4 29735 NULL ++xfs_new_eof_29737 xfs_new_eof 2 29737 NULL ++std_nic_write_29752 std_nic_write 3 29752 NULL ++dbAlloc_29794 dbAlloc 0 29794 NULL ++tcp_sendpage_29829 tcp_sendpage 4 29829 NULL ++__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL ++kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL ++count_partial_29850 count_partial 0 29850 NULL ++xfs_rtfind_forw_29866 xfs_rtfind_forw 0 29866 NULL ++write_file_bool_bmps_29870 write_file_bool_bmps 3 29870 NULL ++ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL ++scsi_end_request_29876 scsi_end_request 3 29876 NULL ++crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL ++xfs_alloc_read_agf_29893 xfs_alloc_read_agf 0 29893 NULL ++lov_ost_pool_extend_29914 lov_ost_pool_extend 2 29914 NULL ++write_file_queue_29922 write_file_queue 3 29922 NULL ++__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray ++ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947 ++dev_mem_write_30028 dev_mem_write 3 30028 NULL ++alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL ++sysfs_add_file_mode_ns_30038 sysfs_add_file_mode_ns 0 30038 NULL ++scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL ++drp_wmove_30043 drp_wmove 4 30043 NULL ++mem_cgroup_charge_common_30047 mem_cgroup_charge_common 0 30047 NULL ++__pci_request_selected_regions_30058 __pci_request_selected_regions 0 30058 NULL ++cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL ++snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL ++rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL ++defragment_dma_buffer_30113 defragment_dma_buffer 0 30113 NULL ++xfs_iget_cache_miss_30115 xfs_iget_cache_miss 0 30115 NULL ++spi_async_locked_30117 spi_async_locked 0 30117 NULL ++recv_stream_30138 recv_stream 4 30138 NULL ++u_memcpya_30139 u_memcpya 3-2 30139 NULL ++dbg_port_buf_30145 dbg_port_buf 2 30145 NULL ++elfcorehdr_read_30159 elfcorehdr_read 2 30159 NULL ++alloc_switch_ctx_30165 alloc_switch_ctx 2 30165 NULL ++expand_inode_data_30169 
expand_inode_data 3-2 30169 NULL ++mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL ++drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL ++__genwqe_readq_30197 __genwqe_readq 0 30197 NULL ++usblp_ioctl_30203 usblp_ioctl 2 30203 NULL ++read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL ++bitmap_file_set_bit_30228 bitmap_file_set_bit 2 30228 NULL ++shmem_unuse_inode_30263 shmem_unuse_inode 0 30263 NULL ++rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL ++try_break_deleg_30271 try_break_deleg 0 30271 NULL nohasharray ++isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 &try_break_deleg_30271 ++compat_readv_30273 compat_readv 3 30273 NULL ++skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL ++pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL ++tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL ++osc_contention_seconds_seq_write_30305 osc_contention_seconds_seq_write 3 30305 NULL ++ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL ++i8254_read_30330 i8254_read 0 30330 NULL ++resource_from_user_30341 resource_from_user 3 30341 NULL ++o2nm_this_node_30342 o2nm_this_node 0 30342 NULL ++gfs2_trans_begin_30359 gfs2_trans_begin 0 30359 NULL ++kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL ++pagecache_write_begin_30364 pagecache_write_begin 0 30364 NULL ++C_SYSC_readv_30369 C_SYSC_readv 3 30369 NULL ++blkdev_issue_zeroout_30392 blkdev_issue_zeroout 3 30392 NULL ++c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL ++get_kernel_pages_30397 get_kernel_pages 0 30397 NULL ++vb2_fop_write_30420 vb2_fop_write 3 30420 NULL ++tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL ++lstcon_session_info_30425 lstcon_session_info 6 30425 NULL ++enable_write_30456 enable_write 3 30456 NULL ++tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL ++urandom_read_30462 urandom_read 3 30462 NULL ++zoran_ioctl_30465 zoran_ioctl 2 30465 NULL ++i2c_ctrl_read_30467 i2c_ctrl_read 0 30467 NULL ++adu_write_30487 adu_write 3 30487 NULL ++dtim_interval_write_30489 dtim_interval_write 3 30489 NULL ++batadv_send_tt_request_30493 batadv_send_tt_request 5 30493 NULL ++__send_duplicate_bios_30498 __send_duplicate_bios 4 30498 NULL ++memblock_virt_alloc_node_30515 memblock_virt_alloc_node 1 30515 NULL ++dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL ++set_config_30526 set_config 0 30526 NULL nohasharray ++debug_debug2_read_30526 debug_debug2_read 3 30526 &set_config_30526 ++xfs_sb_version_hasftype_30559 xfs_sb_version_hasftype 0 30559 NULL ++disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL ++set_le_30581 set_le 4 30581 NULL ++blk_init_tags_30592 blk_init_tags 1 30592 NULL ++sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL ++macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL ++ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL ++compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL ++mlx5_ib_alloc_fast_reg_page_list_30638 mlx5_ib_alloc_fast_reg_page_list 2 30638 NULL ++SyS_listxattr_30647 SyS_listxattr 3 30647 NULL ++jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL ++ni_ai_fifo_read_30681 ni_ai_fifo_read 3 30681 NULL ++dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL nohasharray ++xfs_bmap_add_extent_unwritten_real_30701 xfs_bmap_add_extent_unwritten_real 0 30701 &dccp_setsockopt_ccid_30701 ++lbs_wrbbp_write_30712 lbs_wrbbp_write 3 30712 NULL ++lbs_debugfs_read_30721 
lbs_debugfs_read 3 30721 NULL ++snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL ++snapshot_status_30744 snapshot_status 5 30744 NULL ++fuse_conn_limit_write_30777 fuse_conn_limit_write 3 30777 NULL ++__bio_alloc_30787 __bio_alloc 3 30787 NULL ++smk_read_doi_30813 smk_read_doi 3 30813 NULL ++xlog_grant_head_wait_30829 xlog_grant_head_wait 0 30829 NULL ++get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL ++ath10k_write_fw_dbglog_30835 ath10k_write_fw_dbglog 3 30835 NULL ++sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL ++wd_autoreset_write_30862 wd_autoreset_write 3 30862 NULL ++ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL ++pn_recvmsg_30887 pn_recvmsg 4 30887 NULL ++usnic_debugfs_buildinfo_read_30928 usnic_debugfs_buildinfo_read 3 30928 NULL ++sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL ++tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL ++huge_page_mask_30981 huge_page_mask 0 30981 NULL ++read_file_bt_ant_diversity_30983 read_file_bt_ant_diversity 3 30983 NULL ++lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL ++ima_eventsig_init_31022 ima_eventsig_init 5 31022 NULL ++template_fmt_size_31033 template_fmt_size 0 31033 NULL ++do_setup_msi_irqs_31043 do_setup_msi_irqs 2 31043 NULL ++stride_pg_count_31053 stride_pg_count 0-3-2-1-4-5 31053 NULL ++lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL ++sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL ++proc_gid_map_write_31093 proc_gid_map_write 3 31093 NULL ++compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL ++depth_read_31112 depth_read 3 31112 NULL ++hash_ipportnet6_expire_31118 hash_ipportnet6_expire 4 31118 NULL ++kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL ++size_inside_page_31141 size_inside_page 0 31141 NULL ++w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL ++ch_do_scsi_31171 ch_do_scsi 4 31171 NULL ++r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL ++mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL ++__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL ++cpumask_weight_31215 cpumask_weight 0 31215 NULL ++__read_reg_31216 __read_reg 0 31216 NULL ++atm_get_addr_31221 atm_get_addr 3 31221 NULL ++tcp_recvmsg_31238 tcp_recvmsg 4 31238 NULL ++cyy_readb_31240 cyy_readb 0 31240 NULL ++_create_sg_bios_31244 _create_sg_bios 4 31244 NULL ++ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL ++hash_netportnet4_expire_31290 hash_netportnet4_expire 4 31290 NULL ++uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL ++sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL ++command_file_write_31318 command_file_write 3 31318 NULL ++hwerr_crcbits_31334 hwerr_crcbits 4 31334 NULL ++radix_tree_insert_31336 radix_tree_insert 0 31336 NULL ++em28xx_init_usb_xfer_31337 em28xx_init_usb_xfer 4-6 31337 NULL ++outlen_write_31358 outlen_write 3 31358 NULL ++ieee80211_rx_mgmt_auth_31366 ieee80211_rx_mgmt_auth 3 31366 NULL ++xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL ++vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3-2 31374 NULL ++trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL ++inb_31388 inb 0 31388 NULL ++key_ifindex_read_31411 key_ifindex_read 3 31411 NULL ++_sp2d_max_pg_31422 _sp2d_max_pg 0 31422 NULL ++TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL ++snd_aw2_saa7146_get_hw_ptr_capture_31431 
snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL ++transport_alloc_session_tags_31449 transport_alloc_session_tags 2-3 31449 NULL ++opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL ++xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL ++alg_setkey_31485 alg_setkey 3 31485 NULL ++rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL ++qsfp_2_read_31491 qsfp_2_read 3 31491 NULL ++__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL ++hidraw_write_31536 hidraw_write 3 31536 NULL ++usbvision_read_31555 usbvision_read 3 31555 NULL ++tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL ++get_max_inline_xattr_value_size_31578 get_max_inline_xattr_value_size 0 31578 NULL ++osst_write_31581 osst_write 3 31581 NULL ++snd_compr_get_avail_31584 snd_compr_get_avail 0 31584 NULL ++iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL ++mtd_get_user_prot_info_31616 mtd_get_user_prot_info 0 31616 NULL ++arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL nohasharray ++memblock_virt_alloc_nopanic_31617 memblock_virt_alloc_nopanic 1 31617 &arvo_sysfs_read_31617 ++usnic_ib_dump_vf_31623 usnic_ib_dump_vf 3 31623 NULL ++videobuf_read_one_31637 videobuf_read_one 3 31637 NULL ++pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL ++xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL ++__lgread_31668 __lgread 4 31668 NULL ++copy_from_user_nmi_31672 copy_from_user_nmi 3-0 31672 NULL ++forced_ps_read_31685 forced_ps_read 3 31685 NULL ++fst_recover_rx_error_31687 fst_recover_rx_error 3 31687 NULL ++gfs2_dir_check_31711 gfs2_dir_check 0 31711 NULL ++rs_pretty_print_rate_31727 rs_pretty_print_rate 0 31727 NULL ++utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL nohasharray ++lu_buf_check_and_grow_31735 lu_buf_check_and_grow 2 31735 &utf16s_to_utf8s_31735 ++shmem_pwrite_slow_31741 shmem_pwrite_slow 3-2 31741 NULL ++input_abs_get_max_31742 input_abs_get_max 0 31742 NULL nohasharray ++NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 &input_abs_get_max_31742 ++bcm_char_read_31750 bcm_char_read 3 31750 NULL ++snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL ++SyS_lsetxattr_31766 SyS_lsetxattr 4 31766 NULL ++osync_buffers_list_31789 osync_buffers_list 0 31789 NULL ++usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL ++ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL ++isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL ++data_write_31805 data_write 3 31805 NULL ++SyS_msgsnd_31814 SyS_msgsnd 3 31814 NULL ++strnlen_user_31815 strnlen_user 0-2 31815 NULL ++sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL ++clone_bio_31854 clone_bio 4-3 31854 NULL ++SyS_ppoll_31855 SyS_ppoll 2 31855 NULL ++foreach_descriptor_31873 foreach_descriptor 0 31873 NULL ++iwl_dbgfs_disable_ht40_write_31876 iwl_dbgfs_disable_ht40_write 3 31876 NULL ++drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL ++ddb_output_write_31902 ddb_output_write 3-0 31902 NULL ++xattr_permission_31907 xattr_permission 0 31907 NULL ++lu_buf_realloc_31915 lu_buf_realloc 2 31915 NULL ++new_dir_31919 new_dir 3 31919 NULL ++kmem_alloc_31920 kmem_alloc 1 31920 NULL ++SYSC_sethostname_31940 SYSC_sethostname 2 31940 NULL ++read_mem_31942 read_mem 3 31942 NULL nohasharray ++iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4-0 31942 &read_mem_31942 ++vb2_write_31948 vb2_write 3 31948 NULL ++pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL 
++regcache_rbtree_sync_31964 regcache_rbtree_sync 2 31964 NULL
++iwl_rx_packet_payload_len_31965 iwl_rx_packet_payload_len 0 31965 NULL
++copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
++iblock_execute_rw_31982 iblock_execute_rw 3 31982 NULL nohasharray
++vx_read_status_31982 vx_read_status 0 31982 &iblock_execute_rw_31982
++find_next_zero_bit_31990 find_next_zero_bit 0 31990 NULL
++lustre_acl_xattr_merge2posix_31992 lustre_acl_xattr_merge2posix 2 31992 NULL
++sysfs_create_file_31996 sysfs_create_file 0 31996 NULL
++calc_hmac_32010 calc_hmac 3 32010 NULL
++aead_len_32021 aead_len 0 32021 NULL
++stk_read_32038 stk_read 3 32038 NULL
++SYSC_llistxattr_32061 SYSC_llistxattr 3 32061 NULL
++proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
++xfs_buf_iowait_32068 xfs_buf_iowait 0 32068 NULL
++cow_file_range_inline_32091 cow_file_range_inline 3 32091 NULL
++bio_alloc_32095 bio_alloc 2 32095 NULL
++ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
++disk_status_32120 disk_status 4 32120 NULL
++kobject_add_internal_32133 kobject_add_internal 0 32133 NULL
++venus_link_32165 venus_link 5 32165 NULL
++do_writepages_32173 do_writepages 0 32173 NULL
++del_ptr_32197 del_ptr 4 32197 NULL
++wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
++__mem_cgroup_try_charge_swapin_32204 __mem_cgroup_try_charge_swapin 0 32204 NULL
++riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
++caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
++lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
++ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
++kvm_set_spte_hva_32312 kvm_set_spte_hva 2 32312 NULL
++cas_calc_tabort_32316 cas_calc_tabort 0 32316 NULL
++SyS_select_32319 SyS_select 1 32319 NULL
++nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
++nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
++xfs_bmap_extsize_align_32338 xfs_bmap_extsize_align 0 32338 NULL
++t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
++dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL nohasharray
++rx_streaming_always_write_32357 rx_streaming_always_write 3 32357 &dispatch_ioctl_32357
++ReadHDLCPCI_32362 ReadHDLCPCI 0 32362 NULL nohasharray
++sel_read_initcon_32362 sel_read_initcon 3 32362 &ReadHDLCPCI_32362
++ocfs2_cancel_convert_32392 ocfs2_cancel_convert 0 32392 NULL
++ll_setxattr_common_32398 ll_setxattr_common 4 32398 NULL
++xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
++vmci_qp_alloc_32405 vmci_qp_alloc 5-3 32405 NULL
++xfs_alloc_put_freelist_32437 xfs_alloc_put_freelist 0 32437 NULL
++cache_status_32462 cache_status 5 32462 NULL
++fill_readbuf_32464 fill_readbuf 3 32464 NULL
++dgap_usertoboard_32490 dgap_usertoboard 4 32490 NULL
++ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
++bypass_pwoff_write_32499 bypass_pwoff_write 3 32499 NULL
++mdc_pinger_recov_seq_write_32510 mdc_pinger_recov_seq_write 3 32510 NULL
++ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
++disconnect_32521 disconnect 4 32521 NULL
++qsfp_read_32522 qsfp_read 0-2-4 32522 NULL
++ocfs2_refresh_qinfo_32524 ocfs2_refresh_qinfo 0 32524 NULL
++ilo_read_32531 ilo_read 3 32531 NULL
++ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
++format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
++aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
++osc_iocontrol_32565 osc_iocontrol 3 32565 NULL
++mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
++pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
++read_file_beacon_32595 read_file_beacon 3 32595 NULL
++ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
++xfs_bmap_last_offset_32614 xfs_bmap_last_offset 0 32614 NULL
++irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
++cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
++kvmalloc_32646 kvmalloc 1 32646 NULL
++ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
++generic_readlink_32654 generic_readlink 3 32654 NULL
++move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
++apei_res_add_32674 apei_res_add 0 32674 NULL
++jfs_readpages_32702 jfs_readpages 4 32702 NULL
++xfs_filestream_new_ag_32711 xfs_filestream_new_ag 0 32711 NULL
++rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
++xfs_bmbt_update_32713 xfs_bmbt_update 0 32713 NULL
++i40e_pci_sriov_enable_32742 i40e_pci_sriov_enable 2 32742 NULL
++megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
++stats_read_ul_32751 stats_read_ul 3 32751 NULL
++vmci_transport_dgram_dequeue_32775 vmci_transport_dgram_dequeue 4 32775 NULL
++sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
++xfs_trans_read_buf_32795 xfs_trans_read_buf 0 32795 NULL
++rproc_name_read_32805 rproc_name_read 3 32805 NULL
++sta_tx_latency_stat_read_32862 sta_tx_latency_stat_read 3 32862 NULL
++new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
++cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 NULL nohasharray
++ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 &cifs_writedata_alloc_32880
++vp702x_usb_inout_cmd_32884 vp702x_usb_inout_cmd 4-6 32884 NULL
++il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
++zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
++rmap_recycle_32938 rmap_recycle 3 32938 NULL
++xfs_log_reserve_32959 xfs_log_reserve 0 32959 NULL
++compat_filldir_32999 compat_filldir 3 32999 NULL
++SyS_syslog_33007 SyS_syslog 3 33007 NULL
++br_multicast_set_hash_max_33012 br_multicast_set_hash_max 2 33012 NULL
++write_file_bt_ant_diversity_33019 write_file_bt_ant_diversity 3 33019 NULL
++mic_virtio_copy_to_user_33048 mic_virtio_copy_to_user 3 33048 NULL
++SYSC_lgetxattr_33049 SYSC_lgetxattr 4 33049 NULL
++pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
++ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
++bitmap_resize_33054 bitmap_resize 2 33054 NULL
++stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
++sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
++alloc_tio_33077 alloc_tio 3 33077 NULL
++acl_permission_check_33083 acl_permission_check 0 33083 NULL
++fb_sys_write_33130 fb_sys_write 3 33130 NULL
++notify_change_33143 notify_change 0 33143 NULL
++SyS_poll_33152 SyS_poll 2 33152 NULL
++_pci_add_cap_save_buffer_33153 _pci_add_cap_save_buffer 4 33153 NULL
++debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
++dataflash_read_fact_otp_33204 dataflash_read_fact_otp 3-2 33204 NULL
++pp_read_33210 pp_read 3 33210 NULL
++xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
++snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
++cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
++sync_pt_create_33282 sync_pt_create 2 33282 NULL
++mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
++isku_sysfs_read_keys_easyzone_33318 isku_sysfs_read_keys_easyzone 6 33318 NULL
++vx_send_irq_dsp_33329 vx_send_irq_dsp 0 33329 NULL
++joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
++lov_stripesize_seq_write_33353 lov_stripesize_seq_write 3 33353 NULL
++create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
++irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
++read_file_regidx_33370 read_file_regidx 3 33370 NULL
++ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
++scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
++ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 3-2 33394 NULL
++cfs_trace_copyin_string_33396 cfs_trace_copyin_string 4 33396 NULL
++snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 NULL
++filemap_fdatawrite_33415 filemap_fdatawrite 0 33415 NULL
++hash_netiface6_expire_33421 hash_netiface6_expire 4 33421 NULL
++dis_tap_write_33426 dis_tap_write 3 33426 NULL
++message_stats_list_33440 message_stats_list 5 33440 NULL
++ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
++create_entry_33479 create_entry 2 33479 NULL
++ip_setsockopt_33487 ip_setsockopt 5 33487 NULL
++res_counter_read_33499 res_counter_read 4 33499 NULL
++hash_netnet4_expire_33500 hash_netnet4_expire 4 33500 NULL
++fb_read_33506 fb_read 3 33506 NULL
++musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
++ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
++nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
++aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
++tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL nohasharray
++osc_max_rpcs_in_flight_seq_write_33539 osc_max_rpcs_in_flight_seq_write 3 33539 &tomoyo_read_self_33539
++count_subheaders_33591 count_subheaders 0 33591 NULL
++scsi_execute_33596 scsi_execute 5 33596 NULL
++comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
++xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL
++usb_gstrings_attach_33615 usb_gstrings_attach 3 33615 NULL nohasharray
++il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 &usb_gstrings_attach_33615
++xfs_btree_check_sblock_33618 xfs_btree_check_sblock 0 33618 NULL
++stride_page_count_33641 stride_page_count 2 33641 NULL
++irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
++inw_p_33668 inw_p 0 33668 NULL
++arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
++i2c_hid_alloc_buffers_33673 i2c_hid_alloc_buffers 2 33673 NULL
++submit_one_bio_33683 submit_one_bio 0 33683 NULL
++nv50_disp_dmac_create__33696 nv50_disp_dmac_create_ 6 33696 NULL
++netlink_sendmsg_33708 netlink_sendmsg 4 33708 NULL
++tipc_link_stats_33716 tipc_link_stats 3 33716 NULL
++ext4_wb_update_i_disksize_33717 ext4_wb_update_i_disksize 2 33717 NULL
++pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
++write_file_spectral_count_33723 write_file_spectral_count 3 33723 NULL
++read_file_node_recv_33729 read_file_node_recv 3 33729 NULL
++__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
++vifs_state_read_33762 vifs_state_read 3 33762 NULL
++hashtab_create_33769 hashtab_create 3 33769 NULL
++if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
++filter_write_33819 filter_write 3 33819 NULL
++sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
++scrub_setup_recheck_block_33831 scrub_setup_recheck_block 5-4 33831 NULL
++ext4_journal_extend_33835 ext4_journal_extend 2 33835 NULL
++oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
++get_user_pages_33908 get_user_pages 0 33908 NULL
++sg_nents_33909 sg_nents 0 33909 NULL
++ath6kl_roam_mode_write_33912 ath6kl_roam_mode_write 3 33912 NULL
++queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
++sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
++lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
++read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
++__proc_dump_kernel_33954 __proc_dump_kernel 5 33954 NULL
++btrfs_delalloc_reserve_metadata_33963 btrfs_delalloc_reserve_metadata 0 33963 NULL
++vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
++lbs_lowrssi_write_34025 lbs_lowrssi_write 3 34025 NULL
++ppp_write_34034 ppp_write 3 34034 NULL
++tty_insert_flip_string_34042 tty_insert_flip_string 3-0 34042 NULL
++memcg_update_all_caches_34068 memcg_update_all_caches 1 34068 NULL
++xfs_dialloc_34078 xfs_dialloc 0 34078 NULL
++pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
++__irq_domain_add_34101 __irq_domain_add 2 34101 NULL
++proc_scsi_host_write_34107 proc_scsi_host_write 3 34107 NULL
++islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
++ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2-0 34135 NULL
++ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
++shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
++ocfs2_xattr_list_entry_34165 ocfs2_xattr_list_entry 0 34165 NULL
++skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
++xfs_bmapi_write_34208 xfs_bmapi_write 0 34208 NULL
++ext4_da_write_begin_34215 ext4_da_write_begin 3-4 34215 NULL
++bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
++ocfs2_dlm_lock_34265 ocfs2_dlm_lock 0 34265 NULL
++device_private_init_34279 device_private_init 0 34279 NULL
++ext4_get_groups_count_34324 ext4_get_groups_count 0 34324 NULL
++pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 NULL nohasharray
++iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 &pcpu_need_to_extend_34326
++crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL nohasharray
++sync_page_io_34363 sync_page_io 3 34363 &crypto_ablkcipher_ivsize_34363
++rngapi_reset_34366 rngapi_reset 3 34366 NULL
++ea_read_34378 ea_read 0 34378 NULL
++fuse_send_read_34379 fuse_send_read 4 34379 NULL
++av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
++usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
++read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
++iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
++ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
++wd_exp_mode_write_34407 wd_exp_mode_write 3 34407 NULL
++nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
++__extent_read_full_page_34437 __extent_read_full_page 0 34437 NULL
++usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
++mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
++skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
++i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
++security_inode_permission_34488 security_inode_permission 0 34488 NULL
++SyS_pwritev_34494 SyS_pwritev 3 34494 NULL
++qp_alloc_res_34496 qp_alloc_res 5 34496 NULL
++lu_buf_check_and_alloc_34505 lu_buf_check_and_alloc 2 34505 NULL
++ext4_fallocate_34537 ext4_fallocate 4-3 34537 NULL nohasharray
++tracing_stats_read_34537 tracing_stats_read 3 34537 &ext4_fallocate_34537
++hugetlbfs_read_actor_34547 hugetlbfs_read_actor 2-5-4-0 34547 NULL
++dbBackSplit_34561 dbBackSplit 0 34561 NULL
++alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
++lov_stripecount_seq_write_34582 lov_stripecount_seq_write 3 34582 NULL
++init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
++inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
++ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL nohasharray
++cw1200_queue_init_34599 cw1200_queue_init 4 34599 &ceph_msgpool_init_34599
++bio_integrity_bytes_34602 bio_integrity_bytes 2 34602 NULL
++__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
++apei_get_nvs_resources_34616 apei_get_nvs_resources 0 34616 NULL
++__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
++cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
++kvm_set_spte_hva_34671 kvm_set_spte_hva 2 34671 NULL
++sleep_auth_write_34676 sleep_auth_write 3 34676 NULL
++isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
++batadv_tvlv_realloc_packet_buff_34688 batadv_tvlv_realloc_packet_buff 3-4 34688 NULL
++port_print_34704 port_print 3 34704 NULL
++ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
++platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
++reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
++lsm_alloc_plain_34755 lsm_alloc_plain 1 34755 NULL
++bootmode_store_34762 bootmode_store 4 34762 NULL
++device_add_34766 device_add 0 34766 NULL
++xfs_iget_cache_hit_34767 xfs_iget_cache_hit 0 34767 NULL
++qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
++SYSC_keyctl_34800 SYSC_keyctl 4 34800 NULL
++can_nocow_extent_34801 can_nocow_extent 2 34801 NULL
++drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
++ll_setxattr_34806 ll_setxattr 4 34806 NULL
++file_page_index_34820 file_page_index 0-2 34820 NULL
++bio_segments_34832 bio_segments 0 34832 NULL
++b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
++nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
++acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
++usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
++ieee80211_if_read_txpower_34871 ieee80211_if_read_txpower 3 34871 NULL
++msg_print_text_34889 msg_print_text 0 34889 NULL
++ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
++gfs2_glock_wait_34913 gfs2_glock_wait 0 34913 NULL
++si476x_radio_read_rsq_primary_blob_34916 si476x_radio_read_rsq_primary_blob 3 34916 NULL
++btrfs_super_chunk_root_34925 btrfs_super_chunk_root 0 34925 NULL nohasharray
++__inode_permission_34925 __inode_permission 0 34925 &btrfs_super_chunk_root_34925
++ceph_aio_write_34930 ceph_aio_write 4 34930 NULL
++sec_flags2str_34933 sec_flags2str 3 34933 NULL
++snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
++i2c_transfer_34958 i2c_transfer 0 34958 NULL
++do_add_page_to_bio_34974 do_add_page_to_bio 2-10 34974 NULL
++print_message_35000 print_message 0 35000 NULL
++rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
++l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
++sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
++coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
++xfs_rtallocate_extent_35052 xfs_rtallocate_extent 0 35052 NULL
++pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
++ntfs_attr_extend_initialized_35084 ntfs_attr_extend_initialized 0 35084 NULL
++__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
++capi_write_35104 capi_write 3 35104 NULL nohasharray
++tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
++ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
++ceph_osdc_start_request_35122 ceph_osdc_start_request 0 35122 NULL
++message_stats_print_35158 message_stats_print 6 35158 NULL
++iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
++ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
++unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
++striped_read_35218 striped_read 0-2 35218 NULL nohasharray
++security_key_getsecurity_35218 security_key_getsecurity 0 35218 &striped_read_35218
++rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL
++set_fd_set_35249 set_fd_set 1 35249 NULL
++ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
++jbd2_journal_get_write_access_35263 jbd2_journal_get_write_access 0 35263 NULL
++dis_disc_write_35265 dis_disc_write 3 35265 NULL
++dma_show_regs_35266 dma_show_regs 3 35266 NULL
++irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
++i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
++isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
++__btrfs_buffered_write_35311 __btrfs_buffered_write 3-0 35311 NULL nohasharray
++brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 &__btrfs_buffered_write_35311
++tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
++sta_tx_latency_stat_write_35323 sta_tx_latency_stat_write 3 35323 NULL
++xfs_btree_check_lblock_35333 xfs_btree_check_lblock 0 35333 NULL
++ieee80211_if_fmt_ap_power_level_35347 ieee80211_if_fmt_ap_power_level 3 35347 NULL
++nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
++ieee80211_rx_mgmt_deauth_35351 ieee80211_rx_mgmt_deauth 3 35351 NULL
++compat_filldir64_35354 compat_filldir64 3 35354 NULL
++read_kmem_35372 read_kmem 3 35372 NULL
++SyS_getxattr_35408 SyS_getxattr 4 35408 NULL
++rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
++buffer_to_user_35439 buffer_to_user 3 35439 NULL
++efx_mcdi_rpc_async_quiet_35460 efx_mcdi_rpc_async_quiet 4-5 35460 NULL
++macvtap_do_read_35475 macvtap_do_read 3 35475 NULL
++fiemap_prepare_and_copy_exts_35494 fiemap_prepare_and_copy_exts 5 35494 NULL
++btrfs_prealloc_file_range_trans_35500 btrfs_prealloc_file_range_trans 4 35500 NULL
++async_setkey_35521 async_setkey 3 35521 NULL
++__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
++iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
++pstore_mkfile_35536 pstore_mkfile 7 35536 NULL
++rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
++ocfs2_write_zero_page_35539 ocfs2_write_zero_page 3 35539 NULL
++ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
++ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
++ext4_blocks_for_truncate_35579 ext4_blocks_for_truncate 0 35579 NULL
++ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
++spk_msg_set_35586 spk_msg_set 3 35586 NULL
++kernel_readv_35617 kernel_readv 3 35617 NULL
++reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
++pci_request_regions_35635 pci_request_regions 0 35635 NULL
++ptlrpcd_steal_rqset_35637 ptlrpcd_steal_rqset 0 35637 NULL
++spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
++rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
++SYSC_pwritev_35690 SYSC_pwritev 3 35690 NULL
++rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
++md_super_write_35703 md_super_write 4 35703 NULL
++iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
++udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
++pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
++tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
++vx_query_hbuffer_size_35859 vx_query_hbuffer_size 0 35859 NULL
++mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
++fls64_35862 fls64 0 35862 NULL
++kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
++ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
++uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
++SyS_set_mempolicy_35909 SyS_set_mempolicy 3 35909 NULL
++kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
++rbio_nr_pages_35916 rbio_nr_pages 0-1-2 35916 NULL
++sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
++rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
++put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
++ext_rts51x_sd_execute_write_data_35971 ext_rts51x_sd_execute_write_data 9 35971 NULL
++ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL nohasharray
++generic_ocp_read_35974 generic_ocp_read 3 35974 &ceph_buffer_new_35974
++acl_alloc_35979 acl_alloc 1 35979 NULL
++device_add_class_symlinks_35985 device_add_class_symlinks 0 35985 NULL
++write_file_antenna_35998 write_file_antenna 3 35998 NULL nohasharray
++kuc_alloc_35998 kuc_alloc 1 35998 &write_file_antenna_35998
++il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
++__videobuf_alloc_36031 __videobuf_alloc 1 36031 NULL
++account_shadowed_36048 account_shadowed 2 36048 NULL
++gpio_power_read_36059 gpio_power_read 3 36059 NULL
++write_emulate_36065 write_emulate 2-4 36065 NULL
++stack_max_size_write_36068 stack_max_size_write 3 36068 NULL
++radeon_vm_num_pdes_36070 radeon_vm_num_pdes 0 36070 NULL
++ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
++ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
++snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
++mtip_hw_read_device_status_36082 mtip_hw_read_device_status 3 36082 NULL
++vga_arb_write_36112 vga_arb_write 3 36112 NULL
++simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
++ext3_readpages_36144 ext3_readpages 4 36144 NULL
++twl_set_36154 twl_set 2 36154 NULL
++b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
++snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
++SyS_kexec_load_36176 SyS_kexec_load 2 36176 NULL
++ramoops_init_przs_36199 ramoops_init_przs 4 36199 NULL
++SYSC_sched_getaffinity_36208 SYSC_sched_getaffinity 2 36208 NULL
++SYSC_process_vm_readv_36216 SYSC_process_vm_readv 3-5 36216 NULL
++atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
++viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
++SYSC_getxattr_36242 SYSC_getxattr 4 36242 NULL
++rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
++scrub_stripe_36248 scrub_stripe 5-4 36248 NULL
++compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
++usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL nohasharray
++cfs_hash_buckets_realloc_36276 cfs_hash_buckets_realloc 4 36276 &usb_buffer_alloc_36276
++codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
++crypto_shash_digestsize_36284 crypto_shash_digestsize 0 36284 NULL
++nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
++lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
++cfg80211_rx_mlme_mgmt_36306 cfg80211_rx_mlme_mgmt 3 36306 NULL
++ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
++fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
++lc_create_36332 lc_create 4 36332 NULL
++jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
++isku_sysfs_read_key_mask_36343 isku_sysfs_read_key_mask 6 36343 NULL
++ath6kl_regwrite_write_36351 ath6kl_regwrite_write 3 36351 NULL
++v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL
++to_sector_36361 to_sector 0-1 36361 NULL
++tunables_read_36385 tunables_read 3 36385 NULL
++afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
++sierra_write_36402 sierra_write 4 36402 NULL
++qdsb_get_36409 qdsb_get 0 36409 NULL
++SyS_sethostname_36417 SyS_sethostname 2 36417 NULL
++ReadW6692B_36445 ReadW6692B 0 36445 NULL
++sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
++alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
++SyS_process_vm_writev_36476 SyS_process_vm_writev 3-5 36476 NULL
++lock_and_cleanup_extent_if_need_36480 lock_and_cleanup_extent_if_need 0 36480 NULL
++b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
++ip6_append_data_36490 ip6_append_data 4 36490 NULL nohasharray
++tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 &ip6_append_data_36490
++cmd_loop_36491 cmd_loop 0 36491 NULL
++__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
++mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
++_iwl_dbgfs_fw_nmi_write_36515 _iwl_dbgfs_fw_nmi_write 3 36515 NULL
++get_param_l_36518 get_param_l 0 36518 NULL
++ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
++crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
++cpu_type_read_36540 cpu_type_read 3 36540 NULL
++__kfifo_to_user_36555 __kfifo_to_user 3-0 36555 NULL
++btrfs_get_token_64_36572 btrfs_get_token_64 0 36572 NULL
++__erst_read_36579 __erst_read 0 36579 NULL
++put_cmsg_36589 put_cmsg 4 36589 NULL
++fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
++vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
++convert_extent_item_v0_36645 convert_extent_item_v0 4 36645 NULL
++ced_ioctl_36647 ced_ioctl 2 36647 NULL
++lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
++osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
++iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
++xillybus_read_36678 xillybus_read 3 36678 NULL
++gsmtty_write_36702 gsmtty_write 3 36702 NULL
++snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4-0 36740 NULL
++cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
++ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
++ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
++i40e_init_lan_hmc_36796 i40e_init_lan_hmc 5-4-3-2 36796 NULL
++proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
++hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
++int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
++fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
++keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
++cm_write_36858 cm_write 3 36858 NULL
++tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
++svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
++raid56_parity_write_36877 raid56_parity_write 5 36877 NULL
++__btrfs_map_block_36883 __btrfs_map_block 3 36883 NULL
++ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
++selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
++OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
++crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
++il4965_rs_sta_dbgfs_scale_table_write_36979 il4965_rs_sta_dbgfs_scale_table_write 3 36979 NULL
++xfs_btree_check_sptr_36984 xfs_btree_check_sptr 0 36984 NULL
++drbd_new_dev_size_36998 drbd_new_dev_size 0-3 36998 NULL
++auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
++setxattr_37006 setxattr 4 37006 NULL
++ocfs2_dlm_unlock_37037 ocfs2_dlm_unlock 0 37037 NULL
++command_file_read_37038 command_file_read 3 37038 NULL
++figure_loop_size_37051 figure_loop_size 2-3 37051 NULL
++ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL nohasharray
++qp_broker_create_37053 qp_broker_create 6-5 37053 &ieee80211_if_read_drop_unencrypted_37053
++SYSC_setxattr_37078 SYSC_setxattr 4 37078 NULL
++parse_command_37079 parse_command 2 37079 NULL
++pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
++tun_get_user_37094 tun_get_user 5 37094 NULL
++xlog_grant_head_check_37116 xlog_grant_head_check 0 37116 NULL
++has_wrprotected_page_37123 has_wrprotected_page 3-2 37123 NULL
++snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
++mtt_free_res_37144 mtt_free_res 5 37144 NULL
++msg_word_37164 msg_word 0 37164 NULL
++f2fs_direct_IO_37167 f2fs_direct_IO 4 37167 NULL
++vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
++forced_ps_write_37209 forced_ps_write 3 37209 NULL
++crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL nohasharray
++ext4_ind_direct_IO_37212 ext4_ind_direct_IO 0-4 37212 &crypto_shash_descsize_37212
++bchannel_get_rxbuf_37213 bchannel_get_rxbuf 2-0 37213 NULL
++regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
++__do_replace_37227 __do_replace 5 37227 NULL
++iwl_dbgfs_d3_sram_read_37237 iwl_dbgfs_d3_sram_read 3 37237 NULL
++rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
++xfs_reclaim_inode_37257 xfs_reclaim_inode 0 37257 NULL
++exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
++_iwl_dbgfs_fw_restart_write_37270 _iwl_dbgfs_fw_restart_write 3 37270 NULL
++ieee80211_if_read_power_mode_37305 ieee80211_if_read_power_mode 3 37305 NULL
++ext3_direct_IO_37308 ext3_direct_IO 4 37308 NULL
++jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
++send_msg_37323 send_msg 4 37323 NULL
++l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL nohasharray
++bnx2x_vf_fill_fw_str_37327 bnx2x_vf_fill_fw_str 3 37327 &l2cap_create_connless_pdu_37327
++scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
++rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
++xfs_iomap_write_allocate_37336 xfs_iomap_write_allocate 0 37336 NULL
++security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
++hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
++acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL
++tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4-0 37428 NULL
++iwl_print_last_event_logs_37433 iwl_print_last_event_logs 0-7-9 37433 NULL
++fru_alloc_37442 fru_alloc 1 37442 NULL
++tcp_established_options_37450 tcp_established_options 0 37450 NULL nohasharray
++tipc_send2port_37450 tipc_send2port 4 37450 &tcp_established_options_37450
++xfs_btree_dec_cursor_37452 xfs_btree_dec_cursor 0 37452 NULL
++brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
++get_est_timing_37484 get_est_timing 0 37484 NULL
++kmem_realloc_37489 kmem_realloc 2 37489 NULL
++bitmap_dirty_bits_37503 bitmap_dirty_bits 2 37503 NULL
++osc_active_seq_write_37514 osc_active_seq_write 3 37514 NULL
++bdev_writeseg_37519 bdev_writeseg 2-3 37519 NULL
++xz_dec_test_write_37527 xz_dec_test_write 3 37527 NULL
++fault_inject_read_37534 fault_inject_read 3 37534 NULL
++hdr_size_37536 hdr_size 0 37536 NULL
++extent_map_end_37550 extent_map_end 0 37550 NULL
++sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL
++fat_cont_expand_37552 fat_cont_expand 0 37552 NULL
++ioat_chansts_37558 ioat_chansts 0 37558 NULL
++xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
++qla2x00_debounce_register_37597 qla2x00_debounce_register 0 37597 NULL
++kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
++SYSC_mbind_37622 SYSC_mbind 5 37622 NULL
++SyS_mbind_37638 SyS_mbind 5 37638 NULL
++may_delete_37656 may_delete 0 37656 NULL
++bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
++rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
++vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
++SYSC_get_mempolicy_37664 SYSC_get_mempolicy 3 37664 NULL
++__wa_seg_calculate_isoc_frame_count_37672 __wa_seg_calculate_isoc_frame_count 0 37672 NULL
++ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
++regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
++nametbl_header_37698 nametbl_header 2-0 37698 NULL
++__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
++dynamic_ps_timeout_write_37713 dynamic_ps_timeout_write 3 37713 NULL
++read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
++xfs_read_agf_37749 xfs_read_agf 0 37749 NULL
++ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
++ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
++dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
++il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
++smk_read_logging_37804 smk_read_logging 3 37804 NULL
++ocrdma_alloc_frmr_page_list_37815 ocrdma_alloc_frmr_page_list 2 37815 NULL
++rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
++android_get_p2p_addr_37832 android_get_p2p_addr 0 37832 NULL
++jbd2_journal_get_undo_access_37837 jbd2_journal_get_undo_access 0 37837 NULL
++o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
++xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
++set_registers_37883 set_registers 4 37883 NULL
++btrfs_stack_file_extent_disk_bytenr_37888 btrfs_stack_file_extent_disk_bytenr 0 37888 NULL
++pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL nohasharray
++_rtw_malloc_37928 _rtw_malloc 1 37928 &pkt_alloc_packet_data_37928
++read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
++write_file_bool_37957 write_file_bool 3 37957 NULL
++fifo_alloc_37961 fifo_alloc 1 37961 NULL
++rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
++persistent_ram_old_size_37997 persistent_ram_old_size 0 37997 NULL
++vfs_readv_38011 vfs_readv 3 38011 NULL
++aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
++il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 NULL nohasharray
++klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 &il_dbgfs_chain_noise_read_38044
++SyS_llistxattr_38048 SyS_llistxattr 3 38048 NULL
++sysfs_do_create_link_38051 sysfs_do_create_link 0 38051 NULL
++_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL
++nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
++alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
++xfs_buf_readahead_map_38081 xfs_buf_readahead_map 3 38081 NULL nohasharray
++wcn36xx_smd_rsp_process_38081 wcn36xx_smd_rsp_process 3 38081 &xfs_buf_readahead_map_38081
++uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
++request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
++proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
++ep0_read_38095 ep0_read 3 38095 NULL
++sk_wmem_schedule_38096 sk_wmem_schedule 2 38096 NULL nohasharray
++osc_checksum_seq_write_38096 osc_checksum_seq_write 3 38096 &sk_wmem_schedule_38096
++o2hb_read_slots_38105 o2hb_read_slots 2 38105 NULL
++snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
++vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
++__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 0-4-3 38153 NULL
++btrfs_extent_same_38163 btrfs_extent_same 3-2 38163 NULL
++kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
++cdev_add_38176 cdev_add 2-3 38176 NULL
++rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
++get_ucode_user_38202 get_ucode_user 3 38202 NULL
++osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL nohasharray
++xfs_rtallocate_range_38223 xfs_rtallocate_range 0 38223 &osd_req_list_partition_collections_38223
++ceph_decode_16_38239 ceph_decode_16 0 38239 NULL
++_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
++xfs_qm_dqrepair_38262 xfs_qm_dqrepair 0 38262 NULL
++mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 NULL nohasharray
++ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268
++xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
++xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
++ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
++ucma_query_path_38305 ucma_query_path 3 38305 NULL
++isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
++ida_simple_get_38326 ida_simple_get 0 38326 NULL
++__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
++btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
++dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
++ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
++pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
++kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
++_iwl_dbgfs_scan_ant_rxchain_write_38479 _iwl_dbgfs_scan_ant_rxchain_write 3 38479 NULL
++blk_end_bidi_request_38482 blk_end_bidi_request 3-4 38482 NULL
++dev_names_read_38509 dev_names_read 3 38509 NULL
++iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
++event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
++ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
++btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
++kuc_len_38557 kuc_len 0-1 38557 NULL
++irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
++il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
++_ipw_read32_38565 _ipw_read32 0 38565 NULL
++snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
++copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
++rd_allocate_sgl_table_38607 rd_allocate_sgl_table 3 38607 NULL
++icn_writecmd_38629 icn_writecmd 2 38629 NULL
++write_enabled_file_bool_38630 write_enabled_file_bool 3 38630 NULL
++ext2_readpages_38640 ext2_readpages 4 38640 NULL
++audit_init_entry_38644 audit_init_entry 1 38644 NULL
++qp_broker_alloc_38646 qp_broker_alloc 6-5 38646 NULL
++mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
++nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
++snd_es1371_wait_src_ready_38673 snd_es1371_wait_src_ready 0 38673 NULL
++iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
++rbio_add_io_page_38700 rbio_add_io_page 6 38700 NULL
++w83977af_sir_interrupt_38738 w83977af_sir_interrupt 0 38738 NULL
++udf_readpages_38761 udf_readpages 4 38761 NULL
++iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
++bcache_device_init_38781 bcache_device_init 3 38781 NULL
++snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
++do_pci_enable_device_38802 do_pci_enable_device 0 38802 NULL
++err_decode_38804 err_decode 2 38804 NULL
++ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
++direct_entry_38836 direct_entry 3 38836 NULL
++compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
++read_nic_io_word_38853 read_nic_io_word 0 38853 NULL
++interfaces_38859 interfaces 2 38859 NULL
++dbgfs_state_38894 dbgfs_state 3 38894 NULL
++il_dbgfs_sram_write_38942 il_dbgfs_sram_write 3 38942 NULL
++__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
++usb_maxpacket_38977 usb_maxpacket 0 38977 NULL nohasharray
++C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 &usb_maxpacket_38977
++OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
++bio_clone_range_38997 bio_clone_range 2 38997 NULL
++lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
++get_nodes_39012 get_nodes 3 39012 NULL
++twl6030_interrupt_unmask_39013 twl6030_interrupt_unmask 2 39013 NULL
++__blkdev_issue_zeroout_39020 __blkdev_issue_zeroout 3 39020 NULL
++_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
++do_write_kmem_39051 do_write_kmem 0-1-3 39051 NULL
++ReadHFC_39104 ReadHFC 0 39104 NULL
++tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
++__kfifo_to_user_r_39123 __kfifo_to_user_r 5-3 39123 NULL
++ea_foreach_39133 ea_foreach 0 39133 NULL
++generic_permission_39150 generic_permission 0 39150 NULL
++proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
++ath9k_hw_ar9003_dump_eeprom_39156 ath9k_hw_ar9003_dump_eeprom 5-4 39156 NULL
++echo_client_kbrw_39170 echo_client_kbrw 6 39170 NULL
++ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
++ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
++qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
++ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
++posix_acl_to_xattr_39237 posix_acl_to_xattr 0 39237 NULL
++snd_pcm_capture_forward_39248 snd_pcm_capture_forward 2 39248 NULL
++r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
++pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
++i915_error_state_read_39254 i915_error_state_read 3 39254 NULL
++rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
++_iwl_dbgfs_pm_params_write_39325 _iwl_dbgfs_pm_params_write 3 39325 NULL
++__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
++insert_reserved_file_extent_39327 insert_reserved_file_extent 3 39327 NULL
++wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
++ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
++do_write_log_from_user_39362 do_write_log_from_user 3-0 39362 NULL
++vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
++regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
++fnic_trace_debugfs_read_39380 fnic_trace_debugfs_read 3 39380 NULL
++ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
++__send_to_port_39386 __send_to_port 3 39386 NULL
++xfs_btree_dup_cursor_39394 xfs_btree_dup_cursor 0 39394 NULL
++gfs2_internal_read_39413 gfs2_internal_read 0 39413 NULL
++user_power_read_39414 user_power_read 3 39414 NULL
++alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
++mic_desc_size_39464 mic_desc_size 0 39464 NULL
++apei_resources_add_39470 apei_resources_add 0 39470 NULL
++setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
++ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
++cl_req_alloc_39523 cl_req_alloc 4 39523 NULL
++int_proc_write_39542 int_proc_write 3 39542 NULL
++pp_write_39554 pp_write 3 39554 NULL
++datablob_format_39571 datablob_format 2 39571 NULL nohasharray
++ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
++memblock_virt_alloc_internal_39600 memblock_virt_alloc_internal 1 39600 NULL
++ext_depth_39607 ext_depth 0 39607 NULL
++batadv_tt_tvlv_generate_39615 batadv_tt_tvlv_generate 4 39615 NULL
++nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
++sdio_readb_39618 sdio_readb 0 39618 NULL
++prepare_uptodate_page_39622 prepare_uptodate_page 0 39622 NULL
++set_dev_class_39645 set_dev_class 4 39645 NULL
++snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
++tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL
++kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
++v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
++hsc_msg_len_get_39673 hsc_msg_len_get 0 39673 NULL
++do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
++sd_completed_bytes_39705 sd_completed_bytes 0 39705 NULL
++ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
++adt7316_spi_multi_read_39765 adt7316_spi_multi_read 3 39765 NULL
++security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
++snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
++get_priv_size_39828 get_priv_size 0-1 39828 NULL
++pkt_add_39897 pkt_add 3 39897 NULL
++read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
++gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
++dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
++__mnt_want_write_file_39917 __mnt_want_write_file 0 39917 NULL
++aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
++exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
++oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
++__spi_async_39932 __spi_async 0 39932 NULL
++__get_order_39935 __get_order 0 39935 NULL
++error_error_frame_read_39947 error_error_frame_read 3 39947 NULL
++tty_prepare_flip_string_39955 tty_prepare_flip_string 3-0 39955 NULL
++lstcon_group_list_39958 lstcon_group_list 2 39958 NULL
++bio_chain_clone_range_39967 bio_chain_clone_range 3 39967 NULL
++dma_push_rx_39973 dma_push_rx 2 39973 NULL
++broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
++mthca_array_init_39987 mthca_array_init 2 39987 NULL
++fw_device_op_read_39990 fw_device_op_read 3 39990 NULL
++server_name2svname_39998 server_name2svname 4 39998 NULL
++xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
++ivtvfb_write_40023 ivtvfb_write 3 40023 NULL
++disc_pwup_write_40027 disc_pwup_write 3 40027 NULL
++ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
++datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
++l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 NULL nohasharray
++add_tty_40055 add_tty 1 40055 &l2cap_create_iframe_pdu_40055
++atomic_xchg_40070 atomic_xchg 0 40070 NULL
++xfs_rtbuf_get_40107 xfs_rtbuf_get 0 40107 NULL
++sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
++dwc2_max_desc_num_40132 dwc2_max_desc_num 0 40132 NULL
++rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
++ath10k_write_simulate_fw_crash_40143 ath10k_write_simulate_fw_crash 3 40143 NULL
++iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
++pt_write_40159 pt_write 3 40159 NULL
++scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
++ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL
++allocate_probes_40204 allocate_probes 1 40204 NULL
++au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
++compress_file_range_40225 compress_file_range 3-4 40225 NULL
++osst_read_40237 osst_read 3 40237 NULL
++lpage_info_slot_40243 lpage_info_slot 3-1 40243 NULL
++ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
++rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
++ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
++usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
++rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
++SyS_bind_40303 SyS_bind 3 40303 NULL
++ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
++mmio_read_40348 mmio_read 4 40348 NULL
++event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
++ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 3-2 40365 NULL
++get_chars_40373 get_chars 3 40373 NULL
++fb_prepare_extra_logos_40429 fb_prepare_extra_logos 0-2 40429 NULL
++tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
++zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
++gp2ap020a00f_write_event_threshold_40461 gp2ap020a00f_write_event_threshold 2 40461 NULL
++SyS_writev_40467 SyS_writev 3 40467 NULL
++SyS_select_40473 SyS_select 1 40473 NULL
++afs_fs_store_data_40484 afs_fs_store_data 3-4-5-6 40484 NULL
++batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
++devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
++xfs_iread_extents_40510 xfs_iread_extents 0 40510 NULL
++__ethtool_get_sset_count_40511 __ethtool_get_sset_count 0 40511 NULL
++TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
++ixgbe_dbg_reg_ops_read_40540 ixgbe_dbg_reg_ops_read 3 40540 NULL
++ima_write_policy_40548 ima_write_policy 3 40548 NULL
++esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
++b1_get_byte_40597 b1_get_byte 0 40597 NULL
++get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
++twl4030_kpwrite_u8_40665 twl4030_kpwrite_u8 3 40665 NULL
++__cfg80211_roamed_40668 __cfg80211_roamed 4-6 40668 NULL
++pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
++fops_read_40672 fops_read 3 40672 NULL
++idr_get_empty_slot_40674 idr_get_empty_slot 0 40674 NULL
++alloc_rbio_40676 alloc_rbio 4 40676 NULL
++videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 4-3 40678 NULL
++pci_enable_resources_40680 pci_enable_resources 0 40680 NULL
++nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
++__seq_open_private_40715 __seq_open_private 3 40715 NULL
++set_extent_bit_40719 set_extent_bit 0 40719 NULL
++fuse_readpages_40737 fuse_readpages 4 40737 NULL
++xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL
++security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
++card_send_command_40757 card_send_command 3 40757 NULL
++ad1889_readl_40765 ad1889_readl 0 40765 NULL
++pg_write_40766 pg_write 3 40766 NULL
++kernfs_fop_read_40770 kernfs_fop_read 3 40770 NULL
++show_list_40775 show_list 3-0 40775 NULL
++kfifo_out_copy_r_40784 kfifo_out_copy_r 3-0 40784 NULL
++bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
++pyra_sysfs_read_40795 pyra_sysfs_read 6 40795 NULL
++add_action_40811 add_action 4 40811 NULL
++nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
++SyS_mbind_40828 SyS_mbind 5 40828 NULL
++nilfs_mdt_init_40849 nilfs_mdt_init 3 40849 NULL
++v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
++read_file_queue_40895 read_file_queue 3 40895 NULL
++waiters_read_40902 waiters_read 3 40902 NULL
++isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
++gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
++vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
++snd_vx_create_40948 snd_vx_create 4 40948 NULL nohasharray
++sg_alloc_table_40948 sg_alloc_table 0 40948 &snd_vx_create_40948
++rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
++il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
++iwl_dbgfs_scan_ant_rxchain_read_40999 iwl_dbgfs_scan_ant_rxchain_read 3 40999 NULL
++mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
++__proc_dobitmasks_41029 __proc_dobitmasks 5 41029 NULL
++_req_append_segment_41031 _req_append_segment 2 41031 NULL
++mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
++ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
++lprocfs_write_frac_helper_41050 lprocfs_write_frac_helper 2 41050 NULL
++vfs_listxattr_41062 vfs_listxattr 0 41062 NULL nohasharray
++beacon_filtering_write_41062 beacon_filtering_write 3 41062 &vfs_listxattr_41062
++cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
++nvme_map_user_pages_41093 nvme_map_user_pages 4-3 41093 NULL nohasharray
++roccat_read_41093 roccat_read 3 41093 &nvme_map_user_pages_41093
++dma_attach_41094 dma_attach 5-6 41094 NULL
++provide_user_output_41105 provide_user_output 3 41105 NULL
++f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
++ath10k_read_wmi_services_41112 ath10k_read_wmi_services 3 41112 NULL
++v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
++tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
++dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
++compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
++dfs_file_write_41196 dfs_file_write 3 41196 NULL
++nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
++cfg80211_process_disassoc_41231 cfg80211_process_disassoc 3 41231 NULL
++hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
++erst_read_41260 erst_read 0 41260 NULL
++alloc_context_41283 alloc_context 1 41283 NULL
++o2hb_setup_one_bio_41341 o2hb_setup_one_bio 4 41341 NULL
++twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
++rtw_android_set_block_41347 rtw_android_set_block 0 41347 NULL
++ceph_do_getattr_41349 ceph_do_getattr 0 41349 NULL
++cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
++kmp_init_41373 kmp_init 2 41373 NULL
++isr_commands_read_41398 isr_commands_read 3 41398 NULL
++rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
++xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
++isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
++lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
++iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
++ntfs_file_buffered_write_41442 ntfs_file_buffered_write 6-4-0 41442 NULL
++pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
++se_io_cb_41461 se_io_cb 3 41461 NULL
++layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
++rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
++bl_alloc_init_bio_41478 bl_alloc_init_bio 1 41478 NULL
++kvm_unmap_hva_range_41484 kvm_unmap_hva_range 3-2 41484 NULL
++wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
++SyS_get_mempolicy_41495 SyS_get_mempolicy 3 41495 NULL
++hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
++xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
++SyS_preadv_41523 SyS_preadv 3 41523 NULL
++dm_get_reserved_rq_based_ios_41529 dm_get_reserved_rq_based_ios 0 41529 NULL
++tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
++ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
++nr_status_frames_41559 nr_status_frames 0-1 41559 NULL nohasharray
++si476x_radio_fops_read_41559 si476x_radio_fops_read 3 41559 &nr_status_frames_41559
++rng_dev_read_41581 rng_dev_read 3 41581 NULL
++batadv_tvlv_container_ogm_append_41588 batadv_tvlv_container_ogm_append 4 41588 NULL
++vga_io_r_41609 vga_io_r 0 41609 NULL
++tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
++lbs_bcnmiss_write_41613 lbs_bcnmiss_write 3 41613 NULL nohasharray
++usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 &lbs_bcnmiss_write_41613
++a2mp_send_41615 a2mp_send 4 41615 NULL
++lstcon_batch_list_41627 lstcon_batch_list 2 41627 NULL
++mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
++rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
++get_std_timing_41654 get_std_timing 0 41654 NULL
++ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
++fill_pcm_stream_name_41685 fill_pcm_stream_name 2 41685 NULL
++lov_unpackmd_41701 lov_unpackmd 4 41701 NULL
++apei_exec_for_each_entry_41717 apei_exec_for_each_entry 0 41717 NULL
++fillonedir_41746 fillonedir 3 41746 NULL
++iwl_dbgfs_bt_notif_read_41794 iwl_dbgfs_bt_notif_read 3 41794 NULL
++hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
++rtw_android_get_macaddr_41812 rtw_android_get_macaddr 0 41812 NULL
++sco_send_frame_41815 sco_send_frame 3 41815 NULL
++ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
++do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
++keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
++xfs_rtget_summary_41864 xfs_rtget_summary 0 41864 NULL
++pci_map_single_41869 pci_map_single 0 41869 NULL
++usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
++v_APCI3120_InterruptDmaMoveBlock16bit_41914 v_APCI3120_InterruptDmaMoveBlock16bit 4 41914 NULL
++get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
++ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 3-4 41935 NULL
++sci_rxfill_41945 sci_rxfill 0 41945 NULL
++read_gssp_41947 read_gssp 3 41947 NULL
++ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
++portnames_read_41958 portnames_read 3 41958 NULL
++dst_mtu_41969 dst_mtu 0 41969 NULL
++cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
++xfs_qm_dqalloc_41985 xfs_qm_dqalloc 0 41985 NULL
++pool_allocate_42012 pool_allocate 3 42012 NULL
++spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
++rs_sta_dbgfs_scale_table_write_42017 rs_sta_dbgfs_scale_table_write 3 42017 NULL
++acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
++__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
++alloc_trace_kprobe_42041 alloc_trace_kprobe 6 42041 NULL
++irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
++jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
++ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
++InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
++scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
++sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
++submit_inquiry_42108 submit_inquiry 3 42108 NULL
++dw_dma_cyclic_prep_42113 dw_dma_cyclic_prep 3-4 42113 NULL
++blk_ioctl_zeroout_42160 blk_ioctl_zeroout 3 42160 NULL
++mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
++read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
++oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
++write_file_beacon_42185 write_file_beacon 3 42185 NULL
++get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
++pla_ocp_read_42235 pla_ocp_read 3 42235 NULL
++xfs_rtfree_range_42244 xfs_rtfree_range 0 42244 NULL
++rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
++find_last_bit_42260 find_last_bit 0 42260 NULL
++__pcpu_size_to_slot_42271 __pcpu_size_to_slot 0 42271 NULL
++__tty_buffer_request_room_42276 __tty_buffer_request_room 2-0 42276 NULL
++snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
++__cpus_weight_42299 __cpus_weight 2-0 42299 NULL
++sel_read_perm_42302 sel_read_perm 3 42302 NULL
++sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
++ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
++xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
++hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
++tcp_sync_mss_42330 tcp_sync_mss 2-0 42330 NULL
++ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
++tipc_send_42374 tipc_send 3 42374 NULL
++drbd_md_last_sector_42378 drbd_md_last_sector 0 42378 NULL
++il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
++msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
++krng_get_random_42420 krng_get_random 3 42420 NULL
++gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
++key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
++snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
++tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
++kuc_free_42455 kuc_free 2 42455 NULL
++__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
++omfs_readpages_42490 omfs_readpages 4 42490 NULL
++bypass_write_42498 bypass_write 3 42498 NULL
++SyS_mincore_42511 SyS_mincore 1-2 42511 NULL
++kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
++dio_bio_complete_42524 dio_bio_complete 0 42524 NULL
++smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
++dbAllocNear_42546 dbAllocNear 0 42546 NULL
++ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 NULL
++udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
++iwl_print_event_log_42566 iwl_print_event_log 7-5-0 42566 NULL
++xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
++oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
++ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
++scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
++br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
++parport_pc_compat_write_block_pio_42644 parport_pc_compat_write_block_pio 3 42644 NULL
++_regmap_raw_write_42652 _regmap_raw_write 4-2 42652 NULL
++request_key_and_link_42693 request_key_and_link 4 42693 NULL
++vb2_read_42703 vb2_read 3 42703 NULL
++read_status_42722 read_status 0 42722 NULL
++dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
++set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
++ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
++dpm_sysfs_add_42756 dpm_sysfs_add 0 42756 NULL
++x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
++snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
++cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL nohasharray
++isku_sysfs_read_info_42781 isku_sysfs_read_info 6 42781 &cryptd_hash_setkey_42781
++elfcorehdr_read_notes_42786 elfcorehdr_read_notes 2 42786 NULL
++koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
++ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0 42796 NULL
++fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
++drm_ioctl_42813 drm_ioctl 2 42813 NULL
++iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
++set_arg_42824 set_arg 3 42824 NULL
++si476x_radio_read_rsq_blob_42827 si476x_radio_read_rsq_blob 3 42827 NULL
++ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
++nvme_trans_unit_serial_page_42879 nvme_trans_unit_serial_page 4 42879 NULL
++xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
++hd_end_request_42904 hd_end_request 2 42904 NULL
++sta_last_rx_rate_read_42909 sta_last_rx_rate_read 3 42909 NULL
++xfs_bmapi_allocate_42938 xfs_bmapi_allocate 0 42938 NULL
++sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
++vx_reset_chk_42946 vx_reset_chk 0 42946 NULL
++blkdev_direct_IO_42962 blkdev_direct_IO 4 42962 NULL
++compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
++nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL nohasharray
++rtw_os_xmit_resource_alloc_42990 rtw_os_xmit_resource_alloc 3 42990 &nfs_idmap_get_desc_42990
++xfs_da_read_buf_43008 xfs_da_read_buf 0 43008 NULL
++isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
++wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
++nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
++nfs_map_group_to_gid_43082 nfs_map_group_to_gid 3 43082 NULL
++_xfer_secondary_pool_43089 _xfer_secondary_pool 2 43089 NULL
++sysfs_create_file_ns_43103 sysfs_create_file_ns 0 43103 NULL
++ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
++calculate_node_totalpages_43118 calculate_node_totalpages 2-3 43118 NULL
++read_file_dfs_43145 read_file_dfs 3 43145 NULL
++cfs_cpt_table_alloc_43159 cfs_cpt_table_alloc 1 43159 NULL
++usb_string_sub_43164 usb_string_sub 0 43164 NULL
++il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
++ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
++ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
++uio_write_43202 uio_write 3 43202 NULL
++iso_callback_43208 iso_callback 3 43208 NULL
++ath10k_p2p_calc_noa_ie_len_43209 ath10k_p2p_calc_noa_ie_len 0 43209 NULL
++f2fs_acl_from_disk_43210 f2fs_acl_from_disk 2 43210 NULL
++atomic_long_add_return_43217 atomic_long_add_return 1-0 43217 NULL
++inode_init_always_43225 inode_init_always 0 43225 NULL
++batadv_tt_tvlv_unicast_handler_v1_43239 batadv_tt_tvlv_unicast_handler_v1 5 43239 NULL
++vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
++ide_end_rq_43269 ide_end_rq 4 43269 NULL
++nilfs_direct_IO_43271 nilfs_direct_IO 4 43271 NULL
++parport_pc_ecp_write_block_pio_43278 parport_pc_ecp_write_block_pio 3 43278 NULL nohasharray
++evtchn_write_43278 evtchn_write 3 43278 &parport_pc_ecp_write_block_pio_43278
++filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
++mpage_alloc_43299 mpage_alloc 3 43299 NULL
++mmu_set_spte_43327 mmu_set_spte 6-7 43327 NULL
++__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
++xfs_log_regrant_43350 xfs_log_regrant 0 43350 NULL
++gfs2_rgrp_bh_get_43375 gfs2_rgrp_bh_get 0 43375 NULL
++xfs_btree_new_iroot_43392 xfs_btree_new_iroot 0 43392 NULL
++xenfb_write_43412 xenfb_write 3 43412 NULL
++__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
++usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
++cifs_writev_43437 cifs_writev 4 43437 NULL
++ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
++usb_string_43443 usb_string 0 43443 NULL nohasharray
++usemap_size_43443 usemap_size 0-2-1 43443 &usb_string_43443
++get_vm_area_size_43444 get_vm_area_size 0 43444 NULL
++nvme_trans_device_id_page_43466 nvme_trans_device_id_page 4 43466 NULL
++tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
++ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
++do_readlink_43518 do_readlink 2 43518 NULL
++dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
++read_events_43534 read_events 3 43534 NULL
++cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
++tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL
++request_resource_43548 request_resource 0 43548 NULL
++rpc_malloc_43573 rpc_malloc 2 43573 NULL
++handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
++lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
++proc_read_43614 proc_read 3 43614 NULL
++disable_dma_on_even_43618 disable_dma_on_even 0 43618 NULL
++alloc_thread_groups_43625 alloc_thread_groups 2 43625 NULL
++random_write_43656 random_write 3 43656 NULL
++bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
++ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
++write_file_tx99_power_43670 write_file_tx99_power 3 43670 NULL
++dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4 43679 NULL
++max77693_bulk_write_43698 max77693_bulk_write 2-3 43698 NULL
++drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
++snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
++fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
++gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
++sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
++xfs_alloc_lookup_eq_43775 xfs_alloc_lookup_eq 0 43775 NULL
++ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
++byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
++xfs_trans_reserve_quota_bydquots_43797 xfs_trans_reserve_quota_bydquots 0 43797 NULL
++btrfs_copy_from_user_43806 btrfs_copy_from_user 0-3-1 43806 NULL
++_xfs_filestream_update_ag_43824 _xfs_filestream_update_ag 0 43824 NULL
++ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
++ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
++read_flush_43851 read_flush 3 43851 NULL
++pm860x_bulk_write_43875 pm860x_bulk_write 2-3 43875 NULL
++SendString_43928 SendString 3 43928 NULL
++stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
++xfs_trans_dqresv_43960 xfs_trans_dqresv 0 43960 NULL
++__get_required_blob_size_43980 __get_required_blob_size 0-3-2 43980 NULL
++nla_reserve_43984 nla_reserve 3 43984 NULL
++__clkdev_alloc_43990 __clkdev_alloc 1 43990 NULL
++scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray ++kvm_read_guest_virt_43992 kvm_read_guest_virt 4-2 43992 &scsi_command_size_43992 nohasharray ++bcm_recvmsg_43992 bcm_recvmsg 4 43992 &kvm_read_guest_virt_43992 ++ulist_add_43994 ulist_add 0 43994 NULL ++emit_flags_44006 emit_flags 4-3 44006 NULL ++write_flush_procfs_44011 write_flush_procfs 3 44011 NULL ++fru_strlen_44046 fru_strlen 0 44046 NULL ++ath9k_def_dump_modal_eeprom_44078 ath9k_def_dump_modal_eeprom 3-2-0 44078 NULL ++SYSC_add_key_44079 SYSC_add_key 4 44079 NULL ++pci_msix_vec_count_44093 pci_msix_vec_count 0 44093 NULL nohasharray ++__vxge_hw_vpath_tim_configure_44093 __vxge_hw_vpath_tim_configure 2 44093 &pci_msix_vec_count_44093 ++xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL ++skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL ++file_remove_suid_44122 file_remove_suid 0 44122 NULL nohasharray ++tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 &file_remove_suid_44122 ++SyS_process_vm_writev_44129 SyS_process_vm_writev 3-5 44129 NULL ++ttm_get_pages_44142 ttm_get_pages 2 44142 NULL ++scsi_get_resid_44147 scsi_get_resid 0 44147 NULL ++ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL ++SYSC_set_mempolicy_44176 SYSC_set_mempolicy 3 44176 NULL ++readreg_ipac_44186 readreg_ipac 0 44186 NULL ++handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL ++srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL ++scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL ++sigma_action_write_regmap_44240 sigma_action_write_regmap 3 44240 NULL ++apei_resources_sub_44252 apei_resources_sub 0 44252 NULL ++device_create_file_44285 device_create_file 0 44285 NULL ++ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL ++bitmap_scnprintf_44318 bitmap_scnprintf 0-2 44318 NULL ++dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL ++ccp_init_data_44324 ccp_init_data 5 44324 NULL ++rs_init_44327 rs_init 1 44327 NULL ++radix_tree_maybe_preload_44346 radix_tree_maybe_preload 0 44346 NULL ++blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 NULL nohasharray ++nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 &blk_queue_init_tags_44355 ++rts_threshold_read_44384 rts_threshold_read 3 44384 NULL ++mtip_hw_read_flags_44396 mtip_hw_read_flags 3 44396 NULL ++aoedev_flush_44398 aoedev_flush 2 44398 NULL ++strlcpy_44400 strlcpy 3 44400 NULL ++drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL ++osst_do_scsi_44410 osst_do_scsi 4 44410 NULL ++ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL ++xfs_mod_incore_sb_44439 xfs_mod_incore_sb 0 44439 NULL ++write_file_debug_44476 write_file_debug 3 44476 NULL ++btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL ++sdio_align_size_44489 sdio_align_size 0-2 44489 NULL ++bio_advance_44496 bio_advance 2 44496 NULL ++ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL ++ac_register_board_44504 ac_register_board 3 44504 NULL ++security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray ++iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505 ++spidev_write_44510 spidev_write 3 44510 NULL ++SyS_io_getevents_44519 SyS_io_getevents 3 44519 NULL ++ieee80211_rx_mgmt_assoc_resp_44525 ieee80211_rx_mgmt_assoc_resp 3 44525 NULL ++comm_write_44537 comm_write 3 44537 NULL ++dgrp_config_proc_write_44571 dgrp_config_proc_write 3 44571 NULL ++btrfs_set_extent_delalloc_44587 
btrfs_set_extent_delalloc 0 44587 NULL ++nouveau_perfmon_create__44602 nouveau_perfmon_create_ 4 44602 NULL ++xfs_bmapi_delay_44630 xfs_bmapi_delay 0 44630 NULL ++alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL ++mpi_resize_44674 mpi_resize 2 44674 NULL ++sysfs_create_link_44685 sysfs_create_link 0 44685 NULL ++ts_read_44687 ts_read 3 44687 NULL ++lov_emerg_alloc_44698 lov_emerg_alloc 1 44698 NULL ++xfer_to_user_44713 xfer_to_user 3 44713 NULL nohasharray ++__generic_block_fiemap_44713 __generic_block_fiemap 4 44713 &xfer_to_user_44713 ++_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL ++clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL ++fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL ++key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL ++bch_bio_max_sectors_44755 bch_bio_max_sectors 0 44755 NULL ++tnode_new_44757 tnode_new 3 44757 NULL nohasharray ++pty_write_44757 pty_write 3 44757 &tnode_new_44757 ++__videobuf_copy_stream_44769 __videobuf_copy_stream 4-0 44769 NULL ++handsfree_ramp_44777 handsfree_ramp 2 44777 NULL ++irq_domain_add_legacy_44781 irq_domain_add_legacy 4-2 44781 NULL ++sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL ++rx_dropped_read_44799 rx_dropped_read 3 44799 NULL ++qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL ++mei_cl_read_start_44824 mei_cl_read_start 2 44824 NULL ++rmap_write_protect_44833 rmap_write_protect 2 44833 NULL ++sisusb_write_44834 sisusb_write 3 44834 NULL ++kvm_read_hva_44847 kvm_read_hva 3 44847 NULL ++qib_verbs_send_dma_44850 qib_verbs_send_dma 6 44850 NULL ++copydesc_user_44855 copydesc_user 3 44855 NULL ++set_advertising_44870 set_advertising 4 44870 NULL ++init_rs_44873 init_rs 1 44873 NULL ++skb_availroom_44883 skb_availroom 0 44883 NULL ++ocfs2_wait_for_mask_44893 ocfs2_wait_for_mask 0 44893 NULL ++do_tty_write_44896 do_tty_write 5 44896 NULL ++regmap_spi_read_44921 regmap_spi_read 3-5 44921 NULL ++tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL ++bytepos_delta_45017 bytepos_delta 0-2 45017 NULL ++ptrace_writedata_45021 ptrace_writedata 4 45021 NULL ++dm_kvzalloc_45025 dm_kvzalloc 1 45025 NULL ++sysfs_do_create_link_sd_45057 sysfs_do_create_link_sd 0 45057 NULL ++sel_write_user_45060 sel_write_user 3 45060 NULL ++snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL ++kvm_mmu_page_get_gfn_45110 kvm_mmu_page_get_gfn 0-2 45110 NULL ++pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL ++usbdev_read_45114 usbdev_read 3 45114 NULL ++send_to_tty_45141 send_to_tty 3 45141 NULL ++cfs_trace_daemon_command_usrstr_45147 cfs_trace_daemon_command_usrstr 2 45147 NULL ++gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL ++device_write_45156 device_write 3 45156 NULL nohasharray ++ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3 45156 &device_write_45156 ++tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL ++sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL ++snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL nohasharray ++sctp_pack_cookie_45190 sctp_pack_cookie 6 45190 &snd_sb_csp_load_user_45190 ++__radix_tree_preload_45197 __radix_tree_preload 0 45197 NULL ++iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL ++spi_alloc_master_45223 spi_alloc_master 2 45223 NULL ++ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL ++event_enable_write_45238 event_enable_write 3 45238 NULL ++prism2_pda_proc_read_45246 prism2_pda_proc_read 3 45246 NULL ++input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL 
++gfs2_fiemap_45282 gfs2_fiemap 4 45282 NULL ++snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL ++xfs_alert_fsblock_zero_45304 xfs_alert_fsblock_zero 0 45304 NULL ++e1000_tx_map_45309 e1000_tx_map 5 45309 NULL ++copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL ++brcmf_sdio_died_dump_45359 brcmf_sdio_died_dump 3 45359 NULL ++null_alloc_repbuf_45375 null_alloc_repbuf 3 45375 NULL ++sock_recv_errqueue_45412 sock_recv_errqueue 3 45412 NULL ++ieee80211_if_fmt_dot11MeshHWMProotInterval_45421 ieee80211_if_fmt_dot11MeshHWMProotInterval 3 45421 NULL ++ll_iocontrol_register_45430 ll_iocontrol_register 2 45430 NULL ++tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL ++__node_remap_45458 __node_remap 4 45458 NULL ++rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL ++tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL ++__calc_request_pg_45470 __calc_request_pg 0 45470 NULL ++xfs_btree_block_change_owner_45476 xfs_btree_block_change_owner 0 45476 NULL ++rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL ++i40e_alloc_vfs_45511 i40e_alloc_vfs 2 45511 NULL ++copy_macs_45534 copy_macs 4 45534 NULL ++nla_attr_size_45545 nla_attr_size 0-1 45545 NULL ++v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL ++cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL ++stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL ++_regmap_bus_raw_write_45559 _regmap_bus_raw_write 2 45559 NULL ++posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL ++venus_rmdir_45564 venus_rmdir 4 45564 NULL ++ath6kl_keepalive_write_45600 ath6kl_keepalive_write 3 45600 NULL ++hidraw_get_report_45609 hidraw_get_report 3 45609 NULL ++compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL ++dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL ++smk_write_ambient_45691 smk_write_ambient 3 45691 NULL ++unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray ++bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699 ++sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL ++snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL nohasharray ++task_cgroup_path_45734 task_cgroup_path 3 45734 &snd_cs46xx_io_read_45734 ++rw_copy_check_uvector_45748 rw_copy_check_uvector 3-0 45748 NULL nohasharray ++v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748 ++lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL ++alloc_ts_config_45775 alloc_ts_config 1 45775 NULL ++osc_checksum_type_seq_write_45785 osc_checksum_type_seq_write 3 45785 NULL ++raw_setsockopt_45800 raw_setsockopt 5 45800 NULL ++rds_tcp_inc_copy_to_user_45804 rds_tcp_inc_copy_to_user 3 45804 NULL ++lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL ++pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL ++ll_max_readahead_mb_seq_write_45815 ll_max_readahead_mb_seq_write 3 45815 NULL ++memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL ++ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL ++insert_state_45848 insert_state 0 45848 NULL ++x509_process_extension_45854 x509_process_extension 5 45854 NULL ++efx_tx_queue_insert_45859 efx_tx_queue_insert 2 45859 NULL ++isdn_write_45863 isdn_write 3 45863 NULL ++tpm_config_in_45880 tpm_config_in 0 45880 NULL ++get_rdac_req_45882 get_rdac_req 3 45882 NULL ++ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL ++cfs_cpt_weight_45903 cfs_cpt_weight 0 45903 NULL ++wm_adsp_region_to_reg_45915 wm_adsp_region_to_reg 0-2 45915 NULL 
++dbgfs_frame_45917 dbgfs_frame 3 45917 NULL ++btree_keys_cachelines_45928 btree_keys_cachelines 0 45928 NULL ++alloc_mr_45935 alloc_mr 1 45935 NULL ++copy_to_45969 copy_to 3 45969 NULL ++rb_simple_read_45972 rb_simple_read 3 45972 NULL ++ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL ++kobject_init_and_add_46003 kobject_init_and_add 0 46003 NULL ++sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL ++fnic_reset_stats_read_46030 fnic_reset_stats_read 3 46030 NULL nohasharray ++get_free_entries_46030 get_free_entries 1 46030 &fnic_reset_stats_read_46030 ++__access_remote_vm_46031 __access_remote_vm 0 46031 NULL ++snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL ++__ocfs2_move_extent_46060 __ocfs2_move_extent 3-4 46060 NULL nohasharray ++dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060 ++sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL ++il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 NULL nohasharray ++memcg_update_array_size_46111 memcg_update_array_size 1 46111 &il3945_ucode_general_stats_read_46111 ++C_SYSC_writev_46113 C_SYSC_writev 3 46113 NULL ++mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL ++rtw_buf_update_46138 rtw_buf_update 4 46138 NULL ++vb2_dma_sg_get_userptr_46146 vb2_dma_sg_get_userptr 3-2 46146 NULL ++__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL ++twl_direction_out_46182 twl_direction_out 2 46182 NULL ++vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL ++fq_resize_46195 fq_resize 2 46195 NULL ++add_conn_list_46197 add_conn_list 3-0 46197 NULL ++i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL ++tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL ++dsp_write_46218 dsp_write 2 46218 NULL ++hash_netiface4_expire_46226 hash_netiface4_expire 4 46226 NULL ++xen_setup_msi_irqs_46245 xen_setup_msi_irqs 2 46245 NULL ++mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL ++ReadReg_46277 ReadReg 0 46277 NULL ++sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL ++__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL nohasharray ++compat_SyS_readv_46328 compat_SyS_readv 3 46328 &__hwahc_dev_set_key_46328 ++iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL ++smk_write_direct_46363 smk_write_direct 3 46363 NULL ++fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL ++crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL ++ttm_page_pool_get_pages_46431 ttm_page_pool_get_pages 0-5 46431 NULL ++cfs_power2_roundup_46433 cfs_power2_roundup 0-1 46433 NULL ++cp210x_set_config_46447 cp210x_set_config 4 46447 NULL ++parport_pc_fifo_write_block_46455 parport_pc_fifo_write_block 3 46455 NULL ++il_dbgfs_clear_traffic_stats_write_46458 il_dbgfs_clear_traffic_stats_write 3 46458 NULL ++filldir64_46469 filldir64 3 46469 NULL ++fill_in_write_vector_46498 fill_in_write_vector 0 46498 NULL ++pin_code_reply_46510 pin_code_reply 4 46510 NULL ++mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL ++kmsg_read_46514 kmsg_read 3 46514 NULL nohasharray ++nouveau_drm_ioctl_46514 nouveau_drm_ioctl 2 46514 &kmsg_read_46514 ++nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL ++dn_current_mss_46574 dn_current_mss 0 46574 NULL ++serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL ++snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL ++il3945_stats_flag_46606 il3945_stats_flag 0-3 46606 NULL 
++vscnprintf_46617 vscnprintf 0-2 46617 NULL ++__kfifo_out_r_46623 __kfifo_out_r 3-0 46623 NULL ++request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL ++pci_enable_device_46642 pci_enable_device 0 46642 NULL ++vfs_getxattr_alloc_46649 vfs_getxattr_alloc 0 46649 NULL ++e1000_tx_map_46672 e1000_tx_map 4 46672 NULL ++l2cap_create_le_flowctl_pdu_46682 l2cap_create_le_flowctl_pdu 3 46682 NULL ++alloc_data_packet_46698 alloc_data_packet 1 46698 NULL ++__ilog2_u32_46706 __ilog2_u32 0 46706 NULL ++erst_dbg_write_46715 erst_dbg_write 3 46715 NULL ++wl1271_rx_filter_alloc_field_46721 wl1271_rx_filter_alloc_field 5 46721 NULL ++irq_domain_add_simple_46734 irq_domain_add_simple 2 46734 NULL ++read_file_tx99_46741 read_file_tx99 3 46741 NULL ++ext4_count_free_46754 ext4_count_free 2 46754 NULL ++hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL ++int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL ++_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL ++xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL ++readreg_46845 readreg 0 46845 NULL ++spi_async_46857 spi_async 0 46857 NULL ++SyS_move_pages_46863 SyS_move_pages 2 46863 NULL nohasharray ++vsnprintf_46863 vsnprintf 0 46863 &SyS_move_pages_46863 ++nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL ++qp_memcpy_from_queue_iov_46874 qp_memcpy_from_queue_iov 5-4 46874 NULL ++lov_iocontrol_46876 lov_iocontrol 3 46876 NULL ++ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL ++sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL ++ieee80211_if_fmt_power_mode_46906 ieee80211_if_fmt_power_mode 3 46906 NULL ++wlcore_alloc_hw_46917 wlcore_alloc_hw 1-3 46917 NULL ++fb_write_46924 fb_write 3 46924 NULL ++__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL ++qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL nohasharray ++ntfs_truncate_46953 ntfs_truncate 0 46953 &qla4xxx_post_aen_work_46953 ++SYSC_poll_46965 SYSC_poll 2 46965 NULL ++crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL ++mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL ++strlcat_46985 strlcat 3 46985 NULL ++bitmap_file_clear_bit_46990 bitmap_file_clear_bit 2 46990 NULL ++sel_write_bool_46996 sel_write_bool 3 46996 NULL ++blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL ++cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2-4 47024 NULL ++fs_path_len_47060 fs_path_len 0 47060 NULL ++event_trigger_regex_write_47067 event_trigger_regex_write 3 47067 NULL ++ext4_xattr_list_entries_47070 ext4_xattr_list_entries 0 47070 NULL ++pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL ++scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL ++iwl_dump_nic_event_log_47089 iwl_dump_nic_event_log 0 47089 NULL ++ptlrpc_lprocfs_threads_max_seq_write_47104 ptlrpc_lprocfs_threads_max_seq_write 3 47104 NULL ++mousedev_read_47123 mousedev_read 3 47123 NULL ++upcall_msg_size_47141 upcall_msg_size 2 47141 NULL ++acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 NULL nohasharray ++ses_recv_diag_47143 ses_recv_diag 4 47143 &acpi_ut_initialize_buffer_47143 ++mxms_headerlen_47161 mxms_headerlen 0 47161 NULL ++rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL ++rts51x_ms_rw_47171 rts51x_ms_rw 3-4 47171 NULL ++xfs_btree_get_buf_block_47197 xfs_btree_get_buf_block 0 47197 NULL ++options_write_47243 options_write 3 47243 NULL ++portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL ++ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL 
++gfs2_readpages_47285 gfs2_readpages 4 47285 NULL ++vsnprintf_47291 vsnprintf 0 47291 NULL ++tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL ++xfs_trans_reserve_quota_nblks_47313 xfs_trans_reserve_quota_nblks 0 47313 NULL ++nouveau_fb_create__47316 nouveau_fb_create_ 4 47316 NULL ++ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL ++gfs2_replay_read_block_47357 gfs2_replay_read_block 0 47357 NULL ++avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL ++kvm_arch_create_memslot_47364 kvm_arch_create_memslot 3 47364 NULL nohasharray ++__output_copy_user_47364 __output_copy_user 3 47364 &kvm_arch_create_memslot_47364 ++__bio_map_kern_47379 __bio_map_kern 3 47379 NULL ++trace_options_core_read_47390 trace_options_core_read 3 47390 NULL nohasharray ++nv_rd32_47390 nv_rd32 0 47390 &trace_options_core_read_47390 ++nametbl_list_47391 nametbl_list 2 47391 NULL ++dgrp_net_write_47392 dgrp_net_write 3 47392 NULL ++pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL ++lbs_wrmac_write_47400 lbs_wrmac_write 3 47400 NULL ++sta_vht_capa_read_47409 sta_vht_capa_read 3 47409 NULL ++crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL ++lbs_wrrf_write_47418 lbs_wrrf_write 3 47418 NULL ++nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL ++newpart_47485 newpart 6-4 47485 NULL ++core_sys_select_47494 core_sys_select 1 47494 NULL ++unlink_simple_47506 unlink_simple 3 47506 NULL ++pstore_decompress_47510 pstore_decompress 0 47510 NULL ++__proc_lnet_portal_rotor_47529 __proc_lnet_portal_rotor 5 47529 NULL ++process_vm_rw_47533 process_vm_rw 3-5 47533 NULL nohasharray ++vscnprintf_47533 vscnprintf 0-2 47533 &process_vm_rw_47533 ++einj_check_trigger_header_47534 einj_check_trigger_header 0 47534 NULL ++ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL ++read_ldt_47570 read_ldt 2 47570 NULL ++isku_sysfs_read_last_set_47572 isku_sysfs_read_last_set 6 47572 NULL ++btrfs_stack_header_bytenr_47589 btrfs_stack_header_bytenr 0 47589 NULL ++ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL ++sctp_ssnmap_new_47608 sctp_ssnmap_new 2-1 47608 NULL ++cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL ++twl4030_clear_set_47624 twl4030_clear_set 4 47624 NULL ++ccp_sha_setkey_47633 ccp_sha_setkey 3 47633 NULL ++get_size_47644 get_size 1-2 47644 NULL ++packet_recvmsg_47700 packet_recvmsg 4 47700 NULL nohasharray ++ipath_format_hwmsg_47700 ipath_format_hwmsg 2 47700 &packet_recvmsg_47700 ++save_microcode_47717 save_microcode 3 47717 NULL ++bits_to_user_47733 bits_to_user 2-3 47733 NULL ++carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL ++ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL ++mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL ++alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL ++uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL ++SyS_setgroups16_47780 SyS_setgroups16 1 47780 NULL ++error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL ++posix_acl_fix_xattr_from_user_47793 posix_acl_fix_xattr_from_user 2 47793 NULL ++W6692_empty_Bfifo_47804 W6692_empty_Bfifo 2 47804 NULL ++lov_packmd_47810 lov_packmd 0 47810 NULL ++tree_mod_log_insert_move_47823 tree_mod_log_insert_move 5 47823 NULL ++pinconf_dbg_config_write_47835 pinconf_dbg_config_write 3 47835 NULL ++KEY_SIZE_47855 KEY_SIZE 0 47855 NULL ++vhci_read_47878 vhci_read 3 47878 NULL 
++keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL ++cfs_percpt_alloc_47918 cfs_percpt_alloc 2 47918 NULL ++comedi_write_47926 comedi_write 3 47926 NULL ++nvme_trans_get_blk_desc_len_47946 nvme_trans_get_blk_desc_len 0-2 47946 NULL ++gether_get_ifname_47972 gether_get_ifname 3 47972 NULL ++mempool_resize_47983 mempool_resize 2 47983 NULL nohasharray ++iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 &mempool_resize_47983 ++dbg_port_buf_47990 dbg_port_buf 2 47990 NULL ++ib_umad_write_47993 ib_umad_write 3 47993 NULL ++lustre_cfg_len_48002 lustre_cfg_len 0 48002 NULL ++gdm_tty_recv_complete_48011 gdm_tty_recv_complete 2 48011 NULL ++ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL ++bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL ++pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL ++SYSC_writev_48040 SYSC_writev 3 48040 NULL ++wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL ++posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL ++palmas_bulk_write_48068 palmas_bulk_write 2-3-5 48068 NULL ++disc_write_48070 disc_write 3 48070 NULL ++mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL ++xfs_read_agi_48100 xfs_read_agi 0 48100 NULL ++skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL ++vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL ++set_discoverable_48141 set_discoverable 4 48141 NULL ++dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL ++_add_to_r4w_48152 _add_to_r4w 4 48152 NULL ++isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL ++c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL ++rbd_obj_method_sync_48170 rbd_obj_method_sync 8 48170 NULL ++alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL ++cfg80211_process_deauth_48200 cfg80211_process_deauth 3 48200 NULL ++ext4_index_trans_blocks_48205 ext4_index_trans_blocks 0-2 48205 NULL ++snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL ++ll_direct_IO_26_48216 ll_direct_IO_26 4 48216 NULL ++uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL ++nilfs_readpages_48229 nilfs_readpages 4 48229 NULL ++read_file_recv_48232 read_file_recv 3 48232 NULL ++unaccount_shadowed_48233 unaccount_shadowed 2 48233 NULL ++nouveau_i2c_port_create__48240 nouveau_i2c_port_create_ 7 48240 NULL ++nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL ++batadv_socket_read_48257 batadv_socket_read 3 48257 NULL ++cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL ++trace_options_write_48275 trace_options_write 3 48275 NULL ++send_set_info_48288 send_set_info 7 48288 NULL ++lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL ++timblogiw_read_48305 timblogiw_read 3 48305 NULL ++hash_setkey_48310 hash_setkey 3 48310 NULL ++kvm_mmu_pte_write_48340 kvm_mmu_pte_write 2 48340 NULL ++skb_add_data_48363 skb_add_data 3 48363 NULL ++tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL ++lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL ++uhid_event_from_user_48417 uhid_event_from_user 2 48417 NULL ++div64_u64_rem_48418 div64_u64_rem 0-1-2 48418 NULL ++pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL ++print_filtered_48442 print_filtered 2-0 48442 NULL ++tun_recvmsg_48463 tun_recvmsg 4 48463 NULL ++compat_SyS_preadv64_48469 compat_SyS_preadv64 3 48469 NULL ++ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL ++r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL ++send_control_msg_48498 send_control_msg 6 48498 NULL 
++count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL ++diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL ++brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL ++phantom_get_free_48514 phantom_get_free 0 48514 NULL ++drbd_bm_capacity_48530 drbd_bm_capacity 0 48530 NULL ++raid10_size_48571 raid10_size 0-2-3 48571 NULL ++llog_data_len_48607 llog_data_len 1 48607 NULL ++do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL ++ll_rw_extents_stats_pp_seq_write_48651 ll_rw_extents_stats_pp_seq_write 3 48651 NULL ++mtd_read_48655 mtd_read 0 48655 NULL ++aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL ++sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL ++security_inode_setattr_48689 security_inode_setattr 0 48689 NULL ++hysdn_log_write_48694 hysdn_log_write 3 48694 NULL ++altera_drscan_48698 altera_drscan 2 48698 NULL ++kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL ++recv_msg_48709 recv_msg 4 48709 NULL ++lpfc_idiag_drbacc_write_48712 lpfc_idiag_drbacc_write 3 48712 NULL ++SyS_lgetxattr_48719 SyS_lgetxattr 4 48719 NULL ++ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL ++ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL ++l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL ++gfs2_direct_IO_48774 gfs2_direct_IO 4 48774 NULL ++il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL ++twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL ++atomic_counters_read_48827 atomic_counters_read 3 48827 NULL ++azx_get_position_48841 azx_get_position 0 48841 NULL ++vc_do_resize_48842 vc_do_resize 3-4 48842 NULL ++comedi_buf_write_alloc_48846 comedi_buf_write_alloc 0-2 48846 NULL ++suspend_dtim_interval_write_48854 suspend_dtim_interval_write 3 48854 NULL ++C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 NULL nohasharray ++viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 &C_SYSC_pwritev64_48864 ++__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL ++crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL ++joydev_handle_JSIOCSAXMAP_48898 joydev_handle_JSIOCSAXMAP 3 48898 NULL ++xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL ++msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL ++sep_crypto_dma_48937 sep_crypto_dma 0 48937 NULL ++si5351_write_parameters_48940 si5351_write_parameters 2 48940 NULL ++event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL ++nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL ++vmci_handle_arr_create_48971 vmci_handle_arr_create 1 48971 NULL ++xfs_trans_commit_48982 xfs_trans_commit 0 48982 NULL ++gfs2_dir_add_48987 gfs2_dir_add 0 48987 NULL ++rds_rm_size_48996 rds_rm_size 0-2 48996 NULL ++sel_write_enforce_48998 sel_write_enforce 3 48998 NULL ++null_alloc_rs_49019 null_alloc_rs 2 49019 NULL ++filemap_check_errors_49022 filemap_check_errors 0 49022 NULL ++transient_status_49027 transient_status 4 49027 NULL ++ll_xattr_cache_add_49032 ll_xattr_cache_add 4-0 49032 NULL ++iwl_mvm_power_legacy_dbgfs_read_49038 iwl_mvm_power_legacy_dbgfs_read 4 49038 NULL ++scsi_register_49094 scsi_register 2 49094 NULL ++compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL ++gfs2_diradd_alloc_required_49105 gfs2_diradd_alloc_required 0 49105 NULL ++xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL ++ll_max_cached_mb_seq_write_49122 ll_max_cached_mb_seq_write 3 49122 NULL ++pt_read_49136 pt_read 3 49136 NULL ++ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL ++f2fs_acl_count_49155 
f2fs_acl_count 0-1 49155 NULL ++ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL ++__jfs_setxattr_49175 __jfs_setxattr 5 49175 NULL ++ath6kl_bgscan_int_write_49178 ath6kl_bgscan_int_write 3 49178 NULL ++dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL ++print_queue_49191 print_queue 4-0 49191 NULL ++root_nfs_cat_49192 root_nfs_cat 3 49192 NULL ++iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL ++il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL ++do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL ++nouveau_therm_create__49228 nouveau_therm_create_ 4 49228 NULL ++ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL ++isku_sysfs_read_keys_media_49268 isku_sysfs_read_keys_media 6 49268 NULL ++ptlrpc_check_set_49277 ptlrpc_check_set 0 49277 NULL ++rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL ++viafb_dfph_proc_write_49288 viafb_dfph_proc_write 3 49288 NULL ++uio_read_49300 uio_read 3 49300 NULL ++isku_sysfs_read_keys_macro_49312 isku_sysfs_read_keys_macro 6 49312 NULL ++SYSC_mincore_49319 SYSC_mincore 2-1 49319 NULL ++fwtty_port_handler_49327 fwtty_port_handler 9 49327 NULL ++srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-4-3 49330 NULL ++joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL ++iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL ++ext4_ext_index_trans_blocks_49396 ext4_ext_index_trans_blocks 0 49396 NULL ++rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL ++tnode_alloc_49407 tnode_alloc 1 49407 NULL ++samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL ++compat_do_msg_fill_49440 compat_do_msg_fill 3 49440 NULL ++__hfsplus_getxattr_49460 __hfsplus_getxattr 0 49460 NULL ++agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL ++xfs_iformat_local_49472 xfs_iformat_local 4-0 49472 NULL ++isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL ++iwl_dbgfs_disable_power_off_read_49517 iwl_dbgfs_disable_power_off_read 3 49517 NULL ++SyS_listxattr_49519 SyS_listxattr 3 49519 NULL ++emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL ++smk_write_access_49561 smk_write_access 3 49561 NULL ++alloc_chunk_49575 alloc_chunk 1 49575 NULL ++sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL ++readfifo_49583 readfifo 1 49583 NULL ++gfs2_quota_lock_49587 gfs2_quota_lock 0 49587 NULL ++evm_inode_setattr_49594 evm_inode_setattr 0 49594 NULL ++tap_write_49595 tap_write 3 49595 NULL ++isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL ++btrfs_mksubvol_49616 btrfs_mksubvol 3 49616 NULL ++heap_init_49617 heap_init 2 49617 NULL ++smk_write_doi_49621 smk_write_doi 3 49621 NULL ++port_fops_read_49626 port_fops_read 3 49626 NULL ++btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL ++xfs_ialloc_get_rec_49648 xfs_ialloc_get_rec 0 49648 NULL ++aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 4-3 49683 NULL ++SyS_pwritev_49688 SyS_pwritev 3 49688 NULL ++__copy_from_user_nocheck_49699 __copy_from_user_nocheck 0-3 49699 NULL ++cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL ++write_pool_49718 write_pool 3 49718 NULL ++kvm_mmu_notifier_invalidate_page_49723 kvm_mmu_notifier_invalidate_page 3 49723 NULL ++sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL ++zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL 
++btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL ++fuse_wr_pages_49753 fuse_wr_pages 0-1-2 49753 NULL ++key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL ++fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL ++w83977af_fir_interrupt_49775 w83977af_fir_interrupt 0 49775 NULL ++ceph_osdc_readpages_49789 ceph_osdc_readpages 0 49789 NULL ++nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL ++ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL ++add_uuid_49831 add_uuid 4 49831 NULL ++iraw_loop_49842 iraw_loop 0-1 49842 NULL ++twl4030_write_49846 twl4030_write 2 49846 NULL ++scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL ++timeradd_entry_49850 timeradd_entry 3 49850 NULL ++fiemap_count_to_size_49869 fiemap_count_to_size 0-1 49869 NULL ++sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL ++osc_brw_49896 osc_brw 4 49896 NULL ++config_ep_by_speed_49939 config_ep_by_speed 0 49939 NULL ++xfs_ialloc_ag_alloc_49960 xfs_ialloc_ag_alloc 0 49960 NULL ++ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL ++drm_buffer_copy_from_user_49990 drm_buffer_copy_from_user 3 49990 NULL ++l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL ++dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL ++xfs_ialloc_inode_init_50015 xfs_ialloc_inode_init 0 50015 NULL ++security_context_to_sid_50019 security_context_to_sid 2 50019 NULL ++isdn_read_50021 isdn_read 3 50021 NULL ++mdc_rename_pack_50023 mdc_rename_pack 4-6 50023 NULL ++brcmf_debugfs_chipinfo_read_50033 brcmf_debugfs_chipinfo_read 3 50033 NULL ++ioread8_50049 ioread8 0 50049 NULL ++fuse_conn_max_background_write_50061 fuse_conn_max_background_write 3 50061 NULL ++__kfifo_dma_in_prepare_50081 __kfifo_dma_in_prepare 4 50081 NULL ++dev_set_alias_50084 dev_set_alias 3 50084 NULL ++libcfs_ioctl_popdata_50087 libcfs_ioctl_popdata 3 50087 NULL ++sock_setsockopt_50088 sock_setsockopt 5 50088 NULL ++altera_swap_dr_50090 altera_swap_dr 2 50090 NULL ++android_set_cntry_50100 android_set_cntry 0 50100 NULL ++read_file_slot_50111 read_file_slot 3 50111 NULL ++rx_streaming_interval_write_50120 rx_streaming_interval_write 3 50120 NULL ++jfs_direct_IO_50125 jfs_direct_IO 4 50125 NULL ++SYSC_preadv_50134 SYSC_preadv 3 50134 NULL ++tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL ++xfs_bwrite_50154 xfs_bwrite 0 50154 NULL ++kmalloc_node_50163 kmalloc_node 1 50163 NULL ++rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL ++ahd_probe_stack_size_50168 ahd_probe_stack_size 0 50168 NULL ++odev_update_50169 odev_update 2 50169 NULL ++ubi_resize_volume_50172 ubi_resize_volume 2 50172 NULL nohasharray ++ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 &ubi_resize_volume_50172 ++cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL ++cyttsp4_probe_50201 cyttsp4_probe 4 50201 NULL ++rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL ++mthca_buddy_init_50206 mthca_buddy_init 2 50206 NULL ++l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL ++mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL ++sg_kmalloc_50240 sg_kmalloc 1 50240 NULL ++rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL ++soc_codec_reg_show_50302 soc_codec_reg_show 0-3 50302 NULL ++SYSC_flistxattr_50307 SYSC_flistxattr 3 50307 NULL ++SYSC_sched_setaffinity_50310 SYSC_sched_setaffinity 2 50310 NULL ++soc_camera_read_50319 soc_camera_read 3 50319 NULL 
++do_launder_page_50329 do_launder_page 0 50329 NULL ++nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL ++lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL ++snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 NULL ++tpm_read_50344 tpm_read 3 50344 NULL ++isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL ++iwl_dbgfs_echo_test_write_50362 iwl_dbgfs_echo_test_write 3 50362 NULL ++xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL ++roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL ++sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL ++l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL ++iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL ++validate_acl_mac_addrs_50429 validate_acl_mac_addrs 0 50429 NULL ++xfs_alloc_update_counters_50441 xfs_alloc_update_counters 0 50441 NULL ++btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL ++pgctrl_write_50453 pgctrl_write 3 50453 NULL ++device_create_sys_dev_entry_50458 device_create_sys_dev_entry 0 50458 NULL ++cfs_size_round_50472 cfs_size_round 0-1 50472 NULL ++cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL ++xfs_iformat_extents_50486 xfs_iformat_extents 0 50486 NULL ++gfs2_block_map_50492 gfs2_block_map 0 50492 NULL ++mei_io_cb_alloc_req_buf_50493 mei_io_cb_alloc_req_buf 2 50493 NULL ++pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL ++ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL ++bh_get_50543 bh_get 0 50543 NULL ++gfs2_meta_inode_buffer_50544 gfs2_meta_inode_buffer 0 50544 NULL ++usbat_flash_write_data_50553 usbat_flash_write_data 4 50553 NULL ++fat_readpages_50582 fat_readpages 4 50582 NULL ++iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL ++xillybus_write_50605 xillybus_write 3 50605 NULL ++rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL ++sparse_early_usemaps_alloc_node_50623 sparse_early_usemaps_alloc_node 4 50623 NULL ++simple_transaction_get_50633 simple_transaction_get 3 50633 NULL ++gfs2_unstuff_dinode_50644 gfs2_unstuff_dinode 0 50644 NULL ++ath6kl_tm_rx_event_50664 ath6kl_tm_rx_event 3 50664 NULL ++bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL ++prism2_read_fid_reg_50689 prism2_read_fid_reg 0 50689 NULL ++xfs_growfs_get_hdr_buf_50697 xfs_growfs_get_hdr_buf 3 50697 NULL ++dev_mem_read_50706 dev_mem_read 3 50706 NULL ++blk_check_plugged_50736 blk_check_plugged 3 50736 NULL ++__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL ++ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL ++tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL ++bio_alloc_map_data_50782 bio_alloc_map_data 2 50782 NULL ++tpm_write_50798 tpm_write 3 50798 NULL ++tun_do_read_50800 tun_do_read 4 50800 NULL ++write_flush_50803 write_flush 3 50803 NULL ++dvb_play_50814 dvb_play 3 50814 NULL ++btrfs_stack_file_extent_disk_num_bytes_50825 btrfs_stack_file_extent_disk_num_bytes 0 50825 NULL ++dpcm_show_state_50827 dpcm_show_state 0 50827 NULL ++SetArea_50835 SetArea 4 50835 NULL ++videobuf_dma_init_user_50839 videobuf_dma_init_user 4-3 50839 NULL ++carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL ++SyS_lgetxattr_50889 SyS_lgetxattr 4 50889 NULL ++netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL ++__bdev_writeseg_50903 __bdev_writeseg 4 50903 NULL ++xfs_alloc_get_freelist_50906 xfs_alloc_get_freelist 0 50906 NULL ++xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL ++blk_rq_cur_sectors_50910 
blk_rq_cur_sectors 0 50910 NULL ++hash_recvmsg_50924 hash_recvmsg 4 50924 NULL ++chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL ++show_device_status_50947 show_device_status 0 50947 NULL ++irq_timeout_write_50950 irq_timeout_write 3 50950 NULL ++virtio_cread16_50951 virtio_cread16 0 50951 NULL ++sdio_uart_write_50954 sdio_uart_write 3 50954 NULL ++SyS_setxattr_50957 SyS_setxattr 4 50957 NULL ++iwl_statistics_flag_50981 iwl_statistics_flag 0-3 50981 NULL ++timeout_write_50991 timeout_write 3 50991 NULL ++proc_write_51003 proc_write 3 51003 NULL ++jbd2_journal_extend_51012 jbd2_journal_extend 2 51012 NULL ++lbs_dev_info_51023 lbs_dev_info 3 51023 NULL ++fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 NULL ++BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL ++dump_midi_51040 dump_midi 3 51040 NULL ++usb_get_descriptor_51041 usb_get_descriptor 0 51041 NULL ++srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL ++do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL ++wusb_prf_64_51065 wusb_prf_64 7 51065 NULL ++jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL ++__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL ++ti_recv_51110 ti_recv 3 51110 NULL ++alloc_rtllib_51136 alloc_rtllib 1 51136 NULL ++simple_xattr_set_51140 simple_xattr_set 4 51140 NULL ++xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL ++nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL ++snd_pcm_write_51235 snd_pcm_write 3 51235 NULL ++drm_property_create_51239 drm_property_create 4 51239 NULL ++__mxt_read_reg_51249 __mxt_read_reg 0 51249 NULL ++st_read_51251 st_read 3 51251 NULL ++compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL ++target_alloc_sgl_51264 target_alloc_sgl 3 51264 NULL ++dvb_audio_write_51275 dvb_audio_write 3 51275 NULL ++ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL ++pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL ++bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL ++init_map_ipmac_51317 init_map_ipmac 5 51317 NULL ++alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL ++ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL ++alloc_smp_req_51337 alloc_smp_req 1 51337 NULL ++ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL ++ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL ++radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL ++blk_register_region_51424 blk_register_region 1-2 51424 NULL ++mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL ++hfsplus_brec_read_51436 hfsplus_brec_read 0 51436 NULL ++xfs_mod_incore_sb_unlocked_51439 xfs_mod_incore_sb_unlocked 0 51439 NULL ++ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL ++print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL ++____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL ++xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL ++kvm_fetch_guest_virt_51493 kvm_fetch_guest_virt 4-2 51493 NULL ++ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL ++__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL ++batadv_tt_prepare_tvlv_local_data_51568 batadv_tt_prepare_tvlv_local_data 0 51568 NULL ++ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL ++aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL ++table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 
51613 NULL ++extent_fiemap_51621 extent_fiemap 3 51621 NULL ++sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL ++iscsi_create_session_51647 iscsi_create_session 3 51647 NULL ++xfs_iformat_btree_51651 xfs_iformat_btree 0 51651 NULL ++ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL ++sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL ++host_mapping_level_51696 host_mapping_level 0 51696 NULL ++sel_write_access_51704 sel_write_access 3 51704 NULL ++tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL ++v9fs_alloc_rdir_buf_51716 v9fs_alloc_rdir_buf 2 51716 NULL ++drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL ++sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL ++cm4040_read_51732 cm4040_read 3 51732 NULL ++get_user_pages_fast_51751 get_user_pages_fast 0 51751 NULL ++ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL ++if_write_51756 if_write 3 51756 NULL ++qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL ++buffer_from_user_51826 buffer_from_user 3 51826 NULL ++ioread32_51847 ioread32 0 51847 NULL nohasharray ++read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847 ++do_readv_writev_51849 do_readv_writev 4 51849 NULL ++SYSC_sendto_51852 SYSC_sendto 6 51852 NULL ++bm_page_io_async_51858 bm_page_io_async 2 51858 NULL ++pointer_size_read_51863 pointer_size_read 3 51863 NULL ++get_indirect_ea_51869 get_indirect_ea 4 51869 NULL ++user_read_51881 user_read 3 51881 NULL ++dbAdjCtl_51888 dbAdjCtl 0 51888 NULL ++SyS_mq_timedsend_51896 SyS_mq_timedsend 3 51896 NULL ++wmi_set_ie_51919 wmi_set_ie 3 51919 NULL ++dbg_status_buf_51930 dbg_status_buf 2 51930 NULL ++__tcp_mtu_to_mss_51938 __tcp_mtu_to_mss 0-2 51938 NULL ++xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL ++scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL ++snd_mask_min_51969 snd_mask_min 0 51969 NULL ++__blkdev_get_51972 __blkdev_get 0 51972 NULL ++get_zone_51981 get_zone 0-1 51981 NULL ++cifs_strict_writev_51984 cifs_strict_writev 4 51984 NULL ++ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL ++_c4iw_write_mem_dma_51991 _c4iw_write_mem_dma 3 51991 NULL ++dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL ++skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL ++rdmalt_52022 rdmalt 0 52022 NULL ++override_release_52032 override_release 2 52032 NULL ++end_port_52042 end_port 0 52042 NULL ++dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL ++msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL ++dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL ++__fuse_request_alloc_52060 __fuse_request_alloc 1 52060 NULL ++isofs_readpages_52067 isofs_readpages 4 52067 NULL ++mxuport_process_read_urb_data_52072 mxuport_process_read_urb_data 3 52072 NULL ++nsm_get_handle_52089 nsm_get_handle 4 52089 NULL ++ulist_add_merge_52096 ulist_add_merge 0 52096 NULL ++o2net_debug_read_52105 o2net_debug_read 3 52105 NULL ++split_scan_timeout_write_52128 split_scan_timeout_write 3 52128 NULL ++retry_count_read_52129 retry_count_read 3 52129 NULL ++xfs_btree_change_owner_52137 xfs_btree_change_owner 0 52137 NULL ++gdm_usb_hci_send_52138 gdm_usb_hci_send 3 52138 NULL ++sub_alloc_52140 sub_alloc 0 52140 NULL ++hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL ++htable_size_52148 htable_size 0-1 52148 NULL ++gfs2_rs_alloc_52152 gfs2_rs_alloc 0 52152 NULL ++smk_write_load2_52155 smk_write_load2 3 52155 NULL ++ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 
52168 NULL ++mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL ++print_prefix_52176 print_prefix 0 52176 NULL ++proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL ++reiserfs_posix_acl_from_disk_52191 reiserfs_posix_acl_from_disk 2 52191 NULL ++vmci_qp_broker_alloc_52216 vmci_qp_broker_alloc 6-5 52216 NULL ++fuse_request_alloc_52243 fuse_request_alloc 1 52243 NULL nohasharray ++xfs_iomap_eof_align_last_fsb_52243 xfs_iomap_eof_align_last_fsb 0 52243 &fuse_request_alloc_52243 ++mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL ++shrink_slab_52261 shrink_slab 2 52261 NULL ++sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL ++handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL ++kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL ++read_file_reset_52310 read_file_reset 3 52310 NULL ++request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL ++hwflags_read_52318 hwflags_read 3 52318 NULL ++test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL ++hur_len_52339 hur_len 0 52339 NULL ++bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL nohasharray ++cap_inode_killpriv_52362 cap_inode_killpriv 0 52362 &bytes_to_frames_52362 ++copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL ++iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL ++hfsplus_find_attr_52374 hfsplus_find_attr 0 52374 NULL ++mq_emit_config_values_52378 mq_emit_config_values 3 52378 NULL ++isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL ++jfs_setxattr_52389 jfs_setxattr 4 52389 NULL ++aer_inject_write_52399 aer_inject_write 3 52399 NULL ++cgroup_file_write_52417 cgroup_file_write 3 52417 NULL ++line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL ++hso_serial_common_create_52428 hso_serial_common_create 4 52428 NULL ++delay_status_52431 delay_status 5 52431 NULL ++ath6kl_delete_qos_write_52435 ath6kl_delete_qos_write 3 52435 NULL ++ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL ++acpi_nvs_for_each_region_52448 acpi_nvs_for_each_region 0 52448 NULL ++alauda_read_data_52452 alauda_read_data 3 52452 NULL ++ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1 52477 NULL ++usb_tranzport_write_52479 usb_tranzport_write 3 52479 NULL ++ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL ++fd_do_rw_52495 fd_do_rw 3 52495 NULL ++int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL ++xfs_alloc_get_rec_52502 xfs_alloc_get_rec 0 52502 NULL ++lmv_get_easize_52504 lmv_get_easize 0 52504 NULL ++pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL ++bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL ++dup_variable_bug_52525 dup_variable_bug 3 52525 NULL ++raw_recvmsg_52529 raw_recvmsg 4 52529 NULL ++dccpprobe_read_52549 dccpprobe_read 3 52549 NULL ++ntfs_prepare_pages_for_non_resident_write_52556 ntfs_prepare_pages_for_non_resident_write 0 52556 NULL ++debug_level_proc_write_52572 debug_level_proc_write 3 52572 NULL ++kernfs_setattr_52583 kernfs_setattr 0 52583 NULL ++isku_sysfs_read_macro_52587 isku_sysfs_read_macro 6 52587 NULL ++SyS_setsockopt_52610 SyS_setsockopt 5 52610 NULL ++ll_sa_entry_alloc_52611 ll_sa_entry_alloc 4 52611 NULL ++tps80031_writes_52638 tps80031_writes 3-4 52638 NULL ++brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL ++nvme_queue_extra_52661 nvme_queue_extra 0-1 52661 NULL ++SYSC_gethostname_52677 SYSC_gethostname 2 52677 NULL ++nvd0_disp_pioc_create__52693 nvd0_disp_pioc_create_ 5 52693 NULL ++nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL ++__dm_stat_bio_52722 __dm_stat_bio 3 
52722 NULL ++cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL ++blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL ++relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL ++hfcsusb_rx_frame_52745 hfcsusb_rx_frame 3 52745 NULL ++carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL ++ieee80211_if_read_beacon_timeout_52756 ieee80211_if_read_beacon_timeout 3 52756 NULL ++nvme_trans_ext_inq_page_52776 nvme_trans_ext_inq_page 3 52776 NULL ++pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL ++lb_alloc_ep_req_52837 lb_alloc_ep_req 2 52837 NULL ++mon_bin_get_event_52863 mon_bin_get_event 4-6 52863 NULL ++twl6030_gpadc_write_52867 twl6030_gpadc_write 1 52867 NULL ++qib_decode_6120_err_52876 qib_decode_6120_err 3 52876 NULL ++twlreg_write_52880 twlreg_write 3 52880 NULL ++pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL ++cache_read_procfs_52882 cache_read_procfs 3 52882 NULL ++kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL ++dio_bio_reap_52913 dio_bio_reap 0 52913 NULL ++__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL ++iblock_get_bio_52936 iblock_get_bio 3 52936 NULL ++__nodes_remap_52951 __nodes_remap 5 52951 NULL ++send_packet_52960 send_packet 4 52960 NULL ++ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL ++tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL ++num_node_state_52989 num_node_state 0 52989 NULL ++btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL ++tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL ++xfs_rtfree_extent_53024 xfs_rtfree_extent 0 53024 NULL ++bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL ++regcache_lzo_block_count_53056 regcache_lzo_block_count 0 53056 NULL ++cfi_read_query_53066 cfi_read_query 0 53066 NULL ++iwl_dbgfs_interrupt_write_53069 iwl_dbgfs_interrupt_write 3 53069 NULL ++mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL ++mic_virtio_copy_from_user_53107 mic_virtio_copy_from_user 3 53107 NULL ++verity_status_53120 verity_status 5 53120 NULL ++brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL ++inode_newsize_ok_53140 inode_newsize_ok 0 53140 NULL nohasharray ++ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 &inode_newsize_ok_53140 ++ieee80211_bss_info_update_53170 ieee80211_bss_info_update 4 53170 NULL ++btrfs_io_bio_alloc_53179 btrfs_io_bio_alloc 2 53179 NULL ++find_good_lh_53183 find_good_lh 0 53183 NULL ++clear_capture_buf_53192 clear_capture_buf 2 53192 NULL ++xfs_btree_updkey_53195 xfs_btree_updkey 0 53195 NULL ++tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL ++ptlrpc_lprocfs_req_history_max_seq_write_53243 ptlrpc_lprocfs_req_history_max_seq_write 3 53243 NULL ++xfs_trans_read_buf_map_53258 xfs_trans_read_buf_map 5-0 53258 NULL ++wil_write_file_ssid_53266 wil_write_file_ssid 3 53266 NULL ++btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL ++ftrace_profile_write_53327 ftrace_profile_write 3 53327 NULL ++find_nr_power_limit_53330 find_nr_power_limit 0 53330 NULL ++gsm_control_reply_53333 gsm_control_reply 4 53333 NULL ++btree_keys_bytes_53348 btree_keys_bytes 0 53348 NULL ++read_6120_creg32_53363 read_6120_creg32 0 53363 NULL ++sock_setbindtodevice_53369 sock_setbindtodevice 3 53369 NULL ++get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL ++xfs_imap_53389 xfs_imap 0 53389 NULL ++isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL ++mwifiex_info_read_53447 
mwifiex_info_read 3 53447 NULL ++apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL ++paging64_prefetch_gpte_53468 paging64_prefetch_gpte 4 53468 NULL ++ima_write_template_field_data_53475 ima_write_template_field_data 2 53475 NULL nohasharray ++create_trace_kprobe_53475 create_trace_kprobe 1 53475 &ima_write_template_field_data_53475 ++iowarrior_read_53483 iowarrior_read 3 53483 NULL ++osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL ++do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL ++snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL ++dbAllocNext_53506 dbAllocNext 0 53506 NULL ++check_acl_53512 check_acl 0 53512 NULL ++ll_xattr_cache_update_53515 ll_xattr_cache_update 4 53515 NULL ++nft_data_dump_53549 nft_data_dump 5 53549 NULL ++SYSC_bind_53582 SYSC_bind 3 53582 NULL ++cifs_utf16_bytes_53593 cifs_utf16_bytes 0 53593 NULL ++proc_uid_map_write_53596 proc_uid_map_write 3 53596 NULL ++pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL ++___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL ++xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL ++ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL ++nr_sendmsg_53656 nr_sendmsg 4 53656 NULL ++fuse_fill_write_pages_53682 fuse_fill_write_pages 0-4 53682 NULL ++v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL ++bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL nohasharray ++igb_alloc_q_vector_53690 igb_alloc_q_vector 4-6 53690 &bdev_logical_block_size_53690 ++find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL ++__proc_debug_mb_53732 __proc_debug_mb 5 53732 NULL ++wdm_write_53735 wdm_write 3 53735 NULL ++amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 NULL nohasharray ++lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 &amdtp_out_stream_get_max_payload_53755 ++wa_populate_buf_in_urb_53758 wa_populate_buf_in_urb 3-4 53758 NULL ++ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL ++__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL ++qp_alloc_host_work_53798 qp_alloc_host_work 5-3 53798 NULL ++regmap_raw_write_53803 regmap_raw_write 2-4 53803 NULL ++lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL ++nls_nullsize_53815 nls_nullsize 0 53815 NULL ++setup_data_read_53822 setup_data_read 3 53822 NULL ++pms_read_53873 pms_read 3 53873 NULL ++ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL ++SyS_setgroups_53900 SyS_setgroups 1 53900 NULL ++posix_acl_chmod_53904 posix_acl_chmod 0 53904 NULL ++batadv_tt_tvlv_ogm_handler_v1_53909 batadv_tt_tvlv_ogm_handler_v1 5 53909 NULL ++usb_serial_generic_write_53927 usb_serial_generic_write 4 53927 NULL ++ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 0 53938 NULL ++idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL ++__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL ++ieee80211_if_fmt_dot11MeshHWMPperrMinInterval_53998 ieee80211_if_fmt_dot11MeshHWMPperrMinInterval 3 53998 NULL ++hfsplus_attr_build_key_54013 hfsplus_attr_build_key 0 54013 NULL ++snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 5-2-4 54018 NULL ++mdc_kuc_write_54019 mdc_kuc_write 3 54019 NULL ++ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL ++batadv_tt_update_orig_54049 batadv_tt_update_orig 6-4 54049 NULL ++pipeline_dec_packet_out_read_54052 
pipeline_dec_packet_out_read 3 54052 NULL ++nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL ++rproc_state_read_54057 rproc_state_read 3 54057 NULL ++_malloc_54077 _malloc 1 54077 NULL ++bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL ++altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL nohasharray ++lustre_posix_acl_xattr_filter_54103 lustre_posix_acl_xattr_filter 2-0 54103 &altera_set_ir_pre_54103 ++__comedi_buf_write_alloc_54112 __comedi_buf_write_alloc 0-2 54112 NULL ++strn_len_54122 strn_len 0 54122 NULL ++isku_receive_54130 isku_receive 4 54130 NULL ++isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL ++irq_blk_threshold_write_54138 irq_blk_threshold_write 3 54138 NULL ++memcpy_toiovec_54166 memcpy_toiovec 3 54166 NULL ++nouveau_falcon_create__54169 nouveau_falcon_create_ 8 54169 NULL ++p9_client_prepare_req_54175 p9_client_prepare_req 3 54175 NULL ++do_sys_poll_54221 do_sys_poll 2 54221 NULL ++__register_chrdev_54223 __register_chrdev 2-3 54223 NULL ++pi_read_regr_54231 pi_read_regr 0 54231 NULL ++reada_add_block_54247 reada_add_block 2 54247 NULL ++xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL ++ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL ++audio_write_54261 audio_write 4 54261 NULL nohasharray ++wusb_prf_54261 wusb_prf 7 54261 &audio_write_54261 ++mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL ++kstrtou16_from_user_54274 kstrtou16_from_user 2 54274 NULL ++tipc_multicast_54285 tipc_multicast 4 54285 NULL ++altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL ++dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL ++reclaim_pages_54301 reclaim_pages 3 54301 NULL ++sprintf_54306 sprintf 0 54306 NULL ++bio_add_pc_page_54319 bio_add_pc_page 4 54319 NULL ++br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL ++__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL ++__get_free_pages_54352 __get_free_pages 0 54352 NULL ++read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL ++vfs_readlink_54368 vfs_readlink 3 54368 NULL ++do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray ++intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377 ++ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL ++snd_pcm_oss_read2_54387 snd_pcm_oss_read2 3-0 54387 NULL ++iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL ++ll_ra_count_get_54410 ll_ra_count_get 3 54410 NULL ++copy_gadget_strings_54417 copy_gadget_strings 2-3 54417 NULL ++sparse_early_mem_maps_alloc_node_54485 sparse_early_mem_maps_alloc_node 4 54485 NULL ++simple_strtoull_54493 simple_strtoull 0 54493 NULL ++btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL ++rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL ++vmci_transport_dgram_enqueue_54525 vmci_transport_dgram_enqueue 4 54525 NULL ++viacam_read_54526 viacam_read 3 54526 NULL ++unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL ++setsockopt_54539 setsockopt 5 54539 NULL ++lbs_lowsnr_write_54549 lbs_lowsnr_write 3 54549 NULL ++SYSC_setsockopt_54561 SYSC_setsockopt 5 54561 NULL ++nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL ++fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL ++nvme_npages_54601 nvme_npages 0-1 54601 NULL ++irq_pkt_threshold_write_54605 irq_pkt_threshold_write 3 54605 NULL ++port_fops_write_54627 port_fops_write 3 54627 NULL ++irq_timeout_read_54653 irq_timeout_read 3 54653 NULL ++dns_resolver_read_54658 dns_resolver_read 3 54658 NULL 
++twl6030_interrupt_mask_54659 twl6030_interrupt_mask 2 54659 NULL ++tdp_page_fault_54663 tdp_page_fault 2 54663 NULL ++bus_add_device_54665 bus_add_device 0 54665 NULL ++cw1200_queue_stats_init_54670 cw1200_queue_stats_init 2 54670 NULL ++bio_kmalloc_54672 bio_kmalloc 2 54672 NULL ++evm_read_key_54674 evm_read_key 3 54674 NULL ++tipc_link_send_sections_fast_54689 tipc_link_send_sections_fast 3 54689 NULL ++__btrfs_inc_extent_ref_54706 __btrfs_inc_extent_ref 7 54706 NULL ++rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL ++ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL ++kzalloc_54740 kzalloc 1 54740 NULL ++wep_iv_read_54744 wep_iv_read 3 54744 NULL ++lpfc_idiag_pcicfg_write_54749 lpfc_idiag_pcicfg_write 3 54749 NULL ++iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL ++adis16480_show_firmware_date_54762 adis16480_show_firmware_date 3 54762 NULL ++ldsem_atomic_update_54774 ldsem_atomic_update 1 54774 NULL ++xfs_rtallocate_extent_block_54791 xfs_rtallocate_extent_block 0 54791 NULL ++flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL ++nfsd_write_54809 nfsd_write 6 54809 NULL ++ar9287_dump_modal_eeprom_54814 ar9287_dump_modal_eeprom 3-2 54814 NULL ++crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 NULL nohasharray ++kvzalloc_54815 kvzalloc 1 54815 &crypto_tfm_ctx_alignment_54815 nohasharray ++aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 &kvzalloc_54815 ++generic_perform_write_54832 generic_perform_write 3-0 54832 NULL ++write_rio_54837 write_rio 3 54837 NULL ++ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 NULL nohasharray ++nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 &ext3_acl_from_disk_54839 ++ufx_ops_write_54848 ufx_ops_write 3 54848 NULL ++printer_read_54851 printer_read 3 54851 NULL ++broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL ++prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL ++xfs_alloc_read_agfl_54879 xfs_alloc_read_agfl 0 54879 NULL ++iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL nohasharray ++kobject_set_name_vargs_54913 kobject_set_name_vargs 0 54913 &iscsi_pool_init_54913 ++btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL ++bio_add_page_54933 bio_add_page 0-3 54933 NULL ++mxms_structlen_54939 mxms_structlen 0 54939 NULL ++add_port_54941 add_port 2 54941 NULL ++ath9k_dump_btcoex_54949 ath9k_dump_btcoex 3-0 54949 NULL ++alauda_write_data_54967 alauda_write_data 3 54967 NULL ++c4_add_card_54968 c4_add_card 3 54968 NULL ++ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL ++cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL ++error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL ++dgap_do_bios_load_55025 dgap_do_bios_load 3 55025 NULL ++apei_exec_run_55075 apei_exec_run 0 55075 NULL ++bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL ++read_dma_55086 read_dma 3 55086 NULL ++rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL ++crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL ++filldir_55137 filldir 3 55137 NULL ++ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL ++xfs_check_agi_freecount_55149 xfs_check_agi_freecount 0 55149 NULL nohasharray ++npages_to_npools_55149 npages_to_npools 0-1 55149 &xfs_check_agi_freecount_55149 ++ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL ++xfs_icsb_modify_counters_55156 
xfs_icsb_modify_counters 0 55156 NULL ++gfs2_ri_update_55185 gfs2_ri_update 0 55185 NULL ++mtd_get_fact_prot_info_55186 mtd_get_fact_prot_info 0 55186 NULL ++sel_write_relabel_55195 sel_write_relabel 3 55195 NULL ++sched_feat_write_55202 sched_feat_write 3 55202 NULL ++ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL ++__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL ++do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL ++qxl_alloc_client_monitors_config_55216 qxl_alloc_client_monitors_config 2 55216 NULL ++nouveau_mc_create__55217 nouveau_mc_create_ 4 55217 NULL ++dbAllocDmap_55227 dbAllocDmap 0 55227 NULL ++memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL ++lbs_failcount_write_55276 lbs_failcount_write 3 55276 NULL ++persistent_ram_new_55286 persistent_ram_new 2-1 55286 NULL ++rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL ++lov_get_stripecnt_55297 lov_get_stripecnt 0-3 55297 NULL ++gsm_control_modem_55303 gsm_control_modem 3 55303 NULL ++wimax_msg_len_55304 wimax_msg_len 0 55304 NULL ++qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 NULL ++__vxge_hw_vpath_initialize_55328 __vxge_hw_vpath_initialize 2 55328 NULL ++vme_user_read_55338 vme_user_read 3 55338 NULL ++__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 NULL nohasharray ++sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 &__wa_xfer_setup_sizes_55342 ++tipc_send2name_55373 tipc_send2name 5 55373 NULL ++cw1200_sdio_align_size_55391 cw1200_sdio_align_size 2 55391 NULL ++iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL ++sysfs_chmod_file_55408 sysfs_chmod_file 0 55408 NULL ++si476x_radio_read_rds_blckcnt_blob_55427 si476x_radio_read_rds_blckcnt_blob 3 55427 NULL ++__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL ++cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL ++snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL ++i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL ++batadv_tt_entries_55487 batadv_tt_entries 0-1 55487 NULL ++ras_stride_increase_window_55501 ras_stride_increase_window 3 55501 NULL ++tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 NULL ++xfs_btree_split_55515 xfs_btree_split 0 55515 NULL ++ea_get_55522 ea_get 3-0 55522 NULL ++buffer_size_55534 buffer_size 0 55534 NULL ++set_msr_interception_55538 set_msr_interception 2 55538 NULL ++tty_port_register_device_55543 tty_port_register_device 3 55543 NULL ++dgap_do_config_load_55548 dgap_do_config_load 2 55548 NULL ++hash_ipport6_expire_55549 hash_ipport6_expire 4 55549 NULL ++dm_stats_list_55551 dm_stats_list 4 55551 NULL ++add_partition_55588 add_partition 2 55588 NULL ++kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL ++SyS_keyctl_55602 SyS_keyctl 4 55602 NULL ++macvtap_put_user_55609 macvtap_put_user 4 55609 NULL ++selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL ++edge_tty_recv_55622 edge_tty_recv 3 55622 NULL ++pktgen_if_write_55628 pktgen_if_write 3 55628 NULL nohasharray ++reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 &pktgen_if_write_55628 ++osc_obd_max_pages_per_rpc_seq_write_55636 osc_obd_max_pages_per_rpc_seq_write 3 55636 NULL ++xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL ++lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL ++il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL ++get_info_55681 get_info 3 55681 NULL ++iwl_dbgfs_plcp_delta_write_55682 iwl_dbgfs_plcp_delta_write 3 55682 NULL ++genl_allocate_reserve_groups_55705 
genl_allocate_reserve_groups 1 55705 NULL ++pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL ++ocfs2_lock_refcount_tree_55719 ocfs2_lock_refcount_tree 0 55719 NULL ++tap_pwup_write_55723 tap_pwup_write 3 55723 NULL ++__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2 55738 NULL ++set_local_name_55757 set_local_name 4 55757 NULL ++strlen_55778 strlen 0 55778 NULL ++set_spte_55783 set_spte 4-5 55783 NULL ++req_bio_endio_55786 req_bio_endio 3 55786 NULL nohasharray ++conf_read_55786 conf_read 3 55786 &req_bio_endio_55786 ++uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL ++sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL ++ip_hdrlen_55849 ip_hdrlen 0 55849 NULL ++hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL ++shmem_setxattr_55867 shmem_setxattr 4 55867 NULL ++hsc_write_55875 hsc_write 3 55875 NULL ++ramdisk_store_55885 ramdisk_store 4 55885 NULL ++pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL ++hash_ip4_expire_55911 hash_ip4_expire 4 55911 NULL ++snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL ++ext2_direct_IO_55932 ext2_direct_IO 4 55932 NULL ++kvm_write_guest_virt_system_55944 kvm_write_guest_virt_system 4-2 55944 NULL ++sel_read_policy_55947 sel_read_policy 3 55947 NULL ++ceph_get_direct_page_vector_55956 ceph_get_direct_page_vector 2 55956 NULL ++simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL ++tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL ++btrfs_clone_55977 btrfs_clone 5-3 55977 NULL ++wa_xfer_create_subset_sg_55992 wa_xfer_create_subset_sg 3-2 55992 NULL ++nvme_alloc_iod_56027 nvme_alloc_iod 1-2 56027 NULL ++usb_ocp_write_56047 usb_ocp_write 4 56047 NULL ++dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL ++pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL ++add_sysfs_param_56108 add_sysfs_param 0 56108 NULL ++usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL ++sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL ++write_file_frameerrors_56145 write_file_frameerrors 3 56145 NULL ++__i2c_transfer_56162 __i2c_transfer 0 56162 NULL ++rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL ++ath9k_dump_legacy_btcoex_56194 ath9k_dump_legacy_btcoex 3-0 56194 NULL ++vring_add_indirect_56222 vring_add_indirect 4 56222 NULL ++ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL ++do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL ++fd_copyin_56247 fd_copyin 3 56247 NULL ++sk_rmem_schedule_56255 sk_rmem_schedule 3 56255 NULL ++il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL ++ieee80211_if_fmt_user_power_level_56283 ieee80211_if_fmt_user_power_level 3 56283 NULL ++RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL ++dvb_aplay_56296 dvb_aplay 3 56296 NULL ++btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL ++speakup_file_write_56310 speakup_file_write 3 56310 NULL ++pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL ++journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL ++snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL ++vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL ++mite_device_bytes_transferred_56355 mite_device_bytes_transferred 0 56355 NULL ++qd_get_56365 qd_get 0 56365 NULL ++iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4-0 56368 NULL ++dev_read_56369 dev_read 3 56369 NULL ++ath10k_read_simulate_fw_crash_56371 ath10k_read_simulate_fw_crash 3 56371 NULL ++write_gssp_56404 write_gssp 3 56404 
NULL ++ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL ++do_get_write_access_56410 do_get_write_access 0 56410 NULL ++store_msg_56417 store_msg 3 56417 NULL ++pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL ++l2cap_segment_le_sdu_56426 l2cap_segment_le_sdu 4 56426 NULL ++fl_create_56435 fl_create 5 56435 NULL ++gnttab_map_56439 gnttab_map 2 56439 NULL ++cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2-4 56453 NULL ++set_connectable_56458 set_connectable 4 56458 NULL ++osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL ++putused_user_56467 putused_user 3 56467 NULL ++ocfs2_zero_extend_range_56468 ocfs2_zero_extend_range 3-2 56468 NULL ++lbs_rdmac_write_56471 lbs_rdmac_write 3 56471 NULL ++calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL ++crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL ++ieee80211_rx_mgmt_probe_beacon_56491 ieee80211_rx_mgmt_probe_beacon 3 56491 NULL ++memblock_virt_alloc_56501 memblock_virt_alloc 1 56501 NULL ++init_map_ip_56508 init_map_ip 5 56508 NULL ++lustre_posix_acl_xattr_reduce_space_56512 lustre_posix_acl_xattr_reduce_space 3-0-2 56512 NULL ++cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL ++ip_options_get_56538 ip_options_get 4 56538 NULL ++ll_wr_track_id_56544 ll_wr_track_id 2 56544 NULL ++alloc_apertures_56561 alloc_apertures 1 56561 NULL ++rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL ++portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL ++event_filter_write_56609 event_filter_write 3 56609 NULL ++nvme_trans_log_temperature_56613 nvme_trans_log_temperature 3 56613 NULL ++edac_device_create_block_56619 edac_device_create_block 0 56619 NULL ++gather_array_56641 gather_array 3 56641 NULL ++lookup_extent_backref_56644 lookup_extent_backref 9 56644 NULL ++uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL ++tg3_nvram_write_block_56666 tg3_nvram_write_block 3 56666 NULL ++snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL ++dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3-0 56702 NULL ++sta_flags_read_56710 sta_flags_read 3 56710 NULL ++ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL ++xfs_btree_decrement_56718 xfs_btree_decrement 0 56718 NULL ++__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL ++__copy_from_user_ll_56738 __copy_from_user_ll 0-3 56738 NULL ++drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL ++btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL ++ttm_alloc_new_pages_56792 ttm_alloc_new_pages 5 56792 NULL ++ion_ioctl_56806 ion_ioctl 2 56806 NULL ++do_syslog_56807 do_syslog 3 56807 NULL ++mtdchar_write_56831 mtdchar_write 3 56831 NULL ++snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4-0 56847 NULL ++si476x_radio_read_agc_blob_56849 si476x_radio_read_agc_blob 3 56849 NULL ++ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL ++pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL ++debug_debug3_read_56894 debug_debug3_read 3 56894 NULL ++batadv_tt_update_changes_56895 batadv_tt_update_changes 3 56895 NULL ++hfsplus_find_cat_56899 hfsplus_find_cat 0 56899 NULL ++strcspn_56913 strcspn 0 56913 NULL ++__kfifo_out_56927 __kfifo_out 0-3 56927 NULL ++journal_init_revoke_56933 journal_init_revoke 2 56933 NULL ++xfs_alloc_ag_vextent_56943 xfs_alloc_ag_vextent 0 56943 NULL ++nouveau_xtensa_create__56952 nouveau_xtensa_create_ 8 56952 NULL ++diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL ++nouveau_device_create__56984 
nouveau_device_create_ 6 56984 NULL ++sptlrpc_secflags2str_56995 sptlrpc_secflags2str 3 56995 NULL ++vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL ++btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL ++aircable_process_packet_57027 aircable_process_packet 4 57027 NULL ++ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 NULL nohasharray ++skb_network_offset_57043 skb_network_offset 0 57043 &ieee80211_if_fmt_state_57043 ++bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL ++xfs_buf_read_map_57053 xfs_buf_read_map 3 57053 NULL ++cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL ++sca3000_read_data_57064 sca3000_read_data 4 57064 NULL ++pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL ++tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL ++altera_get_note_57099 altera_get_note 6 57099 NULL ++hpfs_readpages_57106 hpfs_readpages 4 57106 NULL ++crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL ++cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL ++rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 NULL nohasharray ++nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 &rds_ib_sub_signaled_57136 nohasharray ++ima_show_htable_value_57136 ima_show_htable_value 2 57136 &nl80211_send_deauth_57136 ++snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL ++_iwl_dbgfs_bf_params_write_57141 _iwl_dbgfs_bf_params_write 3 57141 NULL ++udl_prime_create_57159 udl_prime_create 2 57159 NULL ++stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL ++rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL ++hash_netnet6_expire_57191 hash_netnet6_expire 4 57191 NULL ++tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL ++dma_fifo_alloc_57236 dma_fifo_alloc 5-3-2 57236 NULL ++flush_space_57241 flush_space 0 57241 NULL ++rsxx_cram_write_57244 rsxx_cram_write 3 57244 NULL ++ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL ++oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL ++alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL ++lbs_sleepparams_write_57283 lbs_sleepparams_write 3 57283 NULL ++pstore_file_read_57288 pstore_file_read 3 57288 NULL ++snd_pcm_read_57289 snd_pcm_read 3 57289 NULL ++ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL ++xfs_iread_57313 xfs_iread 0 57313 NULL nohasharray ++write_file_regval_57313 write_file_regval 3 57313 &xfs_iread_57313 ++__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL ++usblp_read_57342 usblp_read 3 57342 NULL ++print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL ++dio_send_cur_page_57348 dio_send_cur_page 0 57348 NULL ++tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL ++tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL ++read_file_blob_57406 read_file_blob 3 57406 NULL ++enclosure_register_57412 enclosure_register 3 57412 NULL ++compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL ++copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL ++__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL ++sisusb_clear_vram_57466 sisusb_clear_vram 2-3 57466 NULL ++ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL ++tipc_port_reject_sections_57478 tipc_port_reject_sections 4 57478 NULL ++bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL ++skb_headlen_57501 skb_headlen 0 57501 NULL ++copy_in_user_57502 copy_in_user 3 57502 NULL ++ckhdid_printf_57505 ckhdid_printf 2 57505 NULL nohasharray 
++gfs2_quota_hold_57505 gfs2_quota_hold 0 57505 &ckhdid_printf_57505 ++init_tag_map_57515 init_tag_map 3 57515 NULL ++il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 NULL nohasharray ++wil_read_file_ssid_57517 wil_read_file_ssid 3 57517 &il_dbgfs_force_reset_read_57517 ++cmm_read_57520 cmm_read 3 57520 NULL ++inode_permission_57531 inode_permission 0 57531 NULL ++acpi_dev_get_resources_57534 acpi_dev_get_resources 0 57534 NULL ++ptlrpc_lprocfs_hp_ratio_seq_write_57537 ptlrpc_lprocfs_hp_ratio_seq_write 3 57537 NULL ++ReadHDLCPnP_57559 ReadHDLCPnP 0 57559 NULL ++obd_unpackmd_57563 obd_unpackmd 0 57563 NULL ++snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL ++get_bridge_ifindices_57579 get_bridge_ifindices 0 57579 NULL ++ldlm_cli_enqueue_local_57582 ldlm_cli_enqueue_local 11 57582 NULL ++il_dbgfs_interrupt_write_57591 il_dbgfs_interrupt_write 3 57591 NULL ++read_file_spectral_fft_period_57593 read_file_spectral_fft_period 3 57593 NULL ++tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL ++sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 NULL ++mem_read_57631 mem_read 3 57631 NULL ++xfs_alloc_ag_vextent_near_57653 xfs_alloc_ag_vextent_near 0 57653 NULL ++r3964_write_57662 r3964_write 4 57662 NULL ++proc_ns_readlink_57664 proc_ns_readlink 3 57664 NULL ++__lgwrite_57669 __lgwrite 4 57669 NULL ++f1x_match_to_this_node_57695 f1x_match_to_this_node 3 57695 NULL ++i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL ++ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL ++nouveau_gpio_create__57735 nouveau_gpio_create_ 4-5 57735 NULL ++pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 NULL nohasharray ++compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 &pppol2tp_recvmsg_57742 ++ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL ++SYSC_process_vm_writev_57776 SYSC_process_vm_writev 3-5 57776 NULL ++apei_exec_collect_resources_57788 apei_exec_collect_resources 0 57788 NULL ++security_inode_unlink_57791 security_inode_unlink 0 57791 NULL ++ld2_57794 ld2 0 57794 NULL ++ivtv_read_57796 ivtv_read 3 57796 NULL ++ion_test_ioctl_57799 ion_test_ioctl 2 57799 NULL ++bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL ++copy_to_user_57835 copy_to_user 3-0 57835 NULL ++xfs_rtpick_extent_57843 xfs_rtpick_extent 0 57843 NULL nohasharray ++flash_read_57843 flash_read 3 57843 &xfs_rtpick_extent_57843 ++kiblnd_create_tx_pool_57846 kiblnd_create_tx_pool 2 57846 NULL ++radeon_ttm_gtt_read_57879 radeon_ttm_gtt_read 3 57879 NULL ++xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL ++iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL ++ll_xattr_find_get_lock_57912 ll_xattr_find_get_lock 0 57912 NULL ++memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL ++twl_i2c_write_57923 twl_i2c_write 3-4 57923 NULL ++__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL ++sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL ++xfs_inode_item_format_convert_57937 xfs_inode_item_format_convert 0 57937 NULL ++xfs_mru_cache_create_57943 xfs_mru_cache_create 3 57943 NULL ++key_algorithm_read_57946 key_algorithm_read 3 57946 NULL ++ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray ++ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953 ++rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL ++iwl_dbgfs_ucode_rx_stats_read_58023 
iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL ++io_playback_transfer_58030 io_playback_transfer 4 58030 NULL ++mce_async_out_58056 mce_async_out 3 58056 NULL ++ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL ++dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL ++cm4040_write_58079 cm4040_write 3 58079 NULL ++ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray ++slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135 ++rd_build_prot_space_58137 rd_build_prot_space 2 58137 NULL ++xfs_bmap_add_extent_delay_real_58151 xfs_bmap_add_extent_delay_real 0 58151 NULL ++xfs_btree_update_58167 xfs_btree_update 0 58167 NULL ++garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL ++ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL ++i40evf_allocate_virt_mem_d_58210 i40evf_allocate_virt_mem_d 3 58210 NULL ++btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL ++read_file_debug_58256 read_file_debug 3 58256 NULL ++osc_max_dirty_mb_seq_write_58263 osc_max_dirty_mb_seq_write 3 58263 NULL ++cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL ++profile_load_58267 profile_load 3 58267 NULL ++kstrtos8_from_user_58268 kstrtos8_from_user 2 58268 NULL ++acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL ++iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL ++ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL ++tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL ++ext4_ext_truncate_extend_restart_58331 ext4_ext_truncate_extend_restart 3 58331 NULL ++__copy_from_user_swizzled_58337 __copy_from_user_swizzled 2-4 58337 NULL ++SyS_migrate_pages_58348 SyS_migrate_pages 2 58348 NULL ++brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL ++il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL ++_drbd_md_sync_page_io_58403 _drbd_md_sync_page_io 6 58403 NULL ++kvm_mmu_write_protect_pt_masked_58406 kvm_mmu_write_protect_pt_masked 3 58406 NULL nohasharray ++idetape_pad_zeros_58406 idetape_pad_zeros 2 58406 &kvm_mmu_write_protect_pt_masked_58406 ++xfs_btree_get_rec_58410 xfs_btree_get_rec 0 58410 NULL ++i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL ++capabilities_read_58457 capabilities_read 3 58457 NULL ++usnic_vnic_get_resources_58462 usnic_vnic_get_resources 3 58462 NULL ++lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray ++compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466 ++nv_rd08_58472 nv_rd08 0 58472 NULL ++acpi_tables_sysfs_init_58477 acpi_tables_sysfs_init 0 58477 NULL ++snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL ++snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL ++btrfs_cont_expand_58498 btrfs_cont_expand 0-2-3 58498 NULL ++gfs2_dir_get_new_buffer_58509 gfs2_dir_get_new_buffer 0 58509 NULL ++rndis_add_response_58544 rndis_add_response 2 58544 NULL ++wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL ++scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL ++get_rhf_errstring_58582 get_rhf_errstring 3 58582 NULL ++ea_read_inline_58589 ea_read_inline 0 58589 NULL ++isku_sysfs_read_keys_thumbster_58590 isku_sysfs_read_keys_thumbster 6 58590 NULL ++xip_file_read_58592 xip_file_read 3 58592 NULL ++ecryptfs_write_end_58594 ecryptfs_write_end 5-3 58594 NULL ++radeon_bo_size_58606 radeon_bo_size 0 58606 NULL ++skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 
NULL ++filemap_fdatawrite_range_58630 filemap_fdatawrite_range 0 58630 NULL ++tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL ++iwl_dbgfs_rx_handlers_write_58655 iwl_dbgfs_rx_handlers_write 3 58655 NULL ++find_zero_58685 find_zero 0-1 58685 NULL ++uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL ++tps6586x_writes_58689 tps6586x_writes 2-3 58689 NULL ++vx_send_msg_58711 vx_send_msg 0 58711 NULL ++csum_exist_in_range_58730 csum_exist_in_range 2-3 58730 NULL ++frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL ++ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL ++agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL ++regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL ++raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL ++isku_sysfs_read_58806 isku_sysfs_read 5 58806 NULL ++ep_read_58813 ep_read 3 58813 NULL ++command_write_58841 command_write 3 58841 NULL ++ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL ++gs_alloc_req_58883 gs_alloc_req 2 58883 NULL ++esas2r_change_queue_depth_58886 esas2r_change_queue_depth 2 58886 NULL ++lprocfs_wr_pinger_recov_58914 lprocfs_wr_pinger_recov 3 58914 NULL ++print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL ++pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL ++xfs_bmap_read_extents_58936 xfs_bmap_read_extents 0 58936 NULL ++wait_table_hash_nr_entries_58962 wait_table_hash_nr_entries 0 58962 NULL ++ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout_58965 ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout 3 58965 NULL ++crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL ++__mem_cgroup_try_charge_58976 __mem_cgroup_try_charge 0 58976 NULL ++init_list_set_59005 init_list_set 3 59005 NULL ++ep_write_59008 ep_write 3 59008 NULL ++lpfc_idiag_baracc_write_59014 lpfc_idiag_baracc_write 3 59014 NULL ++SyS_preadv_59029 SyS_preadv 3 59029 NULL ++init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL ++selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL ++crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL ++regmap_bulk_write_59049 regmap_bulk_write 2-4 59049 NULL ++mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL ++scsi_io_completion_59122 scsi_io_completion 2 59122 NULL nohasharray ++sta_tx_latency_stat_table_59122 sta_tx_latency_stat_table 0-4-5 59122 &scsi_io_completion_59122 ++nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL ++print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray ++framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145 ++radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL ++pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL ++ksize_59176 ksize 0 59176 NULL ++setup_window_59178 setup_window 4-2-5-7 59178 NULL ++ocfs2_move_extent_59187 ocfs2_move_extent 2-5 59187 NULL ++xfs_ialloc_next_rec_59193 xfs_ialloc_next_rec 0 59193 NULL ++xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL ++check_mapped_selector_name_59216 check_mapped_selector_name 5 59216 NULL ++dt3155_read_59226 dt3155_read 3 59226 NULL ++paging64_gpte_to_gfn_lvl_59229 paging64_gpte_to_gfn_lvl 0-1-2 59229 NULL ++nla_len_59258 nla_len 0 59258 NULL ++drbd_bm_write_page_59290 drbd_bm_write_page 2 59290 NULL ++btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL ++xfs_iformat_fork_59312 xfs_iformat_fork 0 59312 NULL 
++fd_copyout_59323 fd_copyout 3 59323 NULL ++read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL ++rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL ++xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL nohasharray ++xfs_alloc_lookup_le_59346 xfs_alloc_lookup_le 0 59346 &xfs_attrmulti_attr_set_59346 ++__map_request_59350 __map_request 0 59350 NULL ++gfs2_quota_lock_check_59353 gfs2_quota_lock_check 0 59353 NULL ++f2fs_fallocate_59377 f2fs_fallocate 4-3 59377 NULL ++pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL ++journal_init_dev_59384 journal_init_dev 5 59384 NULL ++__net_get_random_once_59389 __net_get_random_once 2 59389 NULL ++isku_sysfs_read_keys_function_59412 isku_sysfs_read_keys_function 6 59412 NULL ++pci_ctrl_read_59424 pci_ctrl_read 0 59424 NULL ++vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL ++SyS_sched_setaffinity_59442 SyS_sched_setaffinity 2 59442 NULL ++fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL ++ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL ++mic_vringh_copy_59523 mic_vringh_copy 4 59523 NULL ++mpi_get_nbits_59551 mpi_get_nbits 0 59551 NULL ++tunables_write_59563 tunables_write 3 59563 NULL ++bio_split_59564 bio_split 2 59564 NULL ++__copy_from_user_ll_nozero_59571 __copy_from_user_ll_nozero 0-3 59571 NULL ++write_pbl_59583 write_pbl 4 59583 NULL ++memdup_user_59590 memdup_user 2 59590 NULL ++xrcdn_free_res_59616 xrcdn_free_res 5 59616 NULL nohasharray ++mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 &xrcdn_free_res_59616 ++ath6kl_endpoint_stats_write_59621 ath6kl_endpoint_stats_write 3 59621 NULL ++mtrr_write_59622 mtrr_write 3 59622 NULL ++find_first_zero_bit_59636 find_first_zero_bit 0 59636 NULL ++SyS_setdomainname_59646 SyS_setdomainname 2 59646 NULL ++file_update_time_59647 file_update_time 0 59647 NULL ++hidraw_read_59650 hidraw_read 3 59650 NULL ++v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL ++__devcgroup_check_permission_59665 __devcgroup_check_permission 0 59665 NULL ++iwl_dbgfs_mac_params_read_59666 iwl_dbgfs_mac_params_read 3 59666 NULL ++alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL ++mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL ++ioperm_get_59701 ioperm_get 4-3 59701 NULL ++prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL ++ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL ++fat_direct_IO_59741 fat_direct_IO 4 59741 NULL ++qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL ++strnlen_59746 strnlen 0 59746 NULL ++ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL ++long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL nohasharray ++cap_inode_need_killpriv_59766 cap_inode_need_killpriv 0 59766 &long_retry_limit_read_59766 ++venus_remove_59781 venus_remove 4 59781 NULL ++mei_nfc_recv_59784 mei_nfc_recv 3 59784 NULL ++ipw_write_59807 ipw_write 3 59807 NULL ++scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL ++ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL ++gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL ++regmap_raw_write_async_59849 regmap_raw_write_async 2-4 59849 NULL ++pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL ++l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL ++ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL 
++kvm_mmu_notifier_invalidate_range_start_59944 kvm_mmu_notifier_invalidate_range_start 3-4 59944 NULL ++ath10k_read_dfs_stats_59949 ath10k_read_dfs_stats 3 59949 NULL ++dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 NULL nohasharray ++il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 &dapm_widget_power_read_file_59950 ++il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL ++__arch_hweight16_59975 __arch_hweight16 0 59975 NULL ++osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL ++ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL ++ieee80211_if_fmt_dot11MeshAwakeWindowDuration_60006 ieee80211_if_fmt_dot11MeshAwakeWindowDuration 3 60006 NULL ++copy_items_60009 copy_items 7 60009 NULL ++rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL ++mthca_init_cq_60011 mthca_init_cq 2 60011 NULL ++osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL ++xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL ++xfs_imap_to_bp_60034 xfs_imap_to_bp 0 60034 NULL ++bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL ++do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL ++vcs_size_60050 vcs_size 0 60050 NULL ++gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 NULL ++compat_writev_60063 compat_writev 3 60063 NULL ++ath6kl_listen_int_write_60066 ath6kl_listen_int_write 3 60066 NULL ++c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL ++rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL ++ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL ++SYSC_msgsnd_60113 SYSC_msgsnd 3 60113 NULL ++nfs_idmap_request_key_60124 nfs_idmap_request_key 2 60124 NULL ++__mutex_lock_common_60134 __mutex_lock_common 0 60134 NULL ++ld_usb_read_60156 ld_usb_read 3 60156 NULL ++jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL ++init_state_60165 init_state 2 60165 NULL ++jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 NULL nohasharray ++sg_build_sgat_60179 sg_build_sgat 3 60179 &jffs2_alloc_full_dirent_60179 ++fuse_async_req_send_60183 fuse_async_req_send 0-3 60183 NULL ++rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL ++qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 NULL ++btrfs_get_token_16_60220 btrfs_get_token_16 0 60220 NULL ++irq_alloc_domain_generic_chips_60264 irq_alloc_domain_generic_chips 2-3 60264 NULL ++display_crc_ctl_write_60273 display_crc_ctl_write 3 60273 NULL ++printer_write_60276 printer_write 3 60276 NULL ++do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL ++getDataLength_60301 getDataLength 0 60301 NULL ++usb_alphatrack_write_60341 usb_alphatrack_write 3 60341 NULL ++__kfifo_from_user_r_60345 __kfifo_from_user_r 5-3 60345 NULL ++dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL ++mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL ++ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL ++driver_names_read_60399 driver_names_read 3 60399 NULL ++simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL ++excessive_retries_read_60425 excessive_retries_read 3 60425 NULL ++kmalloc_60432 kmalloc 1 60432 NULL nohasharray ++tstats_write_60432 tstats_write 3 60432 &kmalloc_60432 ++snd_hda_get_num_raw_conns_60462 snd_hda_get_num_raw_conns 0 60462 NULL ++crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL ++lustre_msg_early_size_60496 lustre_msg_early_size 0 60496 NULL ++xfs_btree_make_block_unfull_60511 xfs_btree_make_block_unfull 0 60511 NULL ++v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL ++nonpaging_map_60551 
nonpaging_map 4 60551 NULL ++osc_lockless_truncate_seq_write_60553 osc_lockless_truncate_seq_write 3 60553 NULL ++tracing_entries_write_60563 tracing_entries_write 3 60563 NULL ++memblock_virt_alloc_try_nid_nopanic_60604 memblock_virt_alloc_try_nid_nopanic 1 60604 NULL ++inode_change_ok_60614 inode_change_ok 0 60614 NULL ++skb_transport_offset_60619 skb_transport_offset 0 60619 NULL ++wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL ++acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL ++__proc_lnet_stats_60647 __proc_lnet_stats 5 60647 NULL ++if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL ++ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL ++vga_rcrt_60731 vga_rcrt 0 60731 NULL ++snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL ++raid_status_60755 raid_status 5 60755 NULL ++sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL ++opticon_write_60775 opticon_write 4 60775 NULL ++acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL ++snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL ++gfs2_bmap_alloc_60822 gfs2_bmap_alloc 0 60822 NULL ++pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL ++__clone_and_map_data_bio_60861 __clone_and_map_data_bio 4-3 60861 NULL ++alloc_buf_60864 alloc_buf 3-2 60864 NULL ++generic_writepages_60871 generic_writepages 0 60871 NULL ++ext4_update_inline_data_60888 ext4_update_inline_data 3 60888 NULL ++iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL ++libcfs_sock_ioctl_60915 libcfs_sock_ioctl 0 60915 NULL ++mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL ++scrub_chunk_60926 scrub_chunk 5 60926 NULL ++submit_extent_page_60928 submit_extent_page 5 60928 NULL ++xfs_rtallocate_extent_size_60939 xfs_rtallocate_extent_size 0 60939 NULL ++pti_char_write_60960 pti_char_write 3 60960 NULL ++mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL ++__a2mp_build_60987 __a2mp_build 3 60987 NULL ++hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL ++ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL ++graph_depth_write_61024 graph_depth_write 3 61024 NULL ++sdhci_pltfm_register_61031 sdhci_pltfm_register 3 61031 NULL ++lpfc_idiag_queacc_write_61043 lpfc_idiag_queacc_write 3 61043 NULL ++symtab_init_61050 symtab_init 2 61050 NULL ++fuse_send_write_61053 fuse_send_write 0-4 61053 NULL ++bitmap_scnlistprintf_61062 bitmap_scnlistprintf 0-2 61062 NULL ++ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL ++xfs_dabuf_map_61093 xfs_dabuf_map 0 61093 NULL ++get_derived_key_61100 get_derived_key 4 61100 NULL ++mem_cgroup_cache_charge_61101 mem_cgroup_cache_charge 0 61101 NULL ++i40e_calculate_l2fpm_size_61104 i40e_calculate_l2fpm_size 0-4-3-2-1 61104 NULL ++alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL ++__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL ++vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL ++afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL ++pair_device_61175 pair_device 4 61175 NULL nohasharray ++event_oom_late_read_61175 event_oom_late_read 3 61175 &pair_device_61175 ++dio_bio_add_page_61178 dio_bio_add_page 0 61178 NULL ++SyS_prctl_61202 SyS_prctl 4 61202 NULL ++arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL ++smk_read_ambient_61220 smk_read_ambient 3 61220 NULL ++v9fs_mmap_file_read_61262 v9fs_mmap_file_read 3 61262 NULL ++btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL nohasharray 
++find_get_pages_tag_61270 find_get_pages_tag 0 61270 &btrfs_bio_alloc_61270 nohasharray ++ifalias_store_61270 ifalias_store 4 61270 &find_get_pages_tag_61270 ++vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL nohasharray ++hfsplus_getxattr_finder_info_61283 hfsplus_getxattr_finder_info 0 61283 &vortex_adbdma_getlinearpos_61283 ++nvme_trans_copy_to_user_61288 nvme_trans_copy_to_user 3 61288 NULL ++xfer_from_user_61307 xfer_from_user 3 61307 NULL ++xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL ++C_SYSC_msgsnd_61330 C_SYSC_msgsnd 3 61330 NULL ++write_file_spectral_short_repeat_61335 write_file_spectral_short_repeat 3 61335 NULL ++st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL ++rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL ++__dm_get_reserved_ios_61342 __dm_get_reserved_ios 0-3-2 61342 NULL ++f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL ++debug_debug4_read_61367 debug_debug4_read 3 61367 NULL ++system_enable_write_61396 system_enable_write 3 61396 NULL ++unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL ++snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL ++btrfs_item_size_61485 btrfs_item_size 0 61485 NULL ++ocfs2_get_refcount_rec_61514 ocfs2_get_refcount_rec 0 61514 NULL ++erst_errno_61526 erst_errno 0 61526 NULL ++trace_options_core_write_61551 trace_options_core_write 3 61551 NULL ++dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL ++parport_pc_fifo_write_block_dma_61568 parport_pc_fifo_write_block_dma 3 61568 NULL ++fan_proc_write_61569 fan_proc_write 3 61569 NULL ++ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL ++ldlm_pool_rw_atomic_seq_write_61572 ldlm_pool_rw_atomic_seq_write 3 61572 NULL ++seq_open_private_61589 seq_open_private 3 61589 NULL ++ept_gpte_to_gfn_lvl_61591 ept_gpte_to_gfn_lvl 0-1-2 61591 NULL ++netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL ++nfs4_init_uniform_client_string_61601 nfs4_init_uniform_client_string 3 61601 NULL ++configfs_write_file_61621 configfs_write_file 3 61621 NULL ++ieee80211_if_fmt_hw_queues_61629 ieee80211_if_fmt_hw_queues 3 61629 NULL ++i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL ++snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL nohasharray ++tcf_hashinfo_init_61643 tcf_hashinfo_init 2 61643 &snd_pcm_oss_read3_61643 ++resize_stripes_61650 resize_stripes 2 61650 NULL ++ttm_page_pool_free_61661 ttm_page_pool_free 2-0 61661 NULL ++insert_one_name_61668 insert_one_name 7 61668 NULL ++qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL ++lock_loop_61681 lock_loop 1 61681 NULL ++filter_read_61692 filter_read 3 61692 NULL ++iov_length_61716 iov_length 0 61716 NULL ++fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL ++null_alloc_reqbuf_61719 null_alloc_reqbuf 3 61719 NULL ++read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray ++read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742 ++SyS_sendto_61763 SyS_sendto 6 61763 NULL ++gfs2_meta_wait_61773 gfs2_meta_wait 0 61773 NULL ++xfs_file_dio_aio_write_61801 xfs_file_dio_aio_write 0 61801 NULL ++mls_compute_context_len_61812 mls_compute_context_len 0 61812 NULL ++bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL ++regcache_sync_block_61846 regcache_sync_block 5-4 61846 NULL ++ath9k_hw_def_dump_eeprom_61853 ath9k_hw_def_dump_eeprom 5-4 61853 NULL ++fs_path_prepare_for_add_61854 fs_path_prepare_for_add 
2 61854 NULL ++evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL ++SYSC_lsetxattr_61869 SYSC_lsetxattr 4 61869 NULL ++get_fw_name_61874 get_fw_name 3 61874 NULL ++btrfs_ioctl_clone_61886 btrfs_ioctl_clone 3-4-5 61886 NULL ++lprocfs_write_frac_u64_helper_61897 lprocfs_write_frac_u64_helper 2 61897 NULL ++lov_mds_md_stripecnt_61899 lov_mds_md_stripecnt 0-1 61899 NULL ++clear_refs_write_61904 clear_refs_write 3 61904 NULL ++rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL ++au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL ++sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL ++edac_device_create_instance_61940 edac_device_create_instance 0 61940 NULL ++SyS_kexec_load_61946 SyS_kexec_load 2 61946 NULL ++il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL ++squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL ++fix_read_error_61965 fix_read_error 4 61965 NULL ++fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL ++cow_file_range_61979 cow_file_range 3 61979 NULL ++set_extent_delalloc_61982 set_extent_delalloc 0 61982 NULL ++dequeue_event_62000 dequeue_event 3 62000 NULL ++xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL ++SyS_setxattr_62019 SyS_setxattr 4 62019 NULL ++jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL ++SYSC_select_62024 SYSC_select 1 62024 NULL ++pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL ++sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL ++do_pselect_62061 do_pselect 1 62061 NULL ++pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL ++_xfs_log_force_lsn_62083 _xfs_log_force_lsn 0 62083 NULL ++fat_setattr_62084 fat_setattr 0 62084 NULL ++jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL ++btrfs_direct_IO_62114 btrfs_direct_IO 4 62114 NULL ++ip_recv_error_62117 ip_recv_error 3 62117 NULL ++generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL ++llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL ++kobject_add_varg_62133 kobject_add_varg 0 62133 NULL nohasharray ++qib_diag_write_62133 qib_diag_write 3 62133 &kobject_add_varg_62133 ++device_add_attrs_62135 device_add_attrs 0 62135 NULL nohasharray ++ql_status_62135 ql_status 5 62135 &device_add_attrs_62135 ++video_usercopy_62151 video_usercopy 2 62151 NULL ++SyS_getxattr_62166 SyS_getxattr 4 62166 NULL ++prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL ++write_file_dfs_62180 write_file_dfs 3 62180 NULL ++alloc_upcall_62186 alloc_upcall 2 62186 NULL ++sock_kmalloc_62205 sock_kmalloc 2 62205 NULL ++smk_read_syslog_62227 smk_read_syslog 3 62227 NULL ++SYSC_setgroups16_62232 SYSC_setgroups16 1 62232 NULL ++nfsd_read_file_62241 nfsd_read_file 6 62241 NULL ++subtract_dirty_62242 subtract_dirty 2-3 62242 NULL ++ion_handle_test_dma_62262 ion_handle_test_dma 4-5 62262 NULL ++il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL ++sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL ++subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL ++Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL ++subseq_list_62332 subseq_list 3-0 62332 NULL ++ll_statahead_max_seq_write_62333 ll_statahead_max_seq_write 3 62333 NULL ++flash_write_62354 flash_write 3 62354 NULL ++xfpregs_set_62363 xfpregs_set 4 62363 NULL ++rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL ++altera_irscan_62396 altera_irscan 2 62396 NULL ++set_ssp_62411 set_ssp 4 62411 NULL 
++udf_expand_file_adinicb_62470 udf_expand_file_adinicb 0 62470 NULL ++ext_rts51x_sd_execute_read_data_62501 ext_rts51x_sd_execute_read_data 9 62501 NULL ++pep_sendmsg_62524 pep_sendmsg 4 62524 NULL ++test_iso_queue_62534 test_iso_queue 5 62534 NULL ++debugfs_read_62535 debugfs_read 3 62535 NULL ++sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL ++qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL ++link_send_sections_long_62557 link_send_sections_long 3 62557 NULL ++compute_bitstructs_62570 compute_bitstructs 0 62570 NULL ++xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL ++get_subdir_62581 get_subdir 3 62581 NULL ++nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 NULL ++tipc_port_recv_sections_62609 tipc_port_recv_sections 3 62609 NULL ++dut_mode_write_62630 dut_mode_write 3 62630 NULL ++vfs_fsync_range_62635 vfs_fsync_range 0 62635 NULL ++lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL ++printer_req_alloc_62687 printer_req_alloc 2 62687 NULL ++bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL ++gfs2_log_write_62717 gfs2_log_write 3 62717 NULL ++rdm_62719 rdm 0 62719 NULL ++add_to_page_cache_62724 add_to_page_cache 0 62724 NULL ++obd_ioctl_popdata_62741 obd_ioctl_popdata 3 62741 NULL ++key_replays_read_62746 key_replays_read 3 62746 NULL ++lov_verify_lmm_62747 lov_verify_lmm 2 62747 NULL ++mwifiex_rdeeprom_write_62754 mwifiex_rdeeprom_write 3 62754 NULL ++ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL ++C_SYSC_ipc_62776 C_SYSC_ipc 3 62776 NULL ++SyS_sched_getaffinity_62786 SyS_sched_getaffinity 2 62786 NULL ++dm_stats_account_io_62787 dm_stats_account_io 3 62787 NULL ++tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL ++__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL ++bio_get_nr_vecs_62838 bio_get_nr_vecs 0 62838 NULL ++xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL ++rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL ++read_nic_io_dword_62859 read_nic_io_dword 0 62859 NULL ++l2tp_ip6_recvmsg_62874 l2tp_ip6_recvmsg 4 62874 NULL ++xfs_rtmodify_range_62877 xfs_rtmodify_range 0 62877 NULL ++aoechr_write_62883 aoechr_write 3 62883 NULL ++if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL ++ocfs2_validate_gd_parent_62905 ocfs2_validate_gd_parent 0 62905 NULL ++mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL ++getdqbuf_62908 getdqbuf 1 62908 NULL ++ll_statahead_agl_seq_write_62928 ll_statahead_agl_seq_write 3 62928 NULL ++agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL ++kstrtoull_from_user_63026 kstrtoull_from_user 2 63026 NULL nohasharray ++xfs_trans_reserve_63026 xfs_trans_reserve 0 63026 &kstrtoull_from_user_63026 ++__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL ++pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL ++scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL ++unlink1_63059 unlink1 3 63059 NULL ++xen_set_nslabs_63066 xen_set_nslabs 0 63066 NULL ++iwl_dbgfs_fw_rx_stats_read_63070 iwl_dbgfs_fw_rx_stats_read 3 63070 NULL ++sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5 63087 NULL ++iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL ++ext4_chunk_trans_blocks_63123 ext4_chunk_trans_blocks 0-2 63123 NULL ++smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL ++SyS_syslog_63178 SyS_syslog 3 63178 NULL ++vme_master_read_63221 vme_master_read 0 63221 NULL ++SyS_gethostname_63227 SyS_gethostname 2 63227 NULL 
++ptp_read_63251 ptp_read 4 63251 NULL ++xfs_dir2_leaf_getdents_63262 xfs_dir2_leaf_getdents 3 63262 NULL ++raid5_resize_63306 raid5_resize 2 63306 NULL ++ath10k_read_fw_dbglog_63323 ath10k_read_fw_dbglog 3 63323 NULL ++proc_info_read_63344 proc_info_read 3 63344 NULL ++ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL ++idmouse_read_63374 idmouse_read 3 63374 NULL ++usbnet_read_cmd_nopm_63388 usbnet_read_cmd_nopm 7 63388 NULL nohasharray ++edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 &usbnet_read_cmd_nopm_63388 ++rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL ++nouveau_event_create_63411 nouveau_event_create 1 63411 NULL ++l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL ++nfsd_symlink_63442 nfsd_symlink 6 63442 NULL ++si5351_bulk_write_63468 si5351_bulk_write 2-3 63468 NULL ++snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL ++reada_find_extent_63486 reada_find_extent 2 63486 NULL ++read_kcore_63488 read_kcore 3 63488 NULL ++snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL ++efx_mcdi_rpc_async_63529 efx_mcdi_rpc_async 4-5 63529 NULL ++ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL nohasharray ++generic_cont_expand_simple_63534 generic_cont_expand_simple 0 63534 &ubi_more_leb_change_data_63534 ++write_file_spectral_period_63536 write_file_spectral_period 3 63536 NULL ++if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL ++append_to_buffer_63550 append_to_buffer 3 63550 NULL ++kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 NULL ++rproc_alloc_63577 rproc_alloc 5 63577 NULL ++write_debug_level_63613 write_debug_level 3 63613 NULL ++__spi_validate_63618 __spi_validate 0 63618 NULL ++symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL ++proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL ++ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL ++ldlm_cli_enqueue_63657 ldlm_cli_enqueue 8 63657 NULL ++hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL ++vbi_read_63673 vbi_read 3 63673 NULL ++write_file_spectral_fft_period_63696 write_file_spectral_fft_period 3 63696 NULL ++nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL ++btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL ++selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL ++snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL ++snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL ++spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL ++mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL ++copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL ++sel_write_load_63830 sel_write_load 3 63830 NULL ++ll_readlink_63836 ll_readlink 3 63836 NULL ++proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL ++xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL ++uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-2-3 63922 NULL ++snd_compr_write_63923 snd_compr_write 3 63923 NULL ++afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL ++__team_options_register_63941 __team_options_register 3 63941 NULL ++macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL ++xfs_iflush_63956 xfs_iflush 0 63956 NULL ++set_bredr_63975 set_bredr 4 63975 NULL ++construct_key_and_link_63985 construct_key_and_link 3 63985 NULL ++rs_extent_to_bm_page_63996 rs_extent_to_bm_page 0-1 63996 NULL ++read_file_frameerrors_64001 
read_file_frameerrors 3 64001 NULL ++SyS_rt_sigpending_64018 SyS_rt_sigpending 2 64018 NULL ++dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL ++SyS_fsetxattr_64039 SyS_fsetxattr 4 64039 NULL ++__generic_file_aio_write_64049 __generic_file_aio_write 0 64049 NULL ++get_u8_64076 get_u8 0 64076 NULL ++xilly_malloc_64077 xilly_malloc 2 64077 NULL ++sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL ++vmci_handle_arr_get_size_64088 vmci_handle_arr_get_size 0 64088 NULL ++lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL ++gfs2_inplace_reserve_64096 gfs2_inplace_reserve 0 64096 NULL nohasharray ++SyS_set_mempolicy_64096 SyS_set_mempolicy 3 64096 &gfs2_inplace_reserve_64096 ++to_bytes_64103 to_bytes 0-1 64103 NULL ++SyS_mq_timedsend_64107 SyS_mq_timedsend 3 64107 NULL ++rdma_addr_size_64116 rdma_addr_size 0 64116 NULL ++do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL ++bypass_wd_write_64120 bypass_wd_write 3 64120 NULL ++ext4_prepare_inline_data_64124 ext4_prepare_inline_data 3 64124 NULL ++init_bch_64130 init_bch 1-2 64130 NULL ++ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL ++dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL ++cpumask_scnprintf_64170 cpumask_scnprintf 0-2 64170 NULL ++kernfs_iop_setxattr_64220 kernfs_iop_setxattr 4 64220 NULL ++xfs_vm_direct_IO_64223 xfs_vm_direct_IO 4 64223 NULL ++read_pulse_64227 read_pulse 0-3 64227 NULL ++ea_len_64229 ea_len 0 64229 NULL ++xfs_rtmodify_summary_64265 xfs_rtmodify_summary 0 64265 NULL ++io_capture_transfer_64276 io_capture_transfer 4 64276 NULL ++btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL ++sta_current_tx_rate_read_64286 sta_current_tx_rate_read 3 64286 NULL ++xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3-0 64288 NULL nohasharray ++event_id_read_64288 event_id_read 3 64288 &xfs_dir_cilookup_result_64288 ++ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL ++snd_hda_get_sub_nodes_64304 snd_hda_get_sub_nodes 0 64304 NULL ++error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL ++sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL ++ts_write_64336 ts_write 3 64336 NULL ++usbtmc_write_64340 usbtmc_write 3 64340 NULL ++bnx2x_vfop_mcast_cmd_64354 bnx2x_vfop_mcast_cmd 5 64354 NULL ++user_regset_copyin_64360 user_regset_copyin 7 64360 NULL ++wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL ++reg_create_64372 reg_create 5 64372 NULL ++ilo_write_64378 ilo_write 3 64378 NULL ++btrfs_map_block_64379 btrfs_map_block 3 64379 NULL ++vmcs_readl_64381 vmcs_readl 0 64381 NULL ++nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL ++ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL ++pidlist_allocate_64404 pidlist_allocate 1 64404 NULL ++rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL ++snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray ++keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418 ++oom_adj_write_64428 oom_adj_write 3 64428 NULL ++read_file_spectral_short_repeat_64431 read_file_spectral_short_repeat 3 64431 NULL ++ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL ++single_open_size_64483 single_open_size 4 64483 NULL ++p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL nohasharray ++xfs_inode_ag_walk_64493 xfs_inode_ag_walk 0 64493 &p54_parse_rssical_64493 ++msg_data_sz_64503 msg_data_sz 0 64503 NULL ++remove_uuid_64505 remove_uuid 4 64505 NULL ++crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL ++opera1_usb_i2c_msgxfer_64521 
opera1_usb_i2c_msgxfer 4 64521 NULL ++iwl_dbgfs_ucode_tracing_write_64524 iwl_dbgfs_ucode_tracing_write 3 64524 NULL ++ses_send_diag_64527 ses_send_diag 4 64527 NULL ++prctl_set_mm_64538 prctl_set_mm 3 64538 NULL ++SyS_bind_64544 SyS_bind 3 64544 NULL ++rbd_obj_read_sync_64554 rbd_obj_read_sync 4-3 64554 NULL ++__btrfs_prealloc_file_range_64557 __btrfs_prealloc_file_range 3 64557 NULL ++__spi_sync_64561 __spi_sync 0 64561 NULL nohasharray ++ll_max_rw_chunk_seq_write_64561 ll_max_rw_chunk_seq_write 3 64561 &__spi_sync_64561 ++__apei_exec_run_64563 __apei_exec_run 0 64563 NULL ++kstrtoul_from_user_64569 kstrtoul_from_user 2 64569 NULL ++do_erase_64574 do_erase 4 64574 NULL ++fanotify_write_64623 fanotify_write 3 64623 NULL ++regmap_read_debugfs_64658 regmap_read_debugfs 5 64658 NULL ++ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL nohasharray ++tlbflush_read_file_64661 tlbflush_read_file 3 64661 &ocfs2_read_xattr_block_64661 ++efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL ++rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL ++nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL ++snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL ++dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL ++atomic_add_return_64720 atomic_add_return 0-1 64720 NULL ++i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL ++AscGetChipVersion_64737 AscGetChipVersion 0 64737 NULL ++squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL ++bio_map_kern_64751 bio_map_kern 3 64751 NULL ++rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL ++message_for_md_64777 message_for_md 5 64777 NULL ++isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL ++regmap_reg_ranges_read_file_64798 regmap_reg_ranges_read_file 3 64798 NULL ++nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL ++rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL ++proc_projid_map_write_64810 proc_projid_map_write 3 64810 NULL ++megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL ++ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL ++do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL ++altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL ++pci_vc_do_save_buffer_64876 pci_vc_do_save_buffer 0 64876 NULL ++lprocfs_write_u64_helper_64880 lprocfs_write_u64_helper 2 64880 NULL ++ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL ++xfs_imap_lookup_64906 xfs_imap_lookup 0 64906 NULL ++ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL ++ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL ++traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL ++suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL ++crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL ++insert_dent_65034 insert_dent 7 65034 NULL ++snd_hda_get_pin_label_65035 snd_hda_get_pin_label 5 65035 NULL ++ext4_ind_trans_blocks_65053 ext4_ind_trans_blocks 0-2 65053 NULL ++pcibios_enable_device_65059 pcibios_enable_device 0 65059 NULL ++__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL ++batadv_socket_write_65083 batadv_socket_write 3 65083 NULL ++ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL ++ath9k_dump_mci_btcoex_65090 ath9k_dump_mci_btcoex 3-0 65090 NULL ++generic_ocp_write_65107 generic_ocp_write 4 65107 NULL ++__xfs_bmapi_allocate_65142 __xfs_bmapi_allocate 0 65142 NULL 
++rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL ++print_endpoint_stat_65232 print_endpoint_stat 3-4-0 65232 NULL ++whci_n_caps_65247 whci_n_caps 0 65247 NULL ++kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL ++xfs_btree_check_ptr_65281 xfs_btree_check_ptr 0 65281 NULL ++compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL ++mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL ++redirected_tty_write_65297 redirected_tty_write 3 65297 NULL ++get_var_len_65304 get_var_len 0 65304 NULL ++unpack_array_65318 unpack_array 0 65318 NULL ++pci_vpd_find_tag_65325 pci_vpd_find_tag 0-2 65325 NULL ++dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL ++dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL ++alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL ++SyS_writev_65372 SyS_writev 3 65372 NULL ++__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL ++trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL ++__read_vmcore_65402 __read_vmcore 2 65402 NULL ++usb_ep_enable_65405 usb_ep_enable 0 65405 NULL ++ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 3-4 65410 NULL ++device_add_groups_65423 device_add_groups 0 65423 NULL ++xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL ++usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL ++il_dbgfs_wd_timeout_write_65464 il_dbgfs_wd_timeout_write 3 65464 NULL ++clear_user_65470 clear_user 2 65470 NULL ++xfs_alloc_lookup_ge_65481 xfs_alloc_lookup_ge 0 65481 NULL ++dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL ++lookup_inline_extent_backref_65493 lookup_inline_extent_backref 9 65493 NULL ++nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL ++tree_mod_log_eb_copy_65535 tree_mod_log_eb_copy 6 65535 NULL +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data +new file mode 100644 +index 0000000..560cd7b +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data +@@ -0,0 +1,92 @@ ++spa_set_aux_vdevs_746 spa_set_aux_vdevs 3 746 NULL ++zfs_lookup_2144 zfs_lookup 0 2144 NULL ++mappedread_2627 mappedread 2 2627 NULL ++vdev_disk_dio_alloc_2957 vdev_disk_dio_alloc 1 2957 NULL ++nv_alloc_pushpage_spl_4286 nv_alloc_pushpage_spl 2 4286 NULL ++zpl_xattr_get_4574 zpl_xattr_get 0 4574 NULL ++sa_replace_all_by_template_5699 sa_replace_all_by_template 3 5699 NULL ++dmu_write_6048 dmu_write 4-3 6048 NULL ++dmu_buf_hold_array_6095 dmu_buf_hold_array 4-3 6095 NULL ++update_pages_6225 update_pages 2-3 6225 NULL ++bio_nr_pages_7117 bio_nr_pages 0-2 7117 NULL ++dmu_buf_hold_array_by_bonus_8562 dmu_buf_hold_array_by_bonus 3-2 8562 NULL ++zpios_dmu_write_8858 zpios_dmu_write 4-5 8858 NULL ++ddi_copyout_9401 ddi_copyout 3 9401 NULL ++avl_numnodes_12384 avl_numnodes 0 12384 NULL ++dmu_write_uio_dnode_12473 dmu_write_uio_dnode 3 12473 NULL ++dmu_xuio_init_12866 dmu_xuio_init 2 12866 NULL ++zpl_read_common_14389 zpl_read_common 0 14389 NULL ++dmu_snapshot_realname_14632 dmu_snapshot_realname 4 14632 NULL ++kmem_alloc_debug_14852 kmem_alloc_debug 1 14852 NULL ++kmalloc_node_nofail_15151 kmalloc_node_nofail 1 15151 NULL ++dmu_write_uio_16351 dmu_write_uio 4 16351 NULL ++zfs_log_write_16524 zfs_log_write 6-5 16524 NULL ++sa_build_layouts_16910 sa_build_layouts 3 16910 NULL ++dsl_dir_namelen_17053 dsl_dir_namelen 0 17053 NULL ++kcopy_copy_to_user_17336 kcopy_copy_to_user 5 17336 NULL ++sa_add_layout_entry_17507 sa_add_layout_entry 3 17507 NULL 
++sa_attr_table_setup_18029 sa_attr_table_setup 3 18029 NULL ++uiocopy_18680 uiocopy 2 18680 NULL ++dmu_buf_hold_array_by_dnode_19125 dmu_buf_hold_array_by_dnode 2-3 19125 NULL ++zpl_acl_from_xattr_21141 zpl_acl_from_xattr 2 21141 NULL ++dsl_pool_tx_assign_init_22518 dsl_pool_tx_assign_init 2 22518 NULL ++nvlist_lookup_byte_array_22527 nvlist_lookup_byte_array 0 22527 NULL ++sa_replace_all_by_template_locked_22533 sa_replace_all_by_template_locked 3 22533 NULL ++tsd_hash_table_init_22559 tsd_hash_table_init 1 22559 NULL ++spa_vdev_remove_aux_23966 spa_vdev_remove_aux 4 23966 NULL ++zpl_xattr_acl_set_access_24129 zpl_xattr_acl_set_access 4 24129 NULL ++dmu_assign_arcbuf_24622 dmu_assign_arcbuf 2 24622 NULL ++zap_lookup_norm_25166 zap_lookup_norm 9 25166 NULL ++dmu_prealloc_25456 dmu_prealloc 4-3 25456 NULL ++kmalloc_nofail_26347 kmalloc_nofail 1 26347 NULL ++zfsctl_snapshot_zpath_27578 zfsctl_snapshot_zpath 2 27578 NULL ++zpios_dmu_read_30015 zpios_dmu_read 4-5 30015 NULL ++splat_write_30943 splat_write 3 30943 NULL ++zpl_xattr_get_sa_31183 zpl_xattr_get_sa 0 31183 NULL ++dmu_read_uio_31467 dmu_read_uio 4 31467 NULL ++zfs_replay_fuids_31479 zfs_replay_fuids 4 31479 NULL ++spa_history_log_to_phys_31632 spa_history_log_to_phys 0-1 31632 NULL ++__zpl_xattr_get_32601 __zpl_xattr_get 0 32601 NULL ++proc_copyout_string_34049 proc_copyout_string 2 34049 NULL ++nv_alloc_sleep_spl_34544 nv_alloc_sleep_spl 2 34544 NULL ++nv_alloc_nosleep_spl_34761 nv_alloc_nosleep_spl 2 34761 NULL ++zap_leaf_array_match_36922 zap_leaf_array_match 4 36922 NULL ++copyinstr_36980 copyinstr 3 36980 NULL ++zpl_xattr_acl_set_default_37864 zpl_xattr_acl_set_default 4 37864 NULL ++splat_read_38116 splat_read 3 38116 NULL ++sa_setup_38756 sa_setup 4 38756 NULL ++vdev_disk_physio_39898 vdev_disk_physio 3 39898 NULL ++arc_buf_size_39982 arc_buf_size 0 39982 NULL ++kzalloc_nofail_40719 kzalloc_nofail 1 40719 NULL ++fuidstr_to_sid_40777 fuidstr_to_sid 4 40777 NULL ++vdev_raidz_matrix_reconstruct_40852 vdev_raidz_matrix_reconstruct 2-3 40852 NULL ++sa_find_layout_40892 sa_find_layout 4 40892 NULL ++zpl_xattr_get_dir_41918 zpl_xattr_get_dir 0 41918 NULL ++zfs_sa_get_xattr_42600 zfs_sa_get_xattr 0 42600 NULL ++zpl_xattr_acl_set_42808 zpl_xattr_acl_set 4 42808 NULL ++xdr_dec_array_43091 xdr_dec_array 5 43091 NULL ++dsl_dataset_namelen_43136 dsl_dataset_namelen 0 43136 NULL ++kcopy_write_43683 kcopy_write 3 43683 NULL ++uiomove_44355 uiomove 2 44355 NULL ++dmu_read_44418 dmu_read 4-3 44418 NULL ++ddi_copyin_44846 ddi_copyin 3 44846 NULL ++kcopy_do_get_45061 kcopy_do_get 5 45061 NULL ++copyin_45945 copyin 3 45945 NULL ++zil_itx_create_46555 zil_itx_create 2 46555 NULL ++dmu_write_uio_dbuf_48064 dmu_write_uio_dbuf 3 48064 NULL ++blk_rq_pos_48233 blk_rq_pos 0 48233 NULL ++spa_history_write_49650 spa_history_write 3 49650 NULL ++kcopy_copy_pages_to_user_49823 kcopy_copy_pages_to_user 3-4 49823 NULL ++zfs_log_write_50162 zfs_log_write 6-5 50162 NULL ++i_fm_alloc_51038 i_fm_alloc 2 51038 NULL ++copyout_51409 copyout 3 51409 NULL ++zvol_log_write_54898 zvol_log_write 4-3 54898 NULL ++zfs_acl_node_alloc_55641 zfs_acl_node_alloc 1 55641 NULL ++get_nvlist_56685 get_nvlist 2 56685 NULL ++zprop_get_numprops_56820 zprop_get_numprops 0 56820 NULL ++splat_taskq_test4_common_59829 splat_taskq_test4_common 5 59829 NULL ++zfs_replay_domain_cnt_61399 zfs_replay_domain_cnt 0 61399 NULL ++zpios_write_61823 zpios_write 3 61823 NULL ++proc_copyin_string_62019 proc_copyin_string 4 62019 NULL ++random_get_pseudo_bytes_64611 random_get_pseudo_bytes 2 64611 
NULL ++zpios_read_64734 zpios_read 3 64734 NULL +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c +new file mode 100644 +index 0000000..95f7abd +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c +@@ -0,0 +1,259 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++int plugin_is_GPL_compatible; ++ ++tree report_size_overflow_decl; ++ ++tree size_overflow_type_HI; ++tree size_overflow_type_SI; ++tree size_overflow_type_DI; ++tree size_overflow_type_TI; ++ ++static struct plugin_info size_overflow_plugin_info = { ++ .version = "20140725", ++ .help = "no-size-overflow\tturn off size overflow checking\n", ++}; ++ ++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) ++{ ++ unsigned int arg_count; ++ enum tree_code code = TREE_CODE(*node); ++ ++ switch (code) { ++ case FUNCTION_DECL: ++ arg_count = type_num_arguments(TREE_TYPE(*node)); ++ break; ++ case FUNCTION_TYPE: ++ case METHOD_TYPE: ++ arg_count = type_num_arguments(*node); ++ break; ++ default: ++ *no_add_attrs = true; ++ error("%s: %qE attribute only applies to functions", __func__, name); ++ return NULL_TREE; ++ } ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position)); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) ++{ ++ unsigned int arg_count; ++ enum tree_code code = TREE_CODE(*node); ++ ++ switch (code) { ++ case FUNCTION_DECL: ++ arg_count = type_num_arguments(TREE_TYPE(*node)); ++ break; ++ case FUNCTION_TYPE: ++ case METHOD_TYPE: ++ arg_count = type_num_arguments(*node); ++ break; ++ case FIELD_DECL: ++ return NULL_TREE; ++ default: ++ *no_add_attrs = true; ++ error("%qE attribute only applies to functions", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0) ++ return NULL_TREE; ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position)); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static struct attribute_spec size_overflow_attr = { ++ .name = "size_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_size_overflow_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = 
false ++#endif ++}; ++ ++static struct attribute_spec intentional_overflow_attr = { ++ .name = "intentional_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_intentional_overflow_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void __unused *event_data, void __unused *data) ++{ ++ register_attribute(&size_overflow_attr); ++ register_attribute(&intentional_overflow_attr); ++} ++ ++static tree create_typedef(tree type, const char* ident) ++{ ++ tree new_type, decl; ++ ++ new_type = build_variant_type_copy(type); ++ decl = build_decl(BUILTINS_LOCATION, TYPE_DECL, get_identifier(ident), new_type); ++ DECL_ORIGINAL_TYPE(decl) = type; ++ TYPE_NAME(new_type) = decl; ++ return new_type; ++} ++ ++// Create the noreturn report_size_overflow() function decl. ++static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data) ++{ ++ tree const_char_ptr_type_node; ++ tree fntype; ++ ++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0)); ++ ++ size_overflow_type_HI = create_typedef(intHI_type_node, "size_overflow_type_HI"); ++ size_overflow_type_SI = create_typedef(intSI_type_node, "size_overflow_type_SI"); ++ size_overflow_type_DI = create_typedef(intDI_type_node, "size_overflow_type_DI"); ++ size_overflow_type_TI = create_typedef(intTI_type_node, "size_overflow_type_TI"); ++ ++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var) ++ fntype = build_function_type_list(void_type_node, ++ const_char_ptr_type_node, ++ unsigned_type_node, ++ const_char_ptr_type_node, ++ const_char_ptr_type_node, ++ NULL_TREE); ++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype); ++ ++ DECL_ASSEMBLER_NAME(report_size_overflow_decl); ++ TREE_PUBLIC(report_size_overflow_decl) = 1; ++ DECL_EXTERNAL(report_size_overflow_decl) = 1; ++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1; ++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1; ++} ++ ++ ++extern struct gimple_opt_pass pass_dce; ++ ++static struct opt_pass *make_dce_pass(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return make_pass_dce(g); ++#else ++ return &pass_dce.pass; ++#endif ++} ++ ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ int i; ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ bool enable = true; ++ struct register_pass_info insert_size_overflow_asm_pass_info; ++ struct register_pass_info __unused dump_before_pass_info; ++ struct register_pass_info __unused dump_after_pass_info; ++ struct register_pass_info insert_size_overflow_check_info; ++ struct register_pass_info dce_pass_info; ++ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = { ++ { ++ .base = &report_size_overflow_decl, ++ .nelt = 1, ++ .stride = sizeof(report_size_overflow_decl), ++ .cb = >_ggc_mx_tree_node, ++ .pchw = >_pch_nx_tree_node ++ }, ++ LAST_GGC_ROOT_TAB ++ }; ++ ++ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass(); ++ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa"; ++ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1; ++ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ 
dump_before_pass_info.pass = make_dump_pass(); ++ dump_before_pass_info.reference_pass_name = "increase_alignment"; ++ dump_before_pass_info.ref_pass_instance_number = 1; ++ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ insert_size_overflow_check_info.pass = make_insert_size_overflow_check(); ++ insert_size_overflow_check_info.reference_pass_name = "increase_alignment"; ++ insert_size_overflow_check_info.ref_pass_instance_number = 1; ++ insert_size_overflow_check_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ dump_after_pass_info.pass = make_dump_pass(); ++ dump_after_pass_info.reference_pass_name = "increase_alignment"; ++ dump_after_pass_info.ref_pass_instance_number = 1; ++ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ dce_pass_info.pass = make_dce_pass(); ++ dce_pass_info.reference_pass_name = "vrp"; ++ dce_pass_info.ref_pass_instance_number = 1; ++ dce_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "no-size-overflow")) { ++ enable = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info); ++ if (enable) { ++ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)>_ggc_r_gt_size_overflow); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info); ++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_check_info); ++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c +new file mode 100644 +index 0000000..0888f6c +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c +@@ -0,0 +1,364 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
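A function gets onto the plugin's radar either through the hash tables above (when its declaration cannot carry annotations) or through an explicit attribute, as handled by handle_size_overflow_attribute() earlier in this file. A minimal, hypothetical illustration of the attribute form — my_alloc is invented for this sketch, and __size_overflow is the no-op fallback macro added to tools/include/linux/compiler.h near the end of this section (the kernel proper expands it to the attribute when the plugin is active):

/* Arguments 1 and 2 feed a size computation: the plugin re-evaluates
 * them in double integer precision and calls report_size_overflow()
 * on wraparound, which logs the event and kills the process. */
void *my_alloc(unsigned long count, unsigned long size)
	__attribute__((size_overflow(1, 2)));

/* Equivalent kernel-style spelling via the helper macro: */
void *my_alloc2(unsigned long count, unsigned long size) __size_overflow(1, 2);

/* The companion __intentional_overflow attribute, handled just above,
 * marks positions where wrapping is deliberate and must not be flagged. */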
++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++#include "size_overflow_hash.h" ++#include "size_overflow_hash_aux.h" ++ ++#define CODES_LIMIT 32 ++ ++static unsigned char get_tree_code(const_tree type) ++{ ++ switch (TREE_CODE(type)) { ++ case ARRAY_TYPE: ++ return 0; ++ case BOOLEAN_TYPE: ++ return 1; ++ case ENUMERAL_TYPE: ++ return 2; ++ case FUNCTION_TYPE: ++ return 3; ++ case INTEGER_TYPE: ++ return 4; ++ case POINTER_TYPE: ++ return 5; ++ case RECORD_TYPE: ++ return 6; ++ case UNION_TYPE: ++ return 7; ++ case VOID_TYPE: ++ return 8; ++ case REAL_TYPE: ++ return 9; ++ case VECTOR_TYPE: ++ return 10; ++ case REFERENCE_TYPE: ++ return 11; ++ case OFFSET_TYPE: ++ return 12; ++ case COMPLEX_TYPE: ++ return 13; ++ default: ++ debug_tree((tree)type); ++ gcc_unreachable(); ++ } ++} ++ ++struct function_hash { ++ size_t tree_codes_len; ++ unsigned char tree_codes[CODES_LIMIT]; ++ const_tree fndecl; ++ unsigned int hash; ++}; ++ ++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html ++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed) ++{ ++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); } ++#define cwmixa( in ) { cwfold( in, m, k, h ); } ++#define cwmixb( in ) { cwfold( in, n, h, k ); } ++ ++ unsigned int m = 0x57559429; ++ unsigned int n = 0x5052acdb; ++ const unsigned int *key4 = (const unsigned int *)key; ++ unsigned int h = len; ++ unsigned int k = len + seed + n; ++ unsigned long long p; ++ ++ while (len >= 8) { ++ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2; ++ len -= 8; ++ } ++ if (len >= 4) { ++ cwmixb(key4[0]) key4 += 1; ++ len -= 4; ++ } ++ if (len) ++ cwmixa(key4[0] & ((1 << (len * 8)) - 1 )); ++ cwmixb(h ^ (k + n)); ++ return k ^ h; ++ ++#undef cwfold ++#undef cwmixa ++#undef cwmixb ++} ++ ++static void set_hash(const char *fn_name, struct function_hash *fn_hash_data) ++{ ++ unsigned int fn, codes, seed = 0; ++ ++ fn = CrapWow(fn_name, strlen(fn_name), seed) & 0xffff; ++ codes = CrapWow((const char*)fn_hash_data->tree_codes, fn_hash_data->tree_codes_len, seed) & 0xffff; ++ ++ fn_hash_data->hash = fn ^ codes; ++} ++ ++static void set_node_codes(const_tree type, struct function_hash *fn_hash_data) ++{ ++ gcc_assert(type != NULL_TREE); ++ gcc_assert(TREE_CODE_CLASS(TREE_CODE(type)) == tcc_type); ++ ++ while (type && fn_hash_data->tree_codes_len < CODES_LIMIT) { ++ fn_hash_data->tree_codes[fn_hash_data->tree_codes_len] = get_tree_code(type); ++ fn_hash_data->tree_codes_len++; ++ type = TREE_TYPE(type); ++ } ++} ++ ++static void set_result_codes(const_tree node, struct function_hash *fn_hash_data) ++{ ++ const_tree result; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ if (DECL_P(node)) { ++ result = DECL_RESULT(node); ++ if (result != NULL_TREE) ++ return set_node_codes(TREE_TYPE(result), fn_hash_data); ++ return set_result_codes(TREE_TYPE(node), fn_hash_data); ++ } ++ ++ gcc_assert(TYPE_P(node)); ++ ++ if (TREE_CODE(node) == FUNCTION_TYPE) ++ return set_result_codes(TREE_TYPE(node), fn_hash_data); ++ ++ return set_node_codes(node, fn_hash_data); ++} ++ ++static void set_function_codes(struct function_hash *fn_hash_data) ++{ ++ const_tree arg, type = TREE_TYPE(fn_hash_data->fndecl); ++ enum tree_code code = TREE_CODE(type); ++ ++ gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE); ++ ++ set_result_codes(fn_hash_data->fndecl, fn_hash_data); ++ ++ for (arg = TYPE_ARG_TYPES(type); arg != 
NULL_TREE && fn_hash_data->tree_codes_len < CODES_LIMIT; arg = TREE_CHAIN(arg)) ++ set_node_codes(TREE_VALUE(arg), fn_hash_data); ++} ++ ++static const struct size_overflow_hash *get_proper_hash_chain(const struct size_overflow_hash *entry, const char *func_name) ++{ ++ while (entry) { ++ if (!strcmp(entry->name, func_name)) ++ return entry; ++ entry = entry->next; ++ } ++ return NULL; ++} ++ ++const struct size_overflow_hash *get_function_hash(const_tree fndecl) ++{ ++ const struct size_overflow_hash *entry; ++ struct function_hash fn_hash_data; ++ const char *func_name; ++ ++ // skip builtins __builtin_constant_p ++ if (DECL_BUILT_IN(fndecl)) ++ return NULL; ++ ++ fn_hash_data.fndecl = fndecl; ++ fn_hash_data.tree_codes_len = 0; ++ ++ set_function_codes(&fn_hash_data); ++ gcc_assert(fn_hash_data.tree_codes_len != 0); ++ ++ func_name = DECL_NAME_POINTER(fn_hash_data.fndecl); ++ set_hash(func_name, &fn_hash_data); ++ ++ entry = size_overflow_hash[fn_hash_data.hash]; ++ entry = get_proper_hash_chain(entry, func_name); ++ if (entry) ++ return entry; ++ entry = size_overflow_hash_aux[fn_hash_data.hash]; ++ return get_proper_hash_chain(entry, func_name); ++} ++ ++static void print_missing_msg(const_tree func, unsigned int argnum) ++{ ++ location_t loc; ++ const char *curfunc; ++ struct function_hash fn_hash_data; ++ ++ fn_hash_data.fndecl = DECL_ORIGIN(func); ++ fn_hash_data.tree_codes_len = 0; ++ ++ loc = DECL_SOURCE_LOCATION(fn_hash_data.fndecl); ++ curfunc = DECL_NAME_POINTER(fn_hash_data.fndecl); ++ ++ set_function_codes(&fn_hash_data); ++ set_hash(curfunc, &fn_hash_data); ++ ++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash); ++} ++ ++unsigned int find_arg_number_tree(const_tree arg, const_tree func) ++{ ++ tree var; ++ unsigned int argnum = 1; ++ ++ if (TREE_CODE(arg) == SSA_NAME) ++ arg = SSA_NAME_VAR(arg); ++ ++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) { ++ if (!operand_equal_p(arg, var, 0) && strcmp(DECL_NAME_POINTER(var), DECL_NAME_POINTER(arg))) ++ continue; ++ if (!skip_types(var)) ++ return argnum; ++ } ++ ++ return CANNOT_FIND_ARG; ++} ++ ++static const char *get_asm_string(const_gimple stmt) ++{ ++ if (!stmt) ++ return NULL; ++ if (gimple_code(stmt) != GIMPLE_ASM) ++ return NULL; ++ ++ return gimple_asm_string(stmt); ++} ++ ++bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt) ++{ ++ const char *str; ++ ++ str = get_asm_string(stmt); ++ if (!str) ++ return false; ++ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1); ++} ++ ++bool is_size_overflow_intentional_asm_yes(const_gimple stmt) ++{ ++ const char *str; ++ ++ str = get_asm_string(stmt); ++ if (!str) ++ return false; ++ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1); ++} ++ ++bool is_size_overflow_asm(const_gimple stmt) ++{ ++ const char *str; ++ ++ str = get_asm_string(stmt); ++ if (!str) ++ return false; ++ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1); ++} ++ ++bool is_a_return_check(const_tree node) ++{ ++ if (TREE_CODE(node) == FUNCTION_DECL) ++ return true; ++ ++ gcc_assert(TREE_CODE(node) == PARM_DECL); ++ return false; ++} ++ ++// Get the argnum of a function decl, if node is a return then the argnum is 0 ++unsigned int get_function_num(const_tree node, const_tree orig_fndecl) ++{ ++ if (is_a_return_check(node)) ++ return 0; ++ else ++ return find_arg_number_tree(node, orig_fndecl); ++} ++ ++unsigned int get_correct_arg_count(unsigned int argnum, 
const_tree fndecl) ++{ ++ const struct size_overflow_hash *hash; ++ unsigned int new_argnum; ++ tree arg; ++ const_tree origarg; ++ ++ if (argnum == 0) ++ return argnum; ++ ++ hash = get_function_hash(fndecl); ++ if (hash && hash->param & (1U << argnum)) ++ return argnum; ++ ++ if (DECL_EXTERNAL(fndecl)) ++ return argnum; ++ ++ origarg = DECL_ARGUMENTS(DECL_ORIGIN(fndecl)); ++ argnum--; ++ while (origarg && argnum) { ++ origarg = TREE_CHAIN(origarg); ++ argnum--; ++ } ++ gcc_assert(argnum == 0); ++ gcc_assert(origarg != NULL_TREE); ++ ++ for (arg = DECL_ARGUMENTS(fndecl), new_argnum = 1; arg; arg = TREE_CHAIN(arg), new_argnum++) ++ if (operand_equal_p(origarg, arg, 0) || !strcmp(DECL_NAME_POINTER(origarg), DECL_NAME_POINTER(arg))) ++ return new_argnum; ++ ++ return CANNOT_FIND_ARG; ++} ++ ++static bool is_in_hash_table(const_tree fndecl, unsigned int num) ++{ ++ const struct size_overflow_hash *hash; ++ ++ hash = get_function_hash(fndecl); ++ if (hash && (hash->param & (1U << num))) ++ return true; ++ return false; ++} ++ ++/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table. ++ * If the function is missing everywhere then print the missing message into stderr. ++ */ ++bool is_missing_function(const_tree orig_fndecl, unsigned int num) ++{ ++ switch (DECL_FUNCTION_CODE(orig_fndecl)) { ++#if BUILDING_GCC_VERSION >= 4008 ++ case BUILT_IN_BSWAP16: ++#endif ++ case BUILT_IN_BSWAP32: ++ case BUILT_IN_BSWAP64: ++ case BUILT_IN_EXPECT: ++ case BUILT_IN_MEMCMP: ++ return false; ++ default: ++ break; ++ } ++ ++ // skip test.c ++ if (strcmp(DECL_NAME_POINTER(current_function_decl), "coolmalloc")) { ++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(orig_fndecl))) ++ warning(0, "unnecessary size_overflow attribute on: %s\n", DECL_NAME_POINTER(orig_fndecl)); ++ } ++ ++ if (is_in_hash_table(orig_fndecl, num)) ++ return false; ++ ++ print_missing_msg(orig_fndecl, num); ++ return true; ++} ++ +diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c +new file mode 100644 +index 0000000..924652b +--- /dev/null ++++ b/tools/gcc/stackleak_plugin.c +@@ -0,0 +1,395 @@ ++/* ++ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to help implement various PaX features ++ * ++ * - track lowest stack pointer ++ * ++ * TODO: ++ * - initialize all local variables ++ * ++ * BUGS: ++ * - none known ++ */ ++ ++#include "gcc-common.h" ++ ++int plugin_is_GPL_compatible; ++ ++static int track_frame_size = -1; ++static const char track_function[] = "pax_track_stack"; ++static const char check_function[] = "pax_check_alloca"; ++static tree track_function_decl, check_function_decl; ++static bool init_locals; ++ ++static struct plugin_info stackleak_plugin_info = { ++ .version = "201408011900", ++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n" ++// "initialize-locals\t\tforcibly initialize all stack frames\n" ++}; ++ ++static void stackleak_check_alloca(gimple_stmt_iterator *gsi) ++{ ++ gimple check_alloca; ++ tree alloca_size; ++ ++ // insert call to void pax_check_alloca(unsigned long size) ++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0); ++ check_alloca = gimple_build_call(check_function_decl, 1, 
alloca_size); ++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT); ++} ++ ++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi) ++{ ++ gimple track_stack; ++ ++ // insert call to void pax_track_stack(void) ++ track_stack = gimple_build_call(track_function_decl, 0); ++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING); ++} ++ ++static bool is_alloca(gimple stmt) ++{ ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) ++ return true; ++ ++#if BUILDING_GCC_VERSION >= 4007 ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ return true; ++#endif ++ ++ return false; ++} ++ ++static unsigned int execute_stackleak_tree_instrument(void) ++{ ++ basic_block bb, entry_bb; ++ bool prologue_instrumented = false, is_leaf = true; ++ ++ entry_bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt; ++ ++ stmt = gsi_stmt(gsi); ++ ++ if (is_gimple_call(stmt)) ++ is_leaf = false; ++ ++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450> ++ if (!is_alloca(stmt)) ++ continue; ++ ++ // 2. insert stack overflow check before each __builtin_alloca call ++ stackleak_check_alloca(&gsi); ++ ++ // 3. insert track call after each __builtin_alloca call ++ stackleak_add_instrumentation(&gsi); ++ if (bb == entry_bb) ++ prologue_instrumented = true; ++ } ++ } ++ ++ // special cases for some bad linux code: taking the address of static inline functions will materialize them ++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI ++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI. ++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here. ++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl)) ++ return 0; ++ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10)) ++ return 0; ++ ++ // 4. insert track call at the beginning ++ if (!prologue_instrumented) { ++ gimple_stmt_iterator gsi; ++ ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun)); ++ gsi = gsi_start_bb(bb); ++ stackleak_add_instrumentation(&gsi); ++ } ++ ++ return 0; ++} ++ ++static unsigned int execute_stackleak_final(void) ++{ ++ rtx insn, next; ++ ++ if (cfun->calls_alloca) ++ return 0; ++ ++ // keep calls only if function frame is big enough ++ if (get_frame_size() >= track_frame_size) ++ return 0; ++ ++ // 1. 
find pax_track_stack calls ++ for (insn = get_insns(); insn; insn = next) { ++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil)) ++ rtx body; ++ ++ next = NEXT_INSN(insn); ++ if (!CALL_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) != CALL) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != MEM) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != SYMBOL_REF) ++ continue; ++// if (strcmp(XSTR(body, 0), track_function)) ++ if (SYMBOL_REF_DECL(body) != track_function_decl) ++ continue; ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ // 2. delete call ++ delete_insn_and_edges(insn); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) { ++ insn = next; ++ next = NEXT_INSN(insn); ++ delete_insn_and_edges(insn); ++ } ++#endif ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ ++ return 0; ++} ++ ++static bool gate_stackleak_track_stack(void) ++{ ++ tree section; ++ ++ if (ix86_cmodel != CM_KERNEL) ++ return false; ++ ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); ++ if (section && TREE_VALUE(section)) { ++ section = TREE_VALUE(TREE_VALUE(section)); ++ ++ if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10)) ++ return false; ++ if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13)) ++ return false; ++ if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13)) ++ return false; ++ if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13)) ++ return false; ++ } ++ ++ return track_frame_size >= 0; ++} ++ ++static void stackleak_start_unit(void *gcc_data, void *user_data) ++{ ++ tree fntype; ++ ++ // void pax_track_stack(void) ++ fntype = build_function_type_list(void_type_node, NULL_TREE); ++ track_function_decl = build_fn_decl(track_function, fntype); ++ DECL_ASSEMBLER_NAME(track_function_decl); // for LTO ++ TREE_PUBLIC(track_function_decl) = 1; ++ DECL_EXTERNAL(track_function_decl) = 1; ++ DECL_ARTIFICIAL(track_function_decl) = 1; ++ ++ // void pax_check_alloca(unsigned long) ++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); ++ check_function_decl = build_fn_decl(check_function, fntype); ++ DECL_ASSEMBLER_NAME(check_function_decl); // for LTO ++ TREE_PUBLIC(check_function_decl) = 1; ++ DECL_EXTERNAL(check_function_decl) = 1; ++ DECL_ARTIFICIAL(check_function_decl) = 1; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data stackleak_tree_instrument_pass_data = { ++#else ++static struct gimple_opt_pass stackleak_tree_instrument_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "stackleak_tree_instrument", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = true, ++ .has_execute = true, ++#else ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_tree_instrument, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ 
.todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa | TODO_rebuild_cgraph_edges ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data stackleak_final_rtl_opt_pass_data = { ++#else ++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = { ++ .pass = { ++#endif ++ .type = RTL_PASS, ++ .name = "stackleak_final", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = true, ++ .has_execute = true, ++#else ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_final, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class stackleak_tree_instrument_pass : public gimple_opt_pass { ++public: ++ stackleak_tree_instrument_pass() : gimple_opt_pass(stackleak_tree_instrument_pass_data, g) {} ++ bool gate() { return gate_stackleak_track_stack(); } ++ unsigned int execute() { return execute_stackleak_tree_instrument(); } ++}; ++ ++class stackleak_final_rtl_opt_pass : public rtl_opt_pass { ++public: ++ stackleak_final_rtl_opt_pass() : rtl_opt_pass(stackleak_final_rtl_opt_pass_data, g) {} ++ bool gate() { return gate_stackleak_track_stack(); } ++ unsigned int execute() { return execute_stackleak_final(); } ++}; ++} ++ ++static opt_pass *make_stackleak_tree_instrument_pass(void) ++{ ++ return new stackleak_tree_instrument_pass(); ++} ++ ++static opt_pass *make_stackleak_final_rtl_opt_pass(void) ++{ ++ return new stackleak_final_rtl_opt_pass(); ++} ++#else ++static struct opt_pass *make_stackleak_tree_instrument_pass(void) ++{ ++ return &stackleak_tree_instrument_pass.pass; ++} ++ ++static struct opt_pass *make_stackleak_final_rtl_opt_pass(void) ++{ ++ return &stackleak_final_rtl_opt_pass.pass; ++} ++#endif ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info stackleak_tree_instrument_pass_info; ++ struct register_pass_info stackleak_final_pass_info; ++ static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = { ++ { ++ .base = &track_function_decl, ++ .nelt = 1, ++ .stride = sizeof(track_function_decl), ++ .cb = >_ggc_mx_tree_node, ++ .pchw = >_pch_nx_tree_node ++ }, ++ { ++ .base = &check_function_decl, ++ .nelt = 1, ++ .stride = sizeof(check_function_decl), ++ .cb = >_ggc_mx_tree_node, ++ .pchw = >_pch_nx_tree_node ++ }, ++ LAST_GGC_ROOT_TAB ++ }; ++ ++ stackleak_tree_instrument_pass_info.pass = make_stackleak_tree_instrument_pass(); ++// stackleak_tree_instrument_pass_info.reference_pass_name = "tree_profile"; ++ stackleak_tree_instrument_pass_info.reference_pass_name = "optimized"; ++ stackleak_tree_instrument_pass_info.ref_pass_instance_number = 1; ++ stackleak_tree_instrument_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ stackleak_final_pass_info.pass = make_stackleak_final_rtl_opt_pass(); ++ stackleak_final_pass_info.reference_pass_name = "final"; ++ stackleak_final_pass_info.ref_pass_instance_number = 1; ++ stackleak_final_pass_info.pos_op = 
PASS_POS_INSERT_BEFORE; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info); ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "track-lowest-sp")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ track_frame_size = atoi(argv[i].value); ++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0) ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ if (!strcmp(argv[i].key, "initialize-locals")) { ++ if (argv[i].value) { ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ init_locals = true; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)>_ggc_r_gt_stackleak); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info); ++ ++ return 0; ++} +diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c +new file mode 100644 +index 0000000..4ee2231 +--- /dev/null ++++ b/tools/gcc/structleak_plugin.c +@@ -0,0 +1,274 @@ ++/* ++ * Copyright 2013-2014 by PaX Team <pageexec@freemail.hu> ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to forcibly initialize certain local variables that could ++ * otherwise leak kernel stack to userland if they aren't properly initialized ++ * by later code ++ * ++ * Homepage: http://pax.grsecurity.net/ ++ * ++ * Usage: ++ * $ # for 4.5/4.6/C based 4.7 ++ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c ++ * $ # for C++ based 4.7/4.8+ ++ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c ++ * $ gcc -fplugin=./structleak_plugin.so test.c -O2 ++ * ++ * TODO: eliminate redundant initializers ++ * increase type coverage ++ */ ++ ++#include "gcc-common.h" ++ ++// unused C type flag in all versions 4.5-4.9 ++#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE) ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info structleak_plugin_info = { ++ .version = "201401260140", ++ .help = "disable\tdo not activate plugin\n", ++}; ++ ++static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = true; ++ ++ // check for types? 
for now accept everything linux has to offer ++ if (TREE_CODE(*node) != FIELD_DECL) ++ return NULL_TREE; ++ ++ *no_add_attrs = false; ++ return NULL_TREE; ++} ++ ++static struct attribute_spec user_attr = { ++ .name = "user", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_user_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&user_attr); ++// register_attribute(&force_attr); ++} ++ ++static tree get_field_type(tree field) ++{ ++ return strip_array_types(TREE_TYPE(field)); ++} ++ ++static bool is_userspace_type(tree type) ++{ ++ tree field; ++ ++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) { ++ tree fieldtype = get_field_type(field); ++ enum tree_code code = TREE_CODE(fieldtype); ++ ++ if (code == RECORD_TYPE || code == UNION_TYPE) ++ if (is_userspace_type(fieldtype)) ++ return true; ++ ++ if (lookup_attribute("user", DECL_ATTRIBUTES(field))) ++ return true; ++ } ++ return false; ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ ++ if (TYPE_USERSPACE(type)) ++ return; ++ ++ if (is_userspace_type(type)) ++ TYPE_USERSPACE(type) = 1; ++} ++ ++static void initialize(tree var) ++{ ++ basic_block bb; ++ gimple_stmt_iterator gsi; ++ tree initializer; ++ gimple init_stmt; ++ ++ // this is the original entry bb before the forced split ++ // TODO: check further BBs in case more splits occured before us ++ bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb->next_bb; ++ ++ // first check if the variable is already initialized, warn otherwise ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt = gsi_stmt(gsi); ++ tree rhs1; ++ ++ // we're looking for an assignment of a single rhs... ++ if (!gimple_assign_single_p(stmt)) ++ continue; ++ rhs1 = gimple_assign_rhs1(stmt); ++#if BUILDING_GCC_VERSION >= 4007 ++ // ... of a non-clobbering expression... ++ if (TREE_CLOBBER_P(rhs1)) ++ continue; ++#endif ++ // ... to our variable... 
++ if (gimple_get_lhs(stmt) != var) ++ continue; ++ // if it's an initializer then we're good ++ if (TREE_CODE(rhs1) == CONSTRUCTOR) ++ return; ++ } ++ ++ // these aren't the 0days you're looking for ++// inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized"); ++ ++ // build the initializer expression ++ initializer = build_constructor(TREE_TYPE(var), NULL); ++ ++ // build the initializer stmt ++ init_stmt = gimple_build_assign(var, initializer); ++ gsi = gsi_start_bb(ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb); ++ gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT); ++ update_stmt(init_stmt); ++} ++ ++static unsigned int handle_function(void) ++{ ++ basic_block bb; ++ unsigned int ret = 0; ++ tree var; ++ unsigned int i; ++ ++ // split the first bb where we can put the forced initializers ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun)); ++ ++ // enumarate all local variables and forcibly initialize our targets ++ FOR_EACH_LOCAL_DECL(cfun, i, var) { ++ tree type = TREE_TYPE(var); ++ ++ gcc_assert(DECL_P(var)); ++ if (!auto_var_in_fn_p(var, current_function_decl)) ++ continue; ++ ++ // only care about structure types ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ // if the type is of interest, examine the variable ++ if (TYPE_USERSPACE(type)) ++ initialize(var); ++ } ++ ++ return ret; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data structleak_pass_data = { ++#else ++static struct gimple_opt_pass structleak_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "structleak", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = handle_function, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class structleak_pass : public gimple_opt_pass { ++public: ++ structleak_pass() : gimple_opt_pass(structleak_pass_data, g) {} ++ unsigned int execute() { return handle_function(); } ++}; ++} ++ ++static opt_pass *make_structleak_pass(void) ++{ ++ return new structleak_pass(); ++} ++#else ++static struct opt_pass *make_structleak_pass(void) ++{ ++ return &structleak_pass.pass; ++} ++#endif ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ int i; ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ bool enable = true; ++ struct register_pass_info structleak_pass_info; ++ ++ structleak_pass_info.pass = make_structleak_pass(); ++ structleak_pass_info.reference_pass_name = "ssa"; ++ structleak_pass_info.ref_pass_instance_number = 1; ++ structleak_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ if 
(strcmp(lang_hooks.name, "GNU C")) { ++ inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name); ++ enable = false; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "disable")) { ++ enable = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info); ++ if (enable) { ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info); ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h +index fbc6665..5e9ce79 100644 +--- a/tools/include/linux/compiler.h ++++ b/tools/include/linux/compiler.h +@@ -35,4 +35,12 @@ + # define unlikely(x) __builtin_expect(!!(x), 0) + #endif + ++#ifndef __size_overflow ++# define __size_overflow(...) ++#endif ++ ++#ifndef __intentional_overflow ++# define __intentional_overflow(...) ++#endif ++ + #endif /* _TOOLS_LINUX_COMPILER_H */ +diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile +index ed2f51e..cc2d8f6 100644 +--- a/tools/lib/api/Makefile ++++ b/tools/lib/api/Makefile +@@ -14,7 +14,7 @@ LIB_OBJS += $(OUTPUT)fs/debugfs.o + + LIBFILE = libapikfs.a + +-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC ++CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC + EXTLIBS = -lelf -lpthread -lrt -lm + ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 + ALL_LDFLAGS = $(LDFLAGS) +diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h +index 6789d788..4afd019e 100644 +--- a/tools/perf/util/include/asm/alternative-asm.h ++++ b/tools/perf/util/include/asm/alternative-asm.h +@@ -5,4 +5,7 @@ + + #define altinstruction_entry # + ++ .macro pax_force_retaddr rip=0, reload=0 ++ .endm ++ + #endif +diff --git a/tools/virtio/linux/uaccess.h b/tools/virtio/linux/uaccess.h +index 0a578fe..b81f62d 100644 +--- a/tools/virtio/linux/uaccess.h ++++ b/tools/virtio/linux/uaccess.h +@@ -13,7 +13,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size) + ({ \ + typeof(ptr) __pu_ptr = (ptr); \ + __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \ +- ACCESS_ONCE(*(__pu_ptr)) = x; \ ++ ACCESS_ONCE_RW(*(__pu_ptr)) = x; \ + 0; \ + }) + +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 03a0381..8b31923 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -76,12 +76,17 @@ LIST_HEAD(vm_list); + + static cpumask_var_t cpus_hardware_enabled; + static int kvm_usage_count = 0; +-static atomic_t hardware_enable_failed; ++static atomic_unchecked_t hardware_enable_failed; + + struct kmem_cache *kvm_vcpu_cache; + EXPORT_SYMBOL_GPL(kvm_vcpu_cache); + +-static __read_mostly struct preempt_ops kvm_preempt_ops; ++static void kvm_sched_in(struct preempt_notifier *pn, int cpu); ++static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next); ++static struct preempt_ops kvm_preempt_ops = { ++ .sched_in = kvm_sched_in, ++ .sched_out = kvm_sched_out, ++}; + + struct dentry *kvm_debugfs_dir; + +@@ -758,7 +763,7 @@ int __kvm_set_memory_region(struct kvm *kvm, + /* We can read the guest memory with __xxx_user() later on. 
*/ + if ((mem->slot < KVM_USER_MEM_SLOTS) && + ((mem->userspace_addr & (PAGE_SIZE - 1)) || +- !access_ok(VERIFY_WRITE, ++ !access_ok_noprefault(VERIFY_WRITE, + (void __user *)(unsigned long)mem->userspace_addr, + mem->memory_size))) + goto out; +@@ -1615,9 +1620,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached); + + int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) + { +- const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); ++ int r; ++ unsigned long addr; + +- return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); ++ addr = gfn_to_hva(kvm, gfn); ++ if (kvm_is_error_hva(addr)) ++ return -EFAULT; ++ r = __clear_user((void __user *)addr + offset, len); ++ if (r) ++ return -EFAULT; ++ mark_page_dirty(kvm, gfn); ++ return 0; + } + EXPORT_SYMBOL_GPL(kvm_clear_guest_page); + +@@ -1867,7 +1880,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) + return 0; + } + +-static struct file_operations kvm_vcpu_fops = { ++static file_operations_no_const kvm_vcpu_fops __read_only = { + .release = kvm_vcpu_release, + .unlocked_ioctl = kvm_vcpu_ioctl, + #ifdef CONFIG_COMPAT +@@ -2532,7 +2545,7 @@ out: + } + #endif + +-static struct file_operations kvm_vm_fops = { ++static file_operations_no_const kvm_vm_fops __read_only = { + .release = kvm_vm_release, + .unlocked_ioctl = kvm_vm_ioctl, + #ifdef CONFIG_COMPAT +@@ -2632,7 +2645,7 @@ out: + return r; + } + +-static struct file_operations kvm_chardev_ops = { ++static file_operations_no_const kvm_chardev_ops __read_only = { + .unlocked_ioctl = kvm_dev_ioctl, + .compat_ioctl = kvm_dev_ioctl, + .llseek = noop_llseek, +@@ -2658,7 +2671,7 @@ static void hardware_enable_nolock(void *junk) + + if (r) { + cpumask_clear_cpu(cpu, cpus_hardware_enabled); +- atomic_inc(&hardware_enable_failed); ++ atomic_inc_unchecked(&hardware_enable_failed); + printk(KERN_INFO "kvm: enabling virtualization on " + "CPU%d failed\n", cpu); + } +@@ -2714,10 +2727,10 @@ static int hardware_enable_all(void) + + kvm_usage_count++; + if (kvm_usage_count == 1) { +- atomic_set(&hardware_enable_failed, 0); ++ atomic_set_unchecked(&hardware_enable_failed, 0); + on_each_cpu(hardware_enable_nolock, NULL, 1); + +- if (atomic_read(&hardware_enable_failed)) { ++ if (atomic_read_unchecked(&hardware_enable_failed)) { + hardware_disable_all_nolock(); + r = -EBUSY; + } +@@ -3121,7 +3134,7 @@ static void kvm_sched_out(struct preempt_notifier *pn, + kvm_arch_vcpu_put(vcpu); + } + +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module) + { + int r; +@@ -3168,7 +3181,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + if (!vcpu_align) + vcpu_align = __alignof__(struct kvm_vcpu); + kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, +- 0, NULL); ++ SLAB_USERCOPY, NULL); + if (!kvm_vcpu_cache) { + r = -ENOMEM; + goto out_free_3; +@@ -3178,9 +3191,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + if (r) + goto out_free; + ++ pax_open_kernel(); + kvm_chardev_ops.owner = module; + kvm_vm_fops.owner = module; + kvm_vcpu_fops.owner = module; ++ pax_close_kernel(); + + r = misc_register(&kvm_dev); + if (r) { +@@ -3190,9 +3205,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, + + register_syscore_ops(&kvm_syscore_ops); + +- kvm_preempt_ops.sched_in = kvm_sched_in; +- kvm_preempt_ops.sched_out = kvm_sched_out; +- + r = kvm_init_debug(); + if (r) { + 
printk(KERN_ERR "kvm: create debugfs files failed\n");
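The kvm_main.c hunks above illustrate a convention applied throughout the patch: counters whose wraparound is harmless move from atomic_t to atomic_unchecked_t, so that the overflow checking PaX adds to ordinary atomic_t operations only trips on genuine reference-count bugs. A condensed sketch of the idiom, assuming the _unchecked type and helpers defined elsewhere in the patch (check_cpu() is a hypothetical stand-in for the real hardware-enable path):

static atomic_unchecked_t hardware_enable_failed;	/* a statistic; wrapping is benign */

static void hardware_enable_one(int cpu)
{
	if (check_cpu(cpu) < 0)			/* hypothetical helper */
		atomic_inc_unchecked(&hardware_enable_failed);
}

static int hardware_enable_all_ok(void)
{
	atomic_set_unchecked(&hardware_enable_failed, 0);
	/* ... enable on each cpu, as on_each_cpu() does above ... */
	return atomic_read_unchecked(&hardware_enable_failed) == 0;
}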