diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index d9bcffd..32fb41b 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,6 +3,7 @@
 *.bin
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -38,8 +39,10 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 *.9
@@ -49,11 +52,16 @@
 53c700_d.h
 CVS
 ChangeSet
+GPATH
+GRTAGS
+GSYMS
+GTAGS
 Image
 Kerntypes
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
@@ -76,7 +84,10 @@ btfixupprep
 build
 bvmlinux
 bzImage*
+capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
@@ -100,19 +111,22 @@ fore200e_mkfirm
 fore200e_pca_fw.c*
 gconf
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
 ihex2fw
 ikconfig.h*
+inat-tables.c
 initramfs_data.cpio
+initramfs_data.cpio.bz2
 initramfs_data.cpio.gz
 initramfs_list
 kallsyms
-kconfig
 keywords.c
 ksym.c*
 ksym.h*
@@ -136,10 +150,13 @@ mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
+mkregtable
 mktables
 mktree
 modpost
+modules.builtin
 modules.order
 modversions.h*
 ncscope.*
@@ -151,7 +168,9 @@ parse.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
+perf-archive
 piggy.gz
+piggy.S
 piggyback
 pnmtologo
 ppc_defs.h*
@@ -160,12 +179,14 @@ qconf
 raid6altivec*.c
 raid6int*.c
 raid6tables.c
+regdb.c
 relocs
 series
 setup
 setup.bin
 setup.elf
 sImage
+slabinfo
 sm_tbl*
 split-include
 syscalltab.h
@@ -189,14 +210,20 @@ version.h*
 vmlinux
 vmlinux-*
 vmlinux.aout
+vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
+voffset.h
 vsyscall.lds
 vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
 zImage*
 zconf.hash.c
+zoffset.h
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 931c806..e6f6ff1 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -123,8 +123,8 @@ set of sysfs operations for forwarding read and write calls to the
 show and store methods of the attribute owners.
 
 struct sysfs_ops {
-	ssize_t (*show)(struct kobject *, struct attribute *, char *);
-	ssize_t (*store)(struct kobject *, struct attribute *, const char *);
+	ssize_t (* const show)(struct kobject *, struct attribute *, char *);
+	ssize_t (* const store)(struct kobject *, struct attribute *, const char *);
 };
 
 [ Subsystems should have already defined a struct kobj_type as a
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2b2407d..4ebd036 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1910,6 +1910,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			the specified number of seconds. This is to be used if
 			your oopses keep scrolling off the screen.
 
+	pax_nouderef	[X86-32] disables UDEREF. Most likely needed under certain
+			virtualization environments that don't cope well with the
+			expand down segment used by UDEREF on X86-32.
+
+	pax_softmode=	[X86-32] 0/1 to disable/enable PaX softmode on boot already.
+
 	pcbit=		[HW,ISDN]
 
 	pcd.
[PARIDE] diff --git a/Makefile b/Makefile index 6538501..6616857 100644 --- a/Makefile +++ b/Makefile @@ -230,8 +230,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ HOSTCC = gcc HOSTCXX = g++ -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -HOSTCXXFLAGS = -O2 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks # Decide whether to build built-in, modular, or both. # Normally, just do built-in. @@ -650,7 +650,7 @@ export mod_strip_cmd ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 1bce816..d5b403b 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -3,9 +3,9 @@ #include -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return dma_ops; } diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index 9baae8a..8d8bc6b 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) +#endif + /* $0 is set by ld.so to a pointer to a function which might be registered using atexit. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 71a2432..bb30d23 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -101,6 +101,17 @@ struct vm_area_struct; #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index ebc3c89..20cfa63 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, /* The small sections were sorted to the end of the segment. The following should definitely cover them. 
*/ - gp = (u64)me->module_core + me->core_size - 0x8000; + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; for (i = 0; i < n; i++) { diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index de9d397..22afe8a 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, /* At this point: (!vma || addr < vma->vm_end). */ if (limit - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) return addr; addr = vma->vm_end; vma = vma->vm_next; @@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) @@ -1213,8 +1217,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, } /* Next, try allocating at TASK_UNMAPPED_BASE. */ - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), - len, limit); + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit); + if (addr != (unsigned long) -ENOMEM) return addr; diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 246100e..f05bd14 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct device *dev, u64 mask) return 0; } -struct dma_map_ops alpha_noop_ops = { +const struct dma_map_ops alpha_noop_ops = { .alloc_coherent = alpha_noop_alloc_coherent, .free_coherent = alpha_noop_free_coherent, .map_page = alpha_noop_map_page, @@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = { .set_dma_mask = alpha_noop_set_mask, }; -struct dma_map_ops *dma_ops = &alpha_noop_ops; +const struct dma_map_ops *dma_ops = &alpha_noop_ops; EXPORT_SYMBOL(dma_ops); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index d1dbd9a..664a19c 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct device *dev, u64 mask) return 0; } -struct dma_map_ops alpha_pci_ops = { +const struct dma_map_ops alpha_pci_ops = { .alloc_coherent = alpha_pci_alloc_coherent, .free_coherent = alpha_pci_free_coherent, .map_page = alpha_pci_map_page, @@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = { .set_dma_mask = alpha_pci_set_mask, }; -struct dma_map_ops *dma_ops = &alpha_pci_ops; +const struct dma_map_ops *dma_ops = &alpha_pci_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index fadd5f8..3168191 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm) __reload_thread(pcb); } +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int ldah, ldq, jmp; + + 
err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(ldq, (unsigned int *)(regs->pc+4)); + err |= get_user(jmp, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U) == 0x277B0000U && + (ldq & 0xFFFF0000U) == 0xA77B0000U && + jmp == 0x6BFB0000U) + { + unsigned long r27, addr; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; + + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + err = get_user(r27, (unsigned long *)addr); + if (err) + break; + + regs->r27 = r27; + regs->pc = r27; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #2 */ + unsigned int ldah, lda, br; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(lda, (unsigned int *)(regs->pc+4)); + err |= get_user(br, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U) == 0x277B0000U && + (lda & 0xFFFF0000U) == 0xA77B0000U && + (br & 0xFFE00000U) == 0xC3E00000U) + { + unsigned long addr = br | 0xFFFFFFFFFFE00000UL; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; + + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation */ + unsigned int br; + + err = get_user(br, (unsigned int *)regs->pc); + + if (!err && (br & 0xFFE00000U) == 0xC3800000U) { + unsigned int br2, ldq, nop, jmp; + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; + + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + err = get_user(br2, (unsigned int *)addr); + err |= get_user(ldq, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + err |= get_user(jmp, (unsigned int *)(addr+12)); + err |= get_user(resolver, (unsigned long *)(addr+16)); + + if (err) + break; + + if (br2 == 0xC3600000U && + ldq == 0xA77B000CU && + nop == 0x47FF041FU && + jmp == 0x6B7B0000U) + { + regs->r28 = regs->pc+4; + regs->r27 = addr+16; + regs->pc = resolver; + return 3; + } + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif /* * This routine handles page faults. 
It determines the address, @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr, good_area: si_code = SEGV_ACCERR; if (cause < 0) { - if (!(vma->vm_flags & VM_EXEC)) + if (!(vma->vm_flags & VM_EXEC)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) + goto bad_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); + do_group_exit(SIGKILL); +#else goto bad_area; +#endif + + } } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 51662fe..2939eb4 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -111,7 +111,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00008000UL + +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#endif /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index e51b1e8..32a3113 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -21,6 +21,7 @@ enum km_type { KM_L1_CACHE, KM_L2_CACHE, KM_KDB, + KM_CLEARPAGE, KM_TYPE_NR }; diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 33e4a48..243db72 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -403,6 +403,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n); static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); else /* security hole - plug it */ @@ -412,6 +415,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index c868a88..7c22c9f 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -208,7 +208,7 @@ void kgdb_arch_exit(void) * and we handle the normal undef case within the do_undefinstr * handler. */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { #ifndef __ARMEB__ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7} #else /* ! 
__ARMEB__ */ diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 6156689..fca5b29 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -294,7 +294,7 @@ static void at91_pm_end(void) } -static struct platform_suspend_ops at91_pm_ops ={ +static const struct platform_suspend_ops at91_pm_ops ={ .valid = at91_pm_valid_state, .begin = at91_pm_begin, .enter = at91_pm_enter, diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c index fab953b..1bd73a0 100644 --- a/arch/arm/mach-davinci/pm.c +++ b/arch/arm/mach-davinci/pm.c @@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_state_t state) return ret; } -static struct platform_suspend_ops davinci_pm_ops = { +static const struct platform_suspend_ops davinci_pm_ops = { .enter = davinci_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c index b64ba5a..fb24fa6 100644 --- a/arch/arm/mach-msm/last_radio_log.c +++ b/arch/arm/mach-msm/last_radio_log.c @@ -47,6 +47,7 @@ static ssize_t last_radio_log_read(struct file *file, char __user *buf, return count; } +/* cannot be const, see msm_init_last_radio_log */ static struct file_operations last_radio_log_fops = { .read = last_radio_log_read }; diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c index b1d3f9f..6e126ce 100644 --- a/arch/arm/mach-omap1/pm.c +++ b/arch/arm/mach-omap1/pm.c @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = { -static struct platform_suspend_ops omap_pm_ops ={ +static const struct platform_suspend_ops omap_pm_ops ={ .prepare = omap_pm_prepare, .enter = omap_pm_enter, .finish = omap_pm_finish, diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c index e321281..57b0f5c 100644 --- a/arch/arm/mach-omap2/pm24xx.c +++ b/arch/arm/mach-omap2/pm24xx.c @@ -325,7 +325,7 @@ static void omap2_pm_finish(void) enable_hlt(); } -static struct platform_suspend_ops omap_pm_ops = { +static const struct platform_suspend_ops omap_pm_ops = { .prepare = omap2_pm_prepare, .enter = omap2_pm_enter, .finish = omap2_pm_finish, diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index b88737f..1514908 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -669,7 +669,7 @@ static void omap3_pm_end(void) return; } -static struct platform_suspend_ops omap_pm_ops = { +static const struct platform_suspend_ops omap_pm_ops = { .begin = omap3_pm_begin, .end = omap3_pm_end, .prepare = omap3_pm_prepare, diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c index ee3c29c..f3e60a0 100644 --- a/arch/arm/mach-pnx4008/pm.c +++ b/arch/arm/mach-pnx4008/pm.c @@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_state_t state) (state == PM_SUSPEND_MEM); } -static struct platform_suspend_ops pnx4008_pm_ops = { +static const struct platform_suspend_ops pnx4008_pm_ops = { .enter = pnx4008_pm_enter, .valid = pnx4008_pm_valid, }; diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 166c15f..978e1b2 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c @@ -96,7 +96,7 @@ void pxa_pm_finish(void) pxa_cpu_pm_fns->finish(); } -static struct platform_suspend_ops pxa_pm_ops = { +static const struct platform_suspend_ops pxa_pm_ops = { .valid = pxa_pm_valid, .enter = pxa_pm_enter, .prepare = pxa_pm_prepare, diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c index cb47672..0a366cc 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.c +++ b/arch/arm/mach-pxa/sharpsl_pm.c 
@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info) } #ifdef CONFIG_PM -static struct platform_suspend_ops sharpsl_pm_ops = { +static const struct platform_suspend_ops sharpsl_pm_ops = { .prepare = pxa_pm_prepare, .finish = pxa_pm_finish, .enter = corgi_pxa_pm_enter, diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index c83fdc8..ab9fc44 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp) return virt_to_phys(sp); } -static struct platform_suspend_ops sa11x0_pm_ops = { +static const struct platform_suspend_ops sa11x0_pm_ops = { .enter = sa11x0_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index cbfb2ed..de319e0 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, } #endif +#ifdef CONFIG_PAX_PAGEEXEC + if (fsr & FSR_LNX_PF) { + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); + do_group_exit(SIGKILL); + } +#endif + tsk->thread.address = addr; tsk->thread.error_code = fsr; tsk->thread.trap_no = 14; @@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } #endif /* CONFIG_MMU */ +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (__force unsigned char __user *)pc+i)) + printk(KERN_CONT "?? "); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-4: "); + for (i = -1; i < 20; i++) { + unsigned long c; + if (get_user(c, (__force unsigned long __user *)sp+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08lx ", c); + } + printk("\n"); +} +#endif + /* * First Level Translation Fault Handler * diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index f5abc51..7ec524c 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (len > TASK_SIZE) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; } full_search: @@ -94,14 +97,14 @@ full_search: * Start a new search - just in case we missed * some holes. 
*/ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* * Remember the place where we stopped the search: */ diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 27cfca5..5bf3f2f 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c @@ -355,7 +355,7 @@ static void s3c_pm_finish(void) s3c_pm_check_cleanup(); } -static struct platform_suspend_ops s3c_pm_ops = { +static const struct platform_suspend_ops s3c_pm_ops = { .enter = s3c_pm_enter, .prepare = s3c_pm_prepare, .finish = s3c_pm_finish, diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h index 3b3159b..425ea94 100644 --- a/arch/avr32/include/asm/elf.h +++ b/arch/avr32/include/asm/elf.h @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00001000UL + +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h index b7f5c68..556135c 100644 --- a/arch/avr32/include/asm/kmap_types.h +++ b/arch/avr32/include/asm/kmap_types.h @@ -22,7 +22,8 @@ D(10) KM_IRQ0, D(11) KM_IRQ1, D(12) KM_SOFTIRQ0, D(13) KM_SOFTIRQ1, -D(14) KM_TYPE_NR +D(14) KM_CLEARPAGE, +D(15) KM_TYPE_NR }; #undef D diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c index f021edf..32d680e 100644 --- a/arch/avr32/mach-at32ap/pm.c +++ b/arch/avr32/mach-at32ap/pm.c @@ -176,7 +176,7 @@ out: return 0; } -static struct platform_suspend_ops avr32_pm_ops = { +static const struct platform_suspend_ops avr32_pm_ops = { .valid = avr32_pm_valid_state, .enter = avr32_pm_enter, }; diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index b61d86d..b7cf88f 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) int exception_trace = 1; +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. 
@@ -157,6 +174,16 @@ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); + do_group_exit(SIGKILL); + } + } +#endif + if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index 08bc44e..a6566ab 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vector, int signo, return -1; /* this means that we do not want to exit from the handler */ } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0xa1}, #ifdef CONFIG_SMP .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP, diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c index ea7f95f..a1b1ba5 100644 --- a/arch/blackfin/mach-common/pm.c +++ b/arch/blackfin/mach-common/pm.c @@ -232,7 +232,7 @@ static int bfin_pm_enter(suspend_state_t state) return 0; } -struct platform_suspend_ops bfin_pm_ops = { +const struct platform_suspend_ops bfin_pm_ops = { .enter = bfin_pm_enter, .valid = bfin_pm_valid, }; diff --git a/arch/blackfin/mm/maccess.c b/arch/blackfin/mm/maccess.c index b71cebc..e253211 100644 --- a/arch/blackfin/mm/maccess.c +++ b/arch/blackfin/mm/maccess.c @@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size) return bfin_mem_access_type(addr, size); } -long probe_kernel_read(void *dst, void *src, size_t size) +long probe_kernel_read(void *dst, const void *src, size_t size) { unsigned long lsrc = (unsigned long)src; int mem_type; @@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size) return -EFAULT; } -long probe_kernel_write(void *dst, void *src, size_t size) +long probe_kernel_write(void *dst, const void *src, size_t size) { unsigned long ldst = (unsigned long)dst; int mem_type; diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h index f8e16b2..c73ff79 100644 --- a/arch/frv/include/asm/kmap_types.h +++ b/arch/frv/include/asm/kmap_types.h @@ -23,6 +23,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 385fd30..6c3d97e 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) goto success; } @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi for (; vma; vma = vma->vm_next) { if (addr > limit) break; - if (addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) goto success; addr = vma->vm_end; } @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi for (; vma; vma = vma->vm_next) { if (addr > limit) break; - if (addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) goto success; addr = vma->vm_end; } diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index e4a80d8..11a7ea1 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ 
b/arch/ia64/hp/common/hwsw_iommu.c @@ -17,7 +17,7 @@ #include #include -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; /* swiotlb declarations & definitions: */ extern int swiotlb_late_init_with_default_size (size_t size); @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev) !sba_dma_ops.dma_supported(dev, *dev->dma_mask); } -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) { if (use_swiotlb(dev)) return &swiotlb_dma_ops; diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 4ce8d13..62d8118 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = { }, }; -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static int __init sba_init(void) @@ -2211,7 +2211,7 @@ sba_page_override(char *str) __setup("sbapagesize=",sba_page_override); -struct dma_map_ops sba_dma_ops = { +const struct dma_map_ops sba_dma_ops = { .alloc_coherent = sba_alloc_coherent, .free_coherent = sba_free_coherent, .map_page = sba_map_page, diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 7d09a09..a74b3b0 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -12,7 +12,7 @@ #define ARCH_HAS_DMA_GET_REQUIRED_MASK -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *daddr, gfp_t gfp) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); void *caddr; caddr = ops->alloc_coherent(dev, size, daddr, gfp); @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, static inline void dma_free_coherent(struct device *dev, size_t size, void *caddr, dma_addr_t daddr) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); debug_dma_free_coherent(dev, size, caddr, daddr); ops->free_coherent(dev, size, caddr, daddr); } @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size, static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); return ops->mapping_error(dev, daddr); } static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); return ops->dma_supported(dev, mask); } diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index b5298eb..67c6e62 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -42,6 +42,13 @@ */ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 
16 : 3*PAGE_SHIFT - 13) +#endif + #define PT_IA_64_UNWIND 0x70000001 /* IA-64 relocations: */ diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 367d299..9ad4279 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void); /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); typedef u64 ia64_mv_dma_get_required_mask (struct device *); -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); /* * WARNING: The legacy I/O space is _architected_. Platforms are @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline); # endif /* CONFIG_IA64_GENERIC */ extern void swiotlb_dma_init(void); -extern struct dma_map_ops *dma_get_ops(struct device *); +extern const struct dma_map_ops *dma_get_ops(struct device *); /* * Define default versions so we can extend machvec for new platforms without having diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index c3286f4..ed33359 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -12,7 +12,7 @@ * David Mosberger-Tang */ - +#include #include #include #include @@ -143,6 +143,17 @@ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_READONLY_NOEXEC PAGE_READONLY +# define PAGE_COPY_NOEXEC PAGE_COPY +#endif + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 449c8c0..432a3d2 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) const void *__cu_from = (from); \ long __cu_len = (n); \ \ - if (__access_ok(__cu_to, __cu_len, get_fs())) \ + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ __cu_len; \ }) @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) long __cu_len = (n); \ \ __chk_user_ptr(__cu_from); \ - if (__access_ok(__cu_from, __cu_len, get_fs())) \ + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ __cu_len; \ }) diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index f2c1600..969398a 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c @@ -3,7 +3,7 @@ /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly; -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) @@ 
-16,7 +16,7 @@ static int __init dma_init(void) } fs_initcall(dma_init); -struct dma_map_ops *dma_get_ops(struct device *dev) +const struct dma_map_ops *dma_get_ops(struct device *dev) { return dma_ops; } diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 1481b0a..e7d38ff 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -315,8 +315,7 @@ module_alloc (unsigned long size) void module_free (struct module *mod, void *module_region) { - if (mod && mod->arch.init_unw_table && - module_region == mod->module_init) { + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { unw_remove_unwind_table(mod->arch.init_unw_table); mod->arch.init_unw_table = NULL; } @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, } static inline int +in_init_rx (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; +} + +static inline int +in_init_rw (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; +} + +static inline int in_init (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->module_init < mod->init_size; + return in_init_rx(mod, addr) || in_init_rw(mod, addr); +} + +static inline int +in_core_rx (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; +} + +static inline int +in_core_rw (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; } static inline int in_core (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->module_core < mod->core_size; + return in_core_rx(mod, addr) || in_core_rw(mod, addr); } static inline int @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, break; case RV_BDREL: - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); + if (in_init_rx(mod, val)) + val -= (uint64_t) mod->module_init_rx; + else if (in_init_rw(mod, val)) + val -= (uint64_t) mod->module_init_rw; + else if (in_core_rx(mod, val)) + val -= (uint64_t) mod->module_core_rx; + else if (in_core_rw(mod, val)) + val -= (uint64_t) mod->module_core_rw; break; case RV_LTV: @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind * addresses have been selected... */ uint64_t gp; - if (mod->core_size > MAX_LTOFF) + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) /* * This takes advantage of fact that SHF_ARCH_SMALL gets allocated * at the end of the module. 
*/ - gp = mod->core_size - MAX_LTOFF / 2; + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; else - gp = mod->core_size / 2; - gp = (uint64_t) mod->module_core + ((gp + 7) & -8); + gp = (mod->core_size_rx + mod->core_size_rw) / 2; + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); mod->arch.gp = gp; DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); } diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index f6b1ff0..ccacc2f 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -43,7 +43,7 @@ struct device fallback_dev = { .dma_mask = &fallback_dev.coherent_dma_mask, }; -extern struct dma_map_ops intel_dma_ops; +extern const struct dma_map_ops intel_dma_ops; static int __init pci_iommu_init(void) { diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index d9485d9..e3deb12 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); } -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc_coherent = ia64_swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, .map_page = swiotlb_map_page, diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 609d500..7dde2a8 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + addr = mm->free_area_cache; + else +#endif + if (!addr) addr = mm->free_area_cache; @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { + if (start_addr != mm->mmap_base) { /* Start a new search --- just in case we missed some holes. */ - addr = TASK_UNMAPPED_BASE; + addr = mm->mmap_base; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* Remember the address where we stopped this search: */ mm->free_area_cache = addr + len; return addr; diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index e07218a..fe87c0f 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -196,7 +196,7 @@ SECTIONS /* Per-cpu data: */ . = ALIGN(PERCPU_PAGE_SIZE); PERCPU_VADDR(PERCPU_ADDR, :percpu) - __phys_per_cpu_start = __per_cpu_load; + __phys_per_cpu_start = per_cpu_load; . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits * into percpu page size */ diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 0799fea..4879544 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address) return pte_present(pte); } +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? 
"); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + void __kprobes ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); - if ((vma->vm_flags & mask) != mask) + if ((vma->vm_flags & mask) != mask) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) + goto bad_area; + + up_read(&mm->mmap_sem); + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); + do_group_exit(SIGKILL); + } +#endif + goto bad_area; + } + /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 1841ee7..3d78dd6 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u /* At this point: (!vmm || addr < vmm->vm_end). */ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) return -ENOMEM; - if (!vmm || (addr + len) <= vmm->vm_start) + if (check_heap_stack_gap(vmm, addr, len)) return addr; addr = ALIGN(vmm->vm_end, HPAGE_SIZE); } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index ed41759..fcaf88a 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -122,6 +122,19 @@ ia64_init_addr_space (void) vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; + +#ifdef CONFIG_PAX_PAGEEXEC + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { + vma->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (current->mm->pax_flags & MF_PAX_MPROTECT) + vma->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); down_write(¤t->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index a9d310d..447c7cc 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) return ret; } -static struct dma_map_ops sn_dma_ops = { +static const struct dma_map_ops sn_dma_ops = { .alloc_coherent = sn_dma_alloc_coherent, .free_coherent = sn_dma_free_coherent, .map_page = sn_dma_map_page, diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c index 82abd15..d95ae5d 100644 --- a/arch/m32r/lib/usercopy.c +++ b/arch/m32r/lib/usercopy.c @@ -14,6 +14,9 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) __copy_user(to,from,n); @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + prefetchw(to); if (access_ok(VERIFY_READ, from, n)) __copy_user_zeroing(to,from,n); diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h index 123b2fe..f8926eb 100644 --- a/arch/microblaze/include/asm/device.h +++ b/arch/microblaze/include/asm/device.h @@ -13,7 +13,7 @@ struct device_node; struct 
dev_archdata { /* DMA operations on that device */ - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; void *dma_data; }; diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 18b3731..714c068 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -43,14 +43,14 @@ static inline unsigned long device_to_mask(struct device *dev) return 0xfffffffful; } -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; /* * Available generic sets of operations */ -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { /* We don't handle the NULL dev case for ISA for now. We could * do it via an out of line call but it is not needed for now. The @@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dev->archdata.dma_ops; } -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) { dev->archdata.dma_ops = ops; } static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (unlikely(!ops)) return 0; @@ -87,7 +87,7 @@ static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (unlikely(ops == NULL)) return -EIO; @@ -103,7 +103,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -117,7 +117,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *memory; BUG_ON(!ops); @@ -131,7 +131,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 5a388ee..446d58f 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) } #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); -extern struct dma_map_ops *get_pci_dma_ops(void); +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); +extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) #define get_pci_dma_ops() NULL diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 79c7465..95a4dbe 100644 --- a/arch/microblaze/kernel/dma.c +++ 
b/arch/microblaze/kernel/dma.c @@ -133,7 +133,7 @@ static inline void dma_direct_unmap_page(struct device *dev, __dma_sync_page(dma_address, 0 , size, direction); } -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 23be25f..d32c14f 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -46,14 +46,14 @@ resource_size_t isa_mem_base; /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ unsigned int pci_flags; -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c index 4bbd313..acaf91b 100644 --- a/arch/mips/alchemy/devboards/pm.c +++ b/arch/mips/alchemy/devboards/pm.c @@ -110,7 +110,7 @@ static void db1x_pm_end(void) } -static struct platform_suspend_ops db1x_pm_ops = { +static const struct platform_suspend_ops db1x_pm_ops = { .valid = suspend_valid_only_mem, .begin = db1x_pm_begin, .enter = db1x_pm_enter, diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index ea77a42..829dd86 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -368,6 +368,13 @@ extern const char *__elf_platform; #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index a16beaf..02e1fae 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, #ifdef CONFIG_CPU_MIPS32 typedef struct { unsigned long pte_low, pte_high; } pte_t; #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) #else typedef struct { unsigned long long pte; } pte_t; #define pte_val(x) ((x).pte) diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h index bb937cc..0b614cd 100644 --- a/arch/mips/include/asm/system.h +++ b/arch/mips/include/asm/system.h @@ -234,6 +234,6 @@ extern void per_cpu_trap_init(void); */ #define __ARCH_WANT_UNLOCKED_CTXSW -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ALMASK) #endif /* _ASM_SYSTEM_H */ diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 9fdd8bc..fcf9d68 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include #include diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index ff44823..cf0b48a 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include /* diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 9b78ff6..1228099 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -270,6 +270,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, return -1; } +/* cannot be const, see kgdb_arch_init */ struct kgdb_arch arch_kgdb_ops; /* diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 9996094..ce0968a 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -474,15 +474,3 @@ unsigned long get_wchan(struct task_struct *task) out: return pc; } - -/* - * Don't forget that the stack pointer must be aligned on a 8 bytes - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. 
- */ -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - - return sp & ALMASK; -} diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index dd81b0f..2653a71 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vmm = find_vma(current->mm, addr); - if (task_size - len >= addr && - (!vmm || addr + len <= vmm->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len)) return addr; } - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else @@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). */ if (task_size - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (check_heap_stack_gap(vmm, addr, len)) return addr; addr = vmm->vm_end; if (do_color_align) diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c index 6c1fd90..f55e07a 100644 --- a/arch/mips/loongson/common/pm.c +++ b/arch/mips/loongson/common/pm.c @@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspend_state_t state) } } -static struct platform_suspend_ops loongson_pm_ops = { +static const struct platform_suspend_ops loongson_pm_ops = { .valid = loongson_pm_valid_state, .enter = loongson_pm_enter, }; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index b78f7d9..ed674d8 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -26,6 +26,23 @@ #include #include /* For VMALLOC_END */ +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 19f6cb1..6c78cf2 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. 
*/ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 01c1503..4b0fcf4 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -207,6 +207,17 @@ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) #define PAGE_COPY PAGE_EXECREAD #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 159a2b8..196c0d8 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -96,16 +96,38 @@ /* three functions to determine where in the module core * or init pieces the location is */ +static inline int in_init_rx(struct module *me, void *loc) +{ + return (loc >= me->module_init_rx && + loc < (me->module_init_rx + me->init_size_rx)); +} + +static inline int in_init_rw(struct module *me, void *loc) +{ + return (loc >= me->module_init_rw && + loc < (me->module_init_rw + me->init_size_rw)); +} + static inline int in_init(struct module *me, void *loc) { - return (loc >= me->module_init && - loc <= (me->module_init + me->init_size)); + return in_init_rx(me, loc) || in_init_rw(me, loc); +} + +static inline int in_core_rx(struct module *me, void *loc) +{ + return (loc >= me->module_core_rx && + loc < (me->module_core_rx + me->core_size_rx)); +} + +static inline int in_core_rw(struct module *me, void *loc) +{ + return (loc >= me->module_core_rw && + loc < (me->module_core_rw + me->core_size_rw)); } static inline int in_core(struct module *me, void *loc) { - return (loc >= me->module_core && - loc <= (me->module_core + me->core_size)); + return in_core_rx(me, loc) || in_core_rw(me, loc); } static inline int in_local(struct module *me, void *loc) @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, } /* align things a bit */ - me->core_size = ALIGN(me->core_size, 16); - me->arch.got_offset = me->core_size; - me->core_size += gots * sizeof(struct got_entry); + me->core_size_rw = ALIGN(me->core_size_rw, 16); + me->arch.got_offset = me->core_size_rw; + me->core_size_rw += gots * sizeof(struct got_entry); - me->core_size = ALIGN(me->core_size, 16); - me->arch.fdesc_offset = me->core_size; - me->core_size += fdescs * sizeof(Elf_Fdesc); + me->core_size_rw = ALIGN(me->core_size_rw, 16); + me->arch.fdesc_offset = me->core_size_rw; + me->core_size_rw += fdescs * sizeof(Elf_Fdesc); me->arch.got_max = gots; me->arch.fdesc_max = fdescs; @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) BUG_ON(value == 0); - got = me->module_core + me->arch.got_offset; + got = me->module_core_rw + me->arch.got_offset; for (i = 0; got[i].addr; i++) if (got[i].addr == value) goto out; @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) #ifdef 
CONFIG_64BIT static Elf_Addr get_fdesc(struct module *me, unsigned long value) { - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; if (!value) { printk(KERN_ERR "%s: zero OPD requested!\n", me->name); @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) /* Create new one */ fdesc->addr = value; - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; return (Elf_Addr)fdesc; } #endif /* CONFIG_64BIT */ @@ -849,7 +871,7 @@ register_unwind_table(struct module *me, table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; end = table + sechdrs[me->arch.unwind_section].sh_size; - gp = (Elf_Addr)me->module_core + me->arch.got_offset; + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", me->arch.unwind_section, table, end, gp); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index c9b9322..02d8940 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len) /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) return addr; addr = vma->vm_end; } @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping, /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) return addr; addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; if (addr < vma->vm_end) /* handle wraparound */ @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, if (flags & MAP_FIXED) return addr; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (filp) { addr = get_shared_area(filp->f_mapping, addr, len, pgoff); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 8b58bf0..7afff03 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm,regs->iaoq[0]); - if (vma && (regs->iaoq[0] >= vma->vm_start) - && (vma->vm_flags & VM_EXEC)) { - + if (vma && (regs->iaoq[0] >= vma->vm_start)) { fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 18162ce..4d274ba 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data); static unsigned long parisc_acctyp(unsigned long code, unsigned int inst) { - if (code == 6 || code == 16) + if (code == 6 || code == 7 || code == 16) return VM_EXEC; switch (inst & 0xf0000000) { @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) + * + * returns 1 when task should be killed + * 2 when rt_sigreturn trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct 
pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: unpatched PLT emulation */ + unsigned int bl, depwi; + + err = get_user(bl, (unsigned int *)instruction_pointer(regs)); + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); + + if (err) + break; + + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; + + err = get_user(ldw, (unsigned int *)addr); + err |= get_user(bv, (unsigned int *)(addr+4)); + err |= get_user(ldw2, (unsigned int *)(addr+8)); + + if (err) + break; + + if (ldw == 0x0E801096U && + bv == 0xEAC0C000U && + ldw2 == 0x0E881095U) + { + unsigned int resolver, map; + + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); + if (err) + break; + + regs->gr[20] = instruction_pointer(regs)+8; + regs->gr[21] = map; + regs->gr[22] = resolver; + regs->iaoq[0] = resolver | 3UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + return 3; + } + } + } while (0); +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + +#ifndef CONFIG_PAX_EMUSIGRT + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: rt_sigreturn emulation */ + unsigned int ldi1, ldi2, bel, nop; + + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); + + if (err) + break; + + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && + ldi2 == 0x3414015AU && + bel == 0xE4008200U && + nop == 0x08000240U) + { + regs->gr[25] = (ldi1 & 2) >> 1; + regs->gr[20] = __NR_rt_sigreturn; + regs->gr[31] = regs->iaoq[1] + 16; + regs->sr[0] = regs->iasq[1]; + regs->iaoq[0] = 0x100UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + regs->iasq[0] = regs->sr[2]; + regs->iasq[1] = regs->sr[2]; + return 2; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? 
"); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fix; @@ -192,8 +303,33 @@ good_area: acc_type = parisc_acctyp(code,regs->iir); - if ((vma->vm_flags & acc_type) != acc_type) + if ((vma->vm_flags & acc_type) != acc_type) { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && + (address & ~3UL) == instruction_pointer(regs)) + { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 3: + return; +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + case 2: + return; +#endif + + } + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); + do_group_exit(SIGKILL); + } +#endif + goto bad_area; + } /* * If for any reason at all we couldn't handle the fault, make diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index a3954e4..5db6a83 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -11,7 +11,7 @@ struct device_node; struct dev_archdata { /* DMA operations on that device */ - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; /* * When an iommu is in use, dma_data is used as a ptr to the base of the diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index c85ef23..01138ae 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -66,12 +66,13 @@ static inline unsigned long device_to_mask(struct device *dev) /* * Available generic sets of operations */ +/* cannot be const */ #ifdef CONFIG_PPC64 extern struct dma_map_ops dma_iommu_ops; #endif -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { /* We don't handle the NULL dev case for ISA for now. We could * do it via an out of line call but it is not needed for now. 
The @@ -84,7 +85,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dev->archdata.dma_ops; } -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) { dev->archdata.dma_ops = ops; } @@ -118,7 +119,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (unlikely(dma_ops == NULL)) return 0; @@ -129,7 +130,7 @@ static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (unlikely(dma_ops == NULL)) return -EIO; @@ -144,7 +145,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!dma_ops); @@ -159,7 +160,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); @@ -170,7 +171,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (dma_ops->mapping_error) return dma_ops->mapping_error(dev, dma_addr); diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index c376eda..a8cd687 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG]; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -extern unsigned long randomize_et_dyn(unsigned long base); -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) +#define ELF_ET_DYN_BASE (0x20000000) + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (0x10000000UL) + +#ifdef __powerpc64__ +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 
16 : 28) +#else +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif +#endif /* * Our registers are always unsigned longs, whether we're a 32 bit @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, (0x7ff >> (PAGE_SHIFT - 12)) : \ (0x3ffff >> (PAGE_SHIFT - 12))) -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #endif /* __KERNEL__ */ /* diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index edfc980..1766f59 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void); extern void iommu_init_early_dart(void); extern void iommu_init_early_pasemi(void); +/* dma-iommu.c */ +extern int dma_iommu_dma_supported(struct device *dev, u64 mask); + #ifdef CONFIG_PCI extern void pci_iommu_init(void); extern void pci_direct_iommu_init(void); diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h index bca8fdc..61e9580 100644 --- a/arch/powerpc/include/asm/kmap_types.h +++ b/arch/powerpc/include/asm/kmap_types.h @@ -27,6 +27,7 @@ enum km_type { KM_PPC_SYNC_PAGE, KM_PPC_SYNC_ICACHE, KM_KDB, + KM_CLEARPAGE, KM_TYPE_NR }; diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 53b64be..82be2e0 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr; * and needs to be executable. This means the whole heap ends * up being executable. */ -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr; #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) #endif +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #ifndef __ASSEMBLY__ #undef STRICT_MM_TYPECHECKS diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 358ff14..fbf4ef6 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -172,15 +172,18 @@ do { \ * stack by default, so in the absense of a PT_GNU_STACK program header * we turn execute permission off. */ -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifndef CONFIG_PAX_PAGEEXEC #define VM_STACK_DEFAULT_FLAGS \ (test_thread_flag(TIF_32BIT) ? 
\ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) +#endif #include diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index a20a9ad..158bb9a 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) } #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); -extern struct dma_map_ops *get_pci_dma_ops(void); +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); +extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) #define get_pci_dma_ops() NULL diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h index 4aad413..85d86bf 100644 --- a/arch/powerpc/include/asm/pte-hash32.h +++ b/arch/powerpc/include/asm/pte-hash32.h @@ -21,6 +21,7 @@ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ #define _PAGE_USER 0x004 /* usermode access allowed */ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ +#define _PAGE_EXEC _PAGE_GUARDED #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d62fdf4..fb32167 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -191,6 +191,7 @@ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ #define DSISR_NOHPTE 0x40000000 /* no translation found */ +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ #define DSISR_PROTFAULT 0x08000000 /* protection fault */ #define DSISR_ISSTORE 0x02000000 /* access was a store */ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h index 8979d4c..d2fd0d3 100644 --- a/arch/powerpc/include/asm/swiotlb.h +++ b/arch/powerpc/include/asm/swiotlb.h @@ -13,7 +13,7 @@ #include -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static inline void dma_mark_clean(void *addr, size_t size) {} diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index bd0fb84..a42a14b 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -13,6 +13,8 @@ #define VERIFY_READ 0 #define VERIFY_WRITE 1 +extern void check_object_size(const void *ptr, unsigned long n, bool to); + /* * The fs value determines whether argument validity checking should be * performed or not. 
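check_object_size() is only declared above; the copy helpers further down call it with the kernel-side pointer and any length that is not a compile-time constant. The real implementation lives elsewhere in the patch and consults slab and stack metadata; the sketch below shows only the bounds contract the callers depend on, with a made-up tracked_object record standing in for that lookup.

#include <stdio.h>

/* one tracked allocation, standing in for slab/stack object metadata */
struct tracked_object {
        const char   *start;
        unsigned long size;
};

static void check_copy_bounds(const struct tracked_object *obj,
                              const void *ptr, unsigned long n)
{
        const char *p = ptr;

        if (p < obj->start || p + n > obj->start + obj->size)
                fprintf(stderr, "usercopy: %lu byte copy overruns %lu byte object\n",
                        n, obj->size);
}

int main(void)
{
        char buf[64];
        struct tracked_object obj = { buf, sizeof(buf) };

        check_copy_bounds(&obj, buf, 32);       /* within bounds: silent */
        check_copy_bounds(&obj, buf + 8, 128);  /* overrun: reported     */
        return 0;
}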
If get_fs() == USER_DS, checking is performed, with @@ -327,52 +329,6 @@ do { \ extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from, unsigned long size); -#ifndef __powerpc64__ - -static inline unsigned long copy_from_user(void *to, - const void __user *from, unsigned long n) -{ - unsigned long over; - - if (access_ok(VERIFY_READ, from, n)) - return __copy_tofrom_user((__force void __user *)to, from, n); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + n - TASK_SIZE; - return __copy_tofrom_user((__force void __user *)to, from, - n - over) + over; - } - return n; -} - -static inline unsigned long copy_to_user(void __user *to, - const void *from, unsigned long n) -{ - unsigned long over; - - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_tofrom_user(to, (__force void __user *)from, n); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + n - TASK_SIZE; - return __copy_tofrom_user(to, (__force void __user *)from, - n - over) + over; - } - return n; -} - -#else /* __powerpc64__ */ - -#define __copy_in_user(to, from, size) \ - __copy_tofrom_user((to), (from), (size)) - -extern unsigned long copy_from_user(void *to, const void __user *from, - unsigned long n); -extern unsigned long copy_to_user(void __user *to, const void *from, - unsigned long n); -extern unsigned long copy_in_user(void __user *to, const void __user *from, - unsigned long n); - -#endif /* __powerpc64__ */ - static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, if (ret == 0) return 0; } + + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + return __copy_tofrom_user((__force void __user *)to, from, n); } @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, if (ret == 0) return 0; } + + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_tofrom_user(to, (__force const void __user *)from, n); } @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to, return __copy_to_user_inatomic(to, from, size); } +#ifndef __powerpc64__ + +static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, unsigned long n) +{ + unsigned long over; + + if ((long)n < 0) + return n; + + if (access_ok(VERIFY_READ, from, n)) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + return __copy_tofrom_user((__force void __user *)to, from, n); + } + if ((unsigned long)from < TASK_SIZE) { + over = (unsigned long)from + n - TASK_SIZE; + if (!__builtin_constant_p(n - over)) + check_object_size(to, n - over, false); + return __copy_tofrom_user((__force void __user *)to, from, + n - over) + over; + } + return n; +} + +static inline unsigned long __must_check copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + unsigned long over; + + if ((long)n < 0) + return n; + + if (access_ok(VERIFY_WRITE, to, n)) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_tofrom_user(to, (__force void __user *)from, n); + } + if ((unsigned long)to < TASK_SIZE) { + over = (unsigned long)to + n - TASK_SIZE; + if (!__builtin_constant_p(n)) + check_object_size(from, n - over, true); + return __copy_tofrom_user(to, (__force void __user *)from, + n - over) + over; + } + return n; +} + +#else /* __powerpc64__ */ + +#define __copy_in_user(to, from, size) \ + 
__copy_tofrom_user((to), (from), (size)) + +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if ((long)n < 0 || n > INT_MAX) + return n; + + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + + if (likely(access_ok(VERIFY_READ, from, n))) + n = __copy_from_user(to, from, n); + else + memset(to, 0, n); + return n; +} + +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if ((long)n < 0 || n > INT_MAX) + return n; + + if (likely(access_ok(VERIFY_WRITE, to, n))) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + n = __copy_to_user(to, from, n); + } + return n; +} + +extern unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n); + +#endif /* __powerpc64__ */ + extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 37771a5..648530c 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, } /* We support DMA to/from any memory page via the iommu */ -static int dma_iommu_dma_supported(struct device *dev, u64 mask) +int dma_iommu_dma_supported(struct device *dev, u64 mask) { struct iommu_table *tbl = get_iommu_table_base(dev); diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 02f724f..065adf6 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable; * map_page, and unmap_page on highmem, use normal dma_ops * for everything else. 
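Both the 32-bit and 64-bit copy_from_user()/copy_to_user() replacements above begin by refusing any length whose sign bit is set: (long)n < 0 catches a size that came from a negative intermediate value and wrapped to an enormous unsigned number, before access_ok() or the copy routine ever sees it. A small standalone demonstration of the failure mode being guarded against; the values are arbitrary.

#include <stdio.h>

static int length_is_negative(unsigned long n)
{
        return (long)n < 0;     /* the same test the helpers above add */
}

int main(void)
{
        long have = 16, want = 32;
        unsigned long n = (unsigned long)(have - want); /* -16 wraps to a huge value */

        printf("n = %lu, rejected = %d\n", n, length_is_negative(n));
        return 0;
}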
*/ -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, .map_sg = swiotlb_map_sg_attrs, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 84d6367..df07362 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -135,7 +135,7 @@ static inline void dma_direct_sync_single(struct device *dev, } #endif -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 24dcc0e..a300455 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -455,6 +455,7 @@ storage_fault_common: std r14,_DAR(r1) std r15,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD + bl .save_nvgprs mr r4,r14 mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) @@ -464,8 +465,7 @@ storage_fault_common: cmpdi r3,0 bne- 1f b .ret_from_except_lite -1: bl .save_nvgprs - mr r5,r3 +1: mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) bl .bad_page_fault diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3e423fb..34f47a0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -840,10 +840,10 @@ handle_page_fault: 11: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD + bl .save_nvgprs bl .do_page_fault cmpdi r3,0 beq+ 13f - bl .save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 21266ab..e27733b 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask) return 1; } -static struct dma_map_ops ibmebus_dma_ops = { +static const struct dma_map_ops ibmebus_dma_ops = { .alloc_coherent = ibmebus_alloc_coherent, .free_coherent = ibmebus_free_coherent, .map_sg = ibmebus_map_sg, diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 82a7b22..fe7180e 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -128,7 +128,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) return 0; - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) regs->nip += 4; return 1; @@ -360,7 +360,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, /* * Global data */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08}, }; diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 477c663..4f50234 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -31,11 +31,24 @@ LIST_HEAD(module_bug_list); +#ifdef CONFIG_PAX_KERNEXEC void *module_alloc(unsigned long size) { if (size == 0) return NULL; + return vmalloc(size); +} + +void *module_alloc_exec(unsigned long size) +#else +void *module_alloc(unsigned long size) +#endif + +{ + if (size == 0) + return NULL; + return vmalloc_exec(size); } @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region) vfree(module_region); } +#ifdef CONFIG_PAX_KERNEXEC +void module_free_exec(struct 
module *mod, void *module_region) +{ + module_free(mod, module_region); +} +#endif + static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, const char *name) diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index f832773..0507238 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, me->arch.core_plt_section = i; } if (!me->arch.core_plt_section || !me->arch.init_plt_section) { - printk("Module doesn't contain .plt or .init.plt sections.\n"); + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); return -ENOEXEC; } @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location, DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); /* Init, or core PLT? */ - if (location >= mod->module_core - && location < mod->module_core + mod->core_size) + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; - else + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; + else { + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); + return ~0UL; + } /* Find this entry, or if that fails, the next avail. entry */ while (entry->jump[0]) { diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 5b38f6a..8175940 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -51,14 +51,14 @@ resource_size_t isa_mem_base; unsigned int ppc_pci_flags = 0; -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 773424d..1d2df74 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1215,51 +1215,3 @@ unsigned long arch_align_stack(unsigned long sp) sp -= get_random_int() & ~PAGE_MASK; return sp & ~0xf; } - -static inline unsigned long brk_rnd(void) -{ - unsigned long rnd = 0; - - /* 8MB for 32bit, 1GB for 64bit */ - if (is_32bit_task()) - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); - else - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); - - return rnd << PAGE_SHIFT; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long base = mm->brk; - unsigned long ret; - -#ifdef CONFIG_PPC_STD_MMU_64 - /* - * If we are using 1TB segments and we are allowed to randomise - * the heap, we can put it above 1TB so it is backed by a 1TB - * segment. Otherwise the heap will be in the bottom 1TB - * which always uses 256MB segments and this may result in a - * performance penalty. 
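With the module core and init areas split into executable (rx) and writable (rw) halves, do_plt_call() above has to test the relocation site against both halves of each area and reject anything that falls outside the module entirely. The shape of that range test, reduced to a standalone example; the region type, addresses and sizes are invented.

#include <stdio.h>

struct region { unsigned long base, size; };

static int in_region(const struct region *r, unsigned long loc)
{
        return loc >= r->base && loc < r->base + r->size;
}

int main(void)
{
        struct region core_rx = { 0xc0000000UL, 0x4000 };       /* module code */
        struct region core_rw = { 0xc0010000UL, 0x2000 };       /* module data */
        unsigned long loc     = 0xc0001234UL;                   /* relocation site */

        if (in_region(&core_rx, loc) || in_region(&core_rw, loc))
                printf("0x%lx targets the module core\n", loc);
        else
                printf("0x%lx is outside the module: reject\n", loc);
        return 0;
}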
- */ - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); -#endif - - ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - - return ret; -} - -unsigned long randomize_et_dyn(unsigned long base) -{ - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < base) - return base; - - return ret; -} diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 2666101..e908fba 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { if (save_user_regs(regs, frame, 0, 1)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 2fe6fc6..ada0d96 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, current->thread.fpscr.val = 0; /* Set up to return from userspace. */ - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; } else { err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 13002fe..e25f4b1 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "setup.h" @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_base = VDSO32_MBASE; #endif - current->mm->context.vdso_base = 0; + current->mm->context.vdso_base = ~0UL; /* vDSO has a problem and was disabled, just don't "enable" it for the * process @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_base = get_unmapped_area(NULL, vdso_base, (vdso_pages << PAGE_SHIFT) + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), - 0, 0); + 0, MAP_PRIVATE | MAP_EXECUTABLE); if (IS_ERR_VALUE(vdso_base)) { rc = vdso_base; goto fail_mmapsem; diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 00b9436..699269a 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -602,11 +602,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev, vio_cmo_dealloc(viodev, alloc_size); } -struct dma_map_ops vio_dma_mapping_ops = { +static const struct dma_map_ops vio_dma_mapping_ops = { .alloc_coherent = vio_dma_iommu_alloc_coherent, .free_coherent = vio_dma_iommu_free_coherent, .map_sg = vio_dma_iommu_map_sg, .unmap_sg = vio_dma_iommu_unmap_sg, + .dma_supported = dma_iommu_dma_supported, .map_page = vio_dma_iommu_map_page, .unmap_page = vio_dma_iommu_unmap_page, @@ -860,7 +861,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev) static void vio_cmo_set_dma_ops(struct vio_dev *viodev) { - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported; viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops; } diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c index 5eea6f3..5d10396 100644 --- a/arch/powerpc/lib/usercopy_64.c +++ 
b/arch/powerpc/lib/usercopy_64.c @@ -9,22 +9,6 @@ #include #include -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_READ, from, n))) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; -} - -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_WRITE, to, n))) - n = __copy_to_user(to, from, n); - return n; -} - unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long n) { @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, return n; } -EXPORT_SYMBOL(copy_from_user); -EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(copy_in_user); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 1bd712c..4a0026d 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -30,6 +30,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -41,6 +45,7 @@ #include #include #include +#include #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs) @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs) } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->nip = fault address) + * + * returns 1 when task should be killed + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int __user *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, * indicate errors in DSISR but can validly be set in SRR1. */ if (trap == 0x400) - error_code &= 0x48200000; + error_code &= 0x58200000; else is_write = error_code & DSISR_ISSTORE; #else @@ -257,7 +289,7 @@ good_area: * "undefined". Of those that can be set, this is the only * one which seems bad. */ - if (error_code & 0x10000000) + if (error_code & DSISR_GUARDED) /* Guarded storage error. */ goto bad_area; #endif /* CONFIG_8xx */ @@ -272,7 +304,7 @@ good_area: * processors use the same I/D cache coherency mechanism * as embedded. 
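The trap 0x400 (instruction storage) path in do_page_fault() above widens its error-code mask from 0x48200000 to 0x58200000. The extra bit is DSISR_GUARDED (0x10000000, defined earlier in the patch), so a fetch from guarded storage, which the pte-hash32 change uses to stand in for a non-executable page, is no longer filtered out before the PAGEEXEC checks run. A two-line demonstration of the difference:

#include <stdio.h>

#define DSISR_GUARDED   0x10000000u     /* fetch from guarded storage */
#define OLD_MASK        0x48200000u
#define NEW_MASK        (OLD_MASK | DSISR_GUARDED)      /* == 0x58200000 */

int main(void)
{
        unsigned int error_code = DSISR_GUARDED;        /* a guarded-storage fetch */

        printf("old mask keeps the bit: %s\n", (error_code & OLD_MASK) ? "yes" : "no");
        printf("new mask keeps the bit: %s\n", (error_code & NEW_MASK) ? "yes" : "no");
        return 0;
}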
*/ - if (error_code & DSISR_PROTFAULT) + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) goto bad_area; #endif /* CONFIG_PPC_STD_MMU */ @@ -341,6 +373,23 @@ bad_area: bad_area_nosemaphore: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { +#ifdef CONFIG_PPC_STD_MMU + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { +#else + if (is_exec && regs->nip == address) { +#endif + switch (pax_handle_fetch_fault(regs)) { + } + + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); + do_group_exit(SIGKILL); + } + } +#endif + _exception(SIGSEGV, regs, code, address); return 0; } diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c index 5a783d8..c23e14b 100644 --- a/arch/powerpc/mm/mmap_64.c +++ b/arch/powerpc/mm/mmap_64.c @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index ba51948..165f6a1 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return check_heap_stack_gap(vma, addr, len); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) @@ -256,7 +256,7 @@ full_search: addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); continue; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* * Remember the place where we stopped the search: */ @@ -336,7 +336,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, * return with success: */ vma = find_vma(mm, addr); - if (!vma || (addr + len) <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* remember the address as a hint for next time */ if (use_cache) mm->free_area_cache = addr; @@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, if (fixed && addr > (mm->task_size - len)) return -EINVAL; +#ifdef CONFIG_PAX_RANDMMAP + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) + addr = 0; +#endif + /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { addr = _ALIGN_UP(addr, 1ul << pshift); diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index b5c753d..8f01abe 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -235,7 +235,7 @@ static void lite5200_pm_end(void) lite5200_pm_target_state = PM_SUSPEND_ON; } -static struct platform_suspend_ops lite5200_pm_ops = { +static const struct platform_suspend_ops lite5200_pm_ops = { .valid = lite5200_pm_valid, .begin = lite5200_pm_begin, .prepare = lite5200_pm_prepare, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index 7672253..4dfe095 100644 --- 
a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c @@ -189,7 +189,7 @@ void mpc52xx_pm_finish(void) iounmap(mbar); } -static struct platform_suspend_ops mpc52xx_pm_ops = { +static const struct platform_suspend_ops mpc52xx_pm_ops = { .valid = mpc52xx_pm_valid, .prepare = mpc52xx_pm_prepare, .enter = mpc52xx_pm_enter, diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index ebe6c35..8914e7b 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void) return ret; } -static struct platform_suspend_ops mpc83xx_suspend_ops = { +static const struct platform_suspend_ops mpc83xx_suspend_ops = { .valid = mpc83xx_suspend_valid, .begin = mpc83xx_suspend_begin, .enter = mpc83xx_suspend_enter, diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 3712900..645f19f 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask) static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); -struct dma_map_ops dma_iommu_fixed_ops = { +const struct dma_map_ops dma_iommu_fixed_ops = { .alloc_coherent = dma_fixed_alloc_coherent, .free_coherent = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 23083c3..ec00e40 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -695,7 +695,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask) return mask >= DMA_BIT_MASK(32); } -static struct dma_map_ops ps3_sb_dma_ops = { +static const struct dma_map_ops ps3_sb_dma_ops = { .alloc_coherent = ps3_alloc_coherent, .free_coherent = ps3_free_coherent, .map_sg = ps3_sb_map_sg, @@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, }; -static struct dma_map_ops ps3_ioc0_dma_ops = { +static const struct dma_map_ops ps3_ioc0_dma_ops = { .alloc_coherent = ps3_alloc_coherent, .free_coherent = ps3_free_coherent, .map_sg = ps3_ioc0_map_sg, diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 9082eb9..47d3ac2 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_state_t state) return 1; } -static struct platform_suspend_ops pmc_suspend_ops = { +static const struct platform_suspend_ops pmc_suspend_ops = { .valid = pmc_suspend_valid, .enter = pmc_suspend_enter, }; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bee1c0f..cdd44f3 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -230,13 +230,12 @@ config AUDIT_ARCH config S390_EXEC_PROTECT bool "Data execute protection" + default y help This option allows to enable a buffer overflow protection for user - space programs and it also selects the addressing mode option above. - The kernel parameter noexec=on will enable this feature and also - switch the addressing modes, default is disabled. Enabling this (via - kernel parameter) on machines earlier than IBM System z9-109 EC/BC - will reduce system performance. + space programs. + Enabling this on machines earlier than IBM System z9-109 EC/BC will + reduce system performance. 
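The MF_PAX_RANDMMAP handling added to arch_pick_mmap_layout() (for powerpc just above, and again for s390 further below) is the same everywhere: the legacy bottom-up layout raises mmap_base by delta_mmap, while the top-down layout lowers it by delta_mmap plus delta_stack so the mapping area stays clear of the randomised stack. A standalone restatement of that arithmetic; the base and delta values are made up for the example.

#include <stdio.h>

int main(void)
{
        unsigned long long task_unmapped_base = 0x100000000000ULL;     /* example only */
        unsigned long long top_down_base      = 0x7ffff0000000ULL;     /* example only */
        unsigned long long delta_mmap         = 0x12345000ULL;         /* random, page aligned */
        unsigned long long delta_stack        = 0x00234000ULL;         /* random, page aligned */

        /* legacy (bottom-up) layout: push mmap_base up by delta_mmap */
        printf("legacy mmap_base:   0x%llx\n", task_unmapped_base + delta_mmap);

        /* top-down layout: pull mmap_base down below the randomised stack */
        printf("top-down mmap_base: 0x%llx\n", top_down_base - (delta_mmap + delta_stack));
        return 0;
}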
comment "Code generation options" diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 354d426..883892c 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -163,6 +163,13 @@ extern unsigned int vdso_enabled; that it will "exec", and that there is sufficient room for the brk. */ #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index d6b1ed0..071ffbd 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -234,6 +234,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + if (__builtin_constant_p(n) && (n <= 256)) return uaccess.copy_from_user_small(n, from, to); else @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) unsigned int sz = __compiletime_object_size(to); might_fault(); + + if ((long)n < 0) + return n; + if (unlikely(sz != -1 && sz < n)) { copy_from_user_overflow(); return n; diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 22cfd63..a70f1db 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, /* Increase core size by size of got & plt and set start offsets for got and plt. */ - me->core_size = ALIGN(me->core_size, 4); - me->arch.got_offset = me->core_size; - me->core_size += me->arch.got_size; - me->arch.plt_offset = me->core_size; - me->core_size += me->arch.plt_size; + me->core_size_rw = ALIGN(me->core_size_rw, 4); + me->arch.got_offset = me->core_size_rw; + me->core_size_rw += me->arch.got_size; + me->arch.plt_offset = me->core_size_rx; + me->core_size_rx += me->arch.plt_size; return 0; } @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, if (info->got_initialized == 0) { Elf_Addr *gotent; - gotent = me->module_core + me->arch.got_offset + + gotent = me->module_core_rw + me->arch.got_offset + info->got_offset; *gotent = val; info->got_initialized = 1; @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, else if (r_type == R_390_GOTENT || r_type == R_390_GOTPLTENT) *(unsigned int *) loc = - (val + (Elf_Addr) me->module_core - loc) >> 1; + (val + (Elf_Addr) me->module_core_rw - loc) >> 1; else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) *(unsigned long *) loc = val; @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. 
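module_frob_arch_sections() above now accounts the GOT against the module's writable (rw) core and the PLT against its executable (rx) core, recording the offset at which each was placed; the relocation cases that follow then address the tables relative to module_core_rw or module_core_rx. The bookkeeping, reduced to a standalone sketch with invented sizes; ALIGN_UP stands in for the kernel's ALIGN().

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long core_size_rw = 0x1234, core_size_rx = 0x2000;
        unsigned long got_size = 64 * 8, plt_size = 32 * 12;
        unsigned long got_offset, plt_offset;

        core_size_rw = ALIGN_UP(core_size_rw, 4);
        got_offset   = core_size_rw;            /* writable GOT goes into the rw core */
        core_size_rw += got_size;

        plt_offset   = core_size_rx;            /* executable PLT stubs go into the rx core */
        core_size_rx += plt_size;

        printf("got at rw+0x%lx, rw core now 0x%lx\n", got_offset, core_size_rw);
        printf("plt at rx+0x%lx, rx core now 0x%lx\n", plt_offset, core_size_rx);
        return 0;
}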
*/ if (info->plt_initialized == 0) { unsigned int *ip; - ip = me->module_core + me->arch.plt_offset + + ip = me->module_core_rx + me->arch.plt_offset + info->plt_offset; #ifndef CONFIG_64BIT ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val - loc + 0xffffUL < 0x1ffffeUL) || (r_type == R_390_PLT32DBL && val - loc + 0xffffffffULL < 0x1fffffffeULL))) - val = (Elf_Addr) me->module_core + + val = (Elf_Addr) me->module_core_rx + me->arch.plt_offset + info->plt_offset; val += rela->r_addend - loc; @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF64: /* 64 bit offset to GOT. */ val = val + rela->r_addend - - ((Elf_Addr) me->module_core + me->arch.got_offset); + ((Elf_Addr) me->module_core_rw + me->arch.got_offset); if (r_type == R_390_GOTOFF16) *(unsigned short *) loc = val; else if (r_type == R_390_GOTOFF32) @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ - val = (Elf_Addr) me->module_core + me->arch.got_offset + + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) *(unsigned int *) loc = val; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c8e8e13..73bca9f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -281,7 +281,7 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); -unsigned int user_mode = HOME_SPACE_MODE; +unsigned int user_mode = SECONDARY_SPACE_MODE; EXPORT_SYMBOL_GPL(user_mode); static int set_amode_and_uaccess(unsigned long user_amode, @@ -310,17 +310,6 @@ static int set_amode_and_uaccess(unsigned long user_amode, } } -/* - * Switch kernel/user addressing modes? - */ -static int __init early_parse_switch_amode(char *p) -{ - if (user_mode != SECONDARY_SPACE_MODE) - user_mode = PRIMARY_SPACE_MODE; - return 0; -} -early_param("switch_amode", early_parse_switch_amode); - static int __init early_parse_user_mode(char *p) { if (p && strcmp(p, "primary") == 0) @@ -337,20 +326,6 @@ static int __init early_parse_user_mode(char *p) } early_param("user_mode", early_parse_user_mode); -#ifdef CONFIG_S390_EXEC_PROTECT -/* - * Enable execute protection? - */ -static int __init early_parse_noexec(char *p) -{ - if (!strncmp(p, "off", 3)) - return 0; - user_mode = SECONDARY_SPACE_MODE; - return 0; -} -early_param("noexec", early_parse_noexec); -#endif /* CONFIG_S390_EXEC_PROTECT */ - static void setup_addressing_mode(void) { if (user_mode == SECONDARY_SPACE_MODE) { diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index a8c2af8..93a9d95 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size) return rc ? 
rc : count; } -long probe_kernel_write(void *dst, void *src, size_t size) +long probe_kernel_write(void *dst, const void *src, size_t size) { long copied = 0; diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 869efba..b9d76aa 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm) */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = s390_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = s390_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c index 4499a37..adc9b4b 100644 --- a/arch/sh/boards/mach-hp6xx/pm.c +++ b/arch/sh/boards/mach-hp6xx/pm.c @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops hp6x0_pm_ops = { +static const struct platform_suspend_ops hp6x0_pm_ops = { .enter = hp6x0_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index bea3337..0c92252 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -1,10 +1,10 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern void no_iommu_init(void); -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return dma_ops; } @@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->dma_supported) return ops->dma_supported(dev, mask); @@ -24,7 +24,7 @@ static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; @@ -59,7 +59,7 @@ static inline int dma_get_cache_alignment(void) static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -70,7 +70,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) static inline void *dma_alloc_coherent(struct device *dev, 
size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *memory; if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) @@ -87,7 +87,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, static inline void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (dma_release_from_coherent(dev, get_order(size), vaddr)) return; diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index e559687..a6f95ae 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t state) return 0; } -static struct platform_suspend_ops sh_pm_ops = { +static const struct platform_suspend_ops sh_pm_ops = { .enter = sh_pm_enter, .valid = suspend_valid_only_mem, }; diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 3c55b87..92ccab3 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, } #endif -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc_coherent = dma_generic_alloc_coherent, .free_coherent = dma_generic_free_coherent, .map_page = nommu_map_page, diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index efb6d39..49e7304 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -319,7 +319,7 @@ void kgdb_arch_exit(void) unregister_die_notifier(&kgdb_notifier); } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: trapa #0x3c */ #ifdef CONFIG_CPU_LITTLE_ENDIAN .gdb_bpt_instr = { 0x3c, 0xc3 }, diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index c86a085..819f30c 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -22,7 +22,7 @@ #define PREALLOC_DMA_DEBUG_ENTRIES 4096 -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int __init dma_init(void) diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index afeb710..fa68ac9 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } @@ -106,7 +105,7 @@ full_search: } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* * Remember the place where we stopped the search: */ @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr - len, len)) { /* remember the address as a hint for next time */ return 
(mm->free_area_cache = addr-len); } @@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 113225b..7fd04e7 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/ # Export what is needed by arch/sparc/boot/Makefile export VMLINUX_INIT VMLINUX_MAIN VMLINUX_INIT := $(head-y) $(init-y) -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y) VMLINUX_MAIN += $(drivers-y) $(net-y) diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index bdb2ff8..69e69f3 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -14,18 +14,40 @@ #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return v->counter; +} #define atomic64_read(v) (*(volatile long *)&(v)->counter) +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return v->counter; +} #define atomic_set(v, i) (((v)->counter) = i) +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} #define atomic64_set(v, i) (((v)->counter) = i) +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} extern void atomic_add(int, atomic_t *); +extern void atomic_add_unchecked(int, atomic_unchecked_t *); extern void atomic64_add(long, atomic64_t *); +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); extern void atomic_sub(int, atomic_t *); +extern void atomic_sub_unchecked(int, atomic_unchecked_t *); extern void atomic64_sub(long, atomic64_t *); +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); extern int atomic_add_ret(int, atomic_t *); +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); extern long atomic64_add_ret(long, atomic64_t *); +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); extern int atomic_sub_ret(int, atomic_t *); extern long atomic64_sub_ret(long, atomic64_t *); @@ -33,12 +55,24 @@ extern long atomic64_sub_ret(long, atomic64_t *); #define atomic64_dec_return(v) atomic64_sub_ret(1, v) #define atomic_inc_return(v) atomic_add_ret(1, v) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_ret_unchecked(1, v); +} #define atomic64_inc_return(v) atomic64_add_ret(1, v) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_ret_unchecked(1, v); +} #define atomic_sub_return(i, v) atomic_sub_ret(i, v) #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) #define atomic_add_return(i, v) atomic_add_ret(i, v) +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + return atomic_add_ret_unchecked(i, v); +} #define atomic64_add_return(i, v) atomic64_add_ret(i, v) /* @@ -59,10 +93,26 @@ extern long atomic64_sub_ret(long, atomic64_t *); #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) #define atomic_inc(v) 
atomic_add(1, v) +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + atomic_add_unchecked(1, v); +} #define atomic64_inc(v) atomic64_add(1, v) +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ + atomic64_add_unchecked(1, v); +} #define atomic_dec(v) atomic_sub(1, v) +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + atomic_sub_unchecked(1, v); +} #define atomic64_dec(v) atomic64_sub(1, v) +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + atomic64_sub_unchecked(1, v); +} #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) @@ -72,17 +122,28 @@ extern long atomic64_sub_ret(long, atomic64_t *); static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%icc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) @@ -93,17 +154,28 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) static inline long atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%xcc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 4b4a0c0..29925cb 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -13,10 +13,10 @@ extern int dma_supported(struct device *dev, u64 mask); #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_is_consistent(d, h) (1) -extern struct dma_map_ops *dma_ops, pci32_dma_ops; +extern const struct dma_map_ops *dma_ops, pci32_dma_ops; extern struct bus_type pci_bus_type; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (dev->bus == &pci_bus_type) @@ -30,7 +30,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); @@ -41,7 +41,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); ops->free_coherent(dev, size, cpu_addr, dma_handle); diff --git a/arch/sparc/include/asm/elf_32.h 
b/arch/sparc/include/asm/elf_32.h index 4269ca6..e3da77f 100644 --- a/arch/sparc/include/asm/elf_32.h +++ b/arch/sparc/include/asm/elf_32.h @@ -114,6 +114,13 @@ typedef struct { #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. This can NOT be done in userspace on Sparc. */ diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index e678803..dfb3071 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -162,6 +162,12 @@ typedef struct { #define ELF_ET_DYN_BASE 0x0000010000000000UL #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29) +#endif /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. */ diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 0ece77f..6242e98 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd) BTFIXUPDEF_INT(page_none) BTFIXUPDEF_INT(page_copy) BTFIXUPDEF_INT(page_readonly) + +#ifdef CONFIG_PAX_PAGEEXEC +BTFIXUPDEF_INT(page_shared_noexec) +BTFIXUPDEF_INT(page_copy_noexec) +BTFIXUPDEF_INT(page_readonly_noexec) +#endif + BTFIXUPDEF_INT(page_kernel) #define PMD_SHIFT SUN4C_PMD_SHIFT @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED; #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) +#ifdef CONFIG_PAX_PAGEEXEC +extern pgprot_t PAGE_SHARED_NOEXEC; +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + extern unsigned long page_kernel; #ifdef MODULE diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 1407c07..7e10231 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h @@ -115,6 +115,13 @@ SRMMU_EXEC | SRMMU_REF) #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ SRMMU_EXEC | SRMMU_REF) + +#ifdef CONFIG_PAX_PAGEEXEC +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) +#endif + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ SRMMU_DIRTY | SRMMU_REF) diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 073936a..9bcd257 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_rwlock_t *lock) __asm__ __volatile__ ( "1: ldsw [%2], %0\n" " brlz,pn %0, 2f\n" -"4: add %0, 1, %1\n" +"4: addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -112,7 +117,7 @@ 
static void inline arch_read_lock(arch_rwlock_t *lock) " .previous" : "=&r" (tmp1), "=&r" (tmp2) : "r" (lock) - : "memory"); + : "memory", "cc"); } static int inline arch_read_trylock(arch_rwlock_t *lock) @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) "1: ldsw [%2], %0\n" " brlz,a,pn %0, 2f\n" " mov 0, %0\n" -" add %0, 1, %1\n" +" addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) __asm__ __volatile__( "1: lduw [%2], %0\n" -" sub %0, 1, %1\n" +" subcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%xcc, 1b\n" diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index e88fbe5..96b0ce5 100644 --- a/arch/sparc/include/asm/uaccess.h +++ b/arch/sparc/include/asm/uaccess.h @@ -1,5 +1,13 @@ #ifndef ___ASM_SPARC_UACCESS_H #define ___ASM_SPARC_UACCESS_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#include +extern void check_object_size(const void *ptr, unsigned long n, bool to); +#endif +#endif + #if defined(__sparc__) && defined(__arch64__) #include #else diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 25f1d10..a78c5fe 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -249,14 +249,25 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { - if (n && __access_ok((unsigned long) to, n)) + if ((long)n < 0) + return n; + + if (n && __access_ok((unsigned long) to, n)) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); - else + } else return n; } static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_user(to, (__force void __user *) from, n); } @@ -272,19 +283,27 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un { int sz = __compiletime_object_size(to); + if ((long)n < 0) + return n; + if (unlikely(sz != -1 && sz < n)) { copy_from_user_overflow(); return n; } - if (n && __access_ok((unsigned long) from, n)) + if (n && __access_ok((unsigned long) from, n)) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); return __copy_user((__force void __user *) to, from, n); - else + } else return n; } static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + return __copy_user((__force void __user *) to, from, n); } diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 2406788..02da64f 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -224,6 +225,12 @@ copy_from_user(void *to, const void __user *from, unsigned long size) int sz = __compiletime_object_size(to); unsigned long ret = size; + if ((long)size < 0 || size > INT_MAX) + return size; + + if (!__builtin_constant_p(size)) + check_object_size(to, size, false); + if (likely(sz == -1 || sz >= size)) { ret = ___copy_from_user(to, from, size); if 
(unlikely(ret)) @@ -243,8 +250,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from, static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long size) { - unsigned long ret = ___copy_to_user(to, from, size); + unsigned long ret; + + if ((long)size < 0 || size > INT_MAX) + return size; + + if (!__builtin_constant_p(size)) + check_object_size(from, size, true); + ret = ___copy_to_user(to, from, size); if (unlikely(ret)) ret = copy_to_user_fixup(to, from, size); return ret; diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 0c2dc1f..7ec02c7 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -3,7 +3,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror extra-y := head_$(BITS).o extra-y += init_task.o diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a7..639bf96 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4u_dma_ops = { +static const struct dma_map_ops sun4u_dma_ops = { .alloc_coherent = dma_4u_alloc_coherent, .free_coherent = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops = { .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -struct dma_map_ops *dma_ops = &sun4u_dma_ops; +const struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 703e4aa..043a5f2 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, BUG(); } -struct dma_map_ops sbus_dma_ops = { +const struct dma_map_ops sbus_dma_ops = { .alloc_coherent = sbus_alloc_coherent, .free_coherent = sbus_free_coherent, .map_page = sbus_map_page, @@ -408,7 +408,7 @@ struct dma_map_ops sbus_dma_ops = { .sync_sg_for_device = sbus_sync_sg_for_device, }; -struct dma_map_ops *dma_ops = &sbus_dma_ops; +const struct dma_map_ops *dma_ops = &sbus_dma_ops; EXPORT_SYMBOL(dma_ops); static int __init sparc_register_ioport(void) @@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * } } -struct dma_map_ops pci32_dma_ops = { +const struct dma_map_ops pci32_dma_ops = { .alloc_coherent = pci32_alloc_coherent, .free_coherent = pci32_free_coherent, .map_page = pci32_map_page, diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c index 539243b..d61227d 100644 --- a/arch/sparc/kernel/kgdb_32.c +++ b/arch/sparc/kernel/kgdb_32.c @@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->npc = regs->pc + 4; } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: ta 0x7d */ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d }, }; diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 768290a..d8a4286 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->tnpc = regs->tpc + 4; } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: ta 0x72 */ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 }, }; diff 
--git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index a24af6f..bc0f1cf 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4v_dma_ops = { +static const struct dma_map_ops sun4v_dma_ops = { .alloc_coherent = dma_4v_alloc_coherent, .free_coherent = dma_4v_free_coherent, .map_page = dma_4v_map_page, diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index ee995b7..2393b36 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (ARCH_SUN4C && len > 0x20000000) return -ENOMEM; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr); @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi } if (TASK_SIZE - PAGE_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (check_heap_stack_gap(vmm, addr, len)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 3d435c4..14d838e 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (filp || (flags & MAP_SHARED)) do_color_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; } @@ -174,14 +177,14 @@ full_search: vma = find_vma(mm, VA_EXCLUDE_END); } if (unlikely(task_size < addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* * Remember the place where we stopped the search: */ @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* We do not accept a shared mapping if it would violate * cache aliasing constraints. 
*/ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr - len, len)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr-len); } @@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } @@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { @@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm) gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 42ad2ba..3aafd82 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl) lvl -= 0x100; if (regs->tstate & TSTATE_PRIV) { + +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_overflow(regs); +#endif + sprintf(buffer, "Kernel bad sw trap %lx", lvl); die_if_kernel(buffer, regs); } @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl) void bad_trap_tl1(struct pt_regs *regs, long lvl) { char buffer[32]; - + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_overflow(regs); +#endif + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sprintf (buffer, "Bad trap %lx at tl>0", lvl); diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 0268210..f0291ca 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -18,7 +18,12 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add, .-atomic_add + .globl atomic_add_unchecked + .type atomic_add_unchecked,#function +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) 
+1: lduw [%o1], %g1 + add %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_unchecked, .-atomic_add_unchecked + .globl atomic_sub .type atomic_sub,#function atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_sub, .-atomic_sub + .globl atomic_sub_unchecked + .type atomic_sub_unchecked,#function +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + sub %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_sub_unchecked, .-atomic_sub_unchecked + .globl atomic_add_ret .type atomic_add_ret,#function atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add_ret, .-atomic_add_ret + .globl atomic_add_ret_unchecked + .type atomic_add_ret_unchecked,#function +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + addcc %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + add %g7, %o0, %g7 + sra %g7, 0, %o0 + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked + .globl atomic_sub_ret .type atomic_sub_ret,#function atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add, .-atomic64_add + .globl atomic64_add_unchecked + .type atomic64_add_unchecked,#function +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + addcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_unchecked, .-atomic64_add_unchecked + .globl atomic64_sub .type atomic64_sub,#function atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_sub, .-atomic64_sub + .globl atomic64_sub_unchecked + .type atomic64_sub_unchecked,#function +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + subcc %g1, %o0, %g7 + casx 
[%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked + .globl atomic64_add_ret .type atomic64_add_ret,#function atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add_ret, .-atomic64_add_ret + .globl atomic64_add_ret_unchecked + .type atomic64_add_ret_unchecked,#function +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + addcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + add %g7, %o0, %g7 + mov %g7, %o0 + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked + .globl atomic64_sub_ret .type atomic64_sub_ret,#function atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1b30bb3..ab5bb67 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write); /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); +EXPORT_SYMBOL(atomic_add_unchecked); EXPORT_SYMBOL(atomic_add_ret); EXPORT_SYMBOL(atomic_sub); +EXPORT_SYMBOL(atomic_sub_unchecked); EXPORT_SYMBOL(atomic_sub_ret); EXPORT_SYMBOL(atomic64_add); +EXPORT_SYMBOL(atomic64_add_unchecked); EXPORT_SYMBOL(atomic64_add_ret); +EXPORT_SYMBOL(atomic64_add_ret_unchecked); EXPORT_SYMBOL(atomic64_sub); +EXPORT_SYMBOL(atomic64_sub_unchecked); EXPORT_SYMBOL(atomic64_sub_ret); /* Atomic bit operations. 
*/ diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S index 91a7d29..ce75c29 100644 --- a/arch/sparc/lib/rwsem_64.S +++ b/arch/sparc/lib/rwsem_64.S @@ -11,7 +11,12 @@ .globl __down_read __down_read: 1: lduw [%o0], %g1 - add %g1, 1, %g7 + addcc %g1, 1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 1b @@ -33,7 +38,12 @@ __down_read: .globl __down_read_trylock __down_read_trylock: 1: lduw [%o0], %g1 - add %g1, 1, %g7 + addcc %g1, 1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cmp %g7, 0 bl,pn %icc, 2f mov 0, %o1 @@ -51,7 +61,12 @@ __down_write: or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 1: lduw [%o0], %g3 - add %g3, %g1, %g7 + addcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b @@ -77,7 +92,12 @@ __down_write_trylock: cmp %g3, 0 bne,pn %icc, 2f mov 0, %o1 - add %g3, %g1, %g7 + addcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b @@ -90,7 +110,12 @@ __down_write_trylock: __up_read: 1: lduw [%o0], %g1 - sub %g1, 1, %g7 + subcc %g1, 1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 1b @@ -118,7 +143,12 @@ __up_write: or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 1: lduw [%o0], %g3 - sub %g3, %g1, %g7 + subcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b @@ -143,7 +173,12 @@ __downgrade_write: or %g1, %lo(RWSEM_WAITING_BIAS), %g1 1: lduw [%o0], %g3 - sub %g3, %g1, %g7 + subcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 79836a7..62f47a2 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -2,7 +2,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o obj-y += fault_$(BITS).o diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index bd86016..1417ea6 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #include #include @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) return safe_compute_effective_address(regs, insn); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_DLRESOLVE +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned int *kaddr; + + vmf->page = alloc_page(GFP_HIGHUSER); + if (!vmf->page) + return VM_FAULT_OOM; + + kaddr = kmap(vmf->page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(vmf->page); + kunmap(vmf->page); + return VM_FAULT_MAJOR; +} + +static const struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .fault = pax_emuplt_fault +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + INIT_LIST_HEAD(&vma->anon_vma_chain); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + 
++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->pc); + err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned int addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->pc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned int addr; + + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(jmpl, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned int addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(ba, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned int addr, save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + else + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + +#ifdef CONFIG_PAX_DLRESOLVE + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(¤t->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(¤t->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(¤t->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + 
up_write(¤t->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->pc = call_dl_resolve; + regs->npc = addr+4; + return 3; + } +#endif + + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ + if ((save & 0xFFC00000U) == 0x05000000U && + (call & 0xFFFFE000U) == 0x85C0A000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G2] = addr + 4; + addr = (save & 0x003FFFFFU) << 10; + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->pc-4)); + err |= get_user(call, (unsigned int *)regs->pc); + err |= get_user(nop, (unsigned int *)(regs->pc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); + + regs->u_regs[UREG_RETPC] = regs->pc; + regs->pc = dl_resolve; + regs->npc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int text_fault) { @@ -282,6 +547,24 @@ good_area: if(!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); + do_group_exit(SIGKILL); + } +#endif + /* Allow reads even for write-only mappings */ if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index f92ce56..359834d 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -21,6 +21,9 @@ #include #include #include +#include +#include +#include #include #include @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, show_regs(regs); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_DLRESOLVE +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned int *kaddr; + + vmf->page = alloc_page(GFP_HIGHUSER); + if (!vmf->page) + return VM_FAULT_OOM; + + kaddr = kmap(vmf->page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(vmf->page); + kunmap(vmf->page); + return VM_FAULT_MAJOR; +} + +static const struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .fault = pax_emuplt_fault +}; + +static int pax_insert_vma(struct vm_area_struct *vma, 
unsigned long addr) +{ + int ret; + + INIT_LIST_HEAD(&vma->anon_vma_chain); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->tpc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->tpc); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->tpc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned long addr; + + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #4 */ + unsigned int sethi, mov1, call, mov2; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); + err |= get_user(call, (unsigned int *)(regs->tpc+8)); + err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + mov1 == 0x8210000FU && + (call & 0xC0000000U) == 0x40000000U && + mov2 == 0x9E100001U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #5 */ + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(sethi1, 
(unsigned int *)(regs->tpc+4)); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); + err |= get_user(or1, (unsigned int *)(regs->tpc+12)); + err |= get_user(or2, (unsigned int *)(regs->tpc+16)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); + err |= get_user(nop, (unsigned int *)(regs->tpc+28)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x82106000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x83287020U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #6 */ + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); + err |= get_user(or, (unsigned int *)(regs->tpc+16)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); + err |= get_user(nop, (unsigned int *)(regs->tpc+24)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + sllx == 0x83287020U && + (or & 0xFFFFE000U) == 0x8A116000U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned long addr; + unsigned int save, call; + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + else + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + +#ifdef CONFIG_PAX_DLRESOLVE + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(¤t->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(¤t->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + down_write(¤t->mm->mmap_sem); + 
if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(¤t->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(¤t->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->tpc = call_dl_resolve; + regs->tnpc = addr+4; + return 3; + } +#endif + + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ + if ((save & 0xFFC00000U) == 0x05000000U && + (call & 0xFFFFE000U) == 0x85C0A000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G2] = addr + 4; + addr = (save & 0x003FFFFFU) << 10; + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 3; + } + + /* PaX: 64-bit PLT stub */ + err = get_user(sethi1, (unsigned int *)addr); + err |= get_user(sethi2, (unsigned int *)(addr+4)); + err |= get_user(or1, (unsigned int *)(addr+8)); + err |= get_user(or2, (unsigned int *)(addr+12)); + err |= get_user(sllx, (unsigned int *)(addr+16)); + err |= get_user(add, (unsigned int *)(addr+20)); + err |= get_user(jmpl, (unsigned int *)(addr+24)); + err |= get_user(nop, (unsigned int *)(addr+28)); + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x09000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x88112000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x89293020U && + add == 0x8A010005U && + jmpl == 0x89C14000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G4] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; + regs->u_regs[UREG_G4] = addr + 24; + addr = regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 3; + } + } + } while (0); + +#ifdef CONFIG_PAX_DLRESOLVE + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->tpc-4)); + err |= get_user(call, (unsigned int *)regs->tpc); + err |= get_user(nop, (unsigned int *)(regs->tpc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + dl_resolve &= 0xFFFFFFFFUL; + + regs->u_regs[UREG_RETPC] = regs->tpc; + regs->tpc = dl_resolve; + regs->tnpc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (ba & 0xFFF00000U) == 0x30600000U && 
+ nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { struct mm_struct *mm = current->mm; @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) if (!vma) goto bad_area; +#ifdef CONFIG_PAX_PAGEEXEC + /* PaX: detect ITLB misses on non-exec pages */ + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) + { + if (address != regs->tpc) + goto good_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); + do_group_exit(SIGKILL); + } +#endif + /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 5fdddf1..bfa67f0 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -68,7 +68,7 @@ full_search: } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* * Remember the place where we stopped the search: */ @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr - len, len)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr-len); } @@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } @@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 6d0e02c..c0b5fa7 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -318,6 +318,9 @@ extern void device_scan(void); pgprot_t PAGE_SHARED __read_mostly; EXPORT_SYMBOL(PAGE_SHARED); +pgprot_t PAGE_SHARED_NOEXEC __read_mostly; +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC); + void __init paging_init(void) { switch(sparc_cpu_model) { @@ -346,17 +349,17 @@ void __init 
paging_init(void) /* Initialize the protection map with non-constant, MMU dependent values. */ protection_map[0] = PAGE_NONE; - protection_map[1] = PAGE_READONLY; - protection_map[2] = PAGE_COPY; - protection_map[3] = PAGE_COPY; + protection_map[1] = PAGE_READONLY_NOEXEC; + protection_map[2] = PAGE_COPY_NOEXEC; + protection_map[3] = PAGE_COPY_NOEXEC; protection_map[4] = PAGE_READONLY; protection_map[5] = PAGE_READONLY; protection_map[6] = PAGE_COPY; protection_map[7] = PAGE_COPY; protection_map[8] = PAGE_NONE; - protection_map[9] = PAGE_READONLY; - protection_map[10] = PAGE_SHARED; - protection_map[11] = PAGE_SHARED; + protection_map[9] = PAGE_READONLY_NOEXEC; + protection_map[10] = PAGE_SHARED_NOEXEC; + protection_map[11] = PAGE_SHARED_NOEXEC; protection_map[12] = PAGE_READONLY; protection_map[13] = PAGE_READONLY; protection_map[14] = PAGE_SHARED; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index b0b43aa..22982cd 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -2198,6 +2198,13 @@ void __init ld_mmu_srmmu(void) PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); + +#ifdef CONFIG_PAX_PAGEEXEC + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC); + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); +#endif + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h index 6c03acd..a5e0215 100644 --- a/arch/um/include/asm/kmap_types.h +++ b/arch/um/include/asm/kmap_types.h @@ -23,6 +23,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h index 4cc9b6c..02e5029 100644 --- a/arch/um/include/asm/page.h +++ b/arch/um/include/asm/page.h @@ -14,6 +14,9 @@ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #ifndef __ASSEMBLY__ struct page; diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c index 70ca357..728d1cc 100644 --- a/arch/um/sys-i386/syscalls.c +++ b/arch/um/sys-i386/syscalls.c @@ -11,6 +11,21 @@ #include "asm/uaccess.h" #include "asm/unistd.h" +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) +{ + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > pax_task_size || addr > pax_task_size - len) + return -EINVAL; + + return 0; +} + /* * The prototype on i386 is: * diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f942bb7..d2fc06f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1038,7 +1038,7 @@ choice config NOHIGHMEM bool "off" - depends on !X86_NUMAQ + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 @@ -1075,7 +1075,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" - depends on !X86_NUMAQ + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. 
@@ -1129,7 +1129,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G - default 0x78000000 if VMSPLIT_2G_OPT + default 0x70000000 if VMSPLIT_2G_OPT default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 @@ -1461,7 +1461,7 @@ config ARCH_USES_PG_UNCACHED config EFI bool "EFI runtime service support" - depends on ACPI + depends on ACPI && !PAX_KERNEXEC ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). @@ -1548,6 +1548,7 @@ config KEXEC_JUMP config PHYSICAL_START hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) default "0x1000000" + range 0x400000 0x40000000 ---help--- This gives the physical address where the kernel is loaded. @@ -1611,6 +1612,7 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" if X86_32 default "0x1000000" + range 0x400000 0x1000000 if PAX_KERNEXEC range 0x2000 0x1000000 ---help--- This value puts the alignment restrictions on physical address @@ -1642,9 +1644,10 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. config COMPAT_VDSO - def_bool y + def_bool n prompt "Compat VDSO support" depends on X86_32 || IA32_EMULATION + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF ---help--- Map the 32-bit VDSO to the predictable old-style address too. diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 2ac9069..483cbad 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -336,7 +336,7 @@ config X86_PPRO_FENCE config X86_F00F_BUG def_bool y - depends on M586MMX || M586TSC || M586 || M486 || M386 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC config X86_INVD_BUG def_bool y @@ -360,7 +360,7 @@ config X86_POPAD_OK config X86_ALIGNMENT_16 def_bool y - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 config X86_INTEL_USERCOPY def_bool y @@ -406,7 +406,7 @@ config X86_CMPXCHG64 # generates cmov. config X86_CMOV def_bool y - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) config X86_MINIMUM_CPU_FAMILY int diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 7508508..2c53b5e 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -97,7 +97,7 @@ config X86_PTDUMP config DEBUG_RODATA bool "Write protect kernel read-only data structures" default y - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && BROKEN ---help--- Mark the kernel read-only data as write-protected in the pagetables, in order to catch accidental (and incorrect) writes to such const diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8aa1b59..4094bef 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -191,3 +191,12 @@ define archhelp echo ' FDARGS="..." 
arguments for the booted kernel' echo ' FDINITRD=file initrd for the booted kernel' endef + +define OLD_LD + +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. +*** Please upgrade your binutils to 2.18 or newer +endef + +archprepare: + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h index 878e4b9..20537ab 100644 --- a/arch/x86/boot/bitops.h +++ b/arch/x86/boot/bitops.h @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr) u8 v; const u32 *p = (const u32 *)addr; - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); return v; } @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr) static inline void set_bit(int nr, void *addr) { - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); } #endif /* BOOT_BITOPS_H */ diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 98239d2..f40214c 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -82,7 +82,7 @@ static inline void io_delay(void) static inline u16 ds(void) { u16 seg; - asm("movw %%ds,%0" : "=rm" (seg)); + asm volatile("movw %%ds,%0" : "=rm" (seg)); return seg; } @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr) static inline int memcmp(const void *s1, const void *s2, size_t len) { u8 diff; - asm("repe; cmpsb; setnz %0" + asm volatile("repe; cmpsb; setnz %0" : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); return diff; } diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index f543b70..b60fba8 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -76,7 +76,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx #else - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $____LOAD_PHYSICAL_ADDR, %ebx #endif /* Target address to relocate to for decompression */ @@ -149,7 +149,7 @@ relocated: * and where it was actually loaded. */ movl %ebp, %ebx - subl $LOAD_PHYSICAL_ADDR, %ebx + subl $____LOAD_PHYSICAL_ADDR, %ebx jz 2f /* Nothing to be done if loaded at compiled addr. */ /* * Process relocations. 
@@ -157,8 +157,7 @@ relocated: 1: subl $4, %edi movl (%edi), %ecx - testl %ecx, %ecx - jz 2f + jecxz 2f addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) jmp 1b 2: diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index faff0dc..f859ead 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -91,7 +91,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx #else - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $____LOAD_PHYSICAL_ADDR, %ebx #endif /* Target address to relocate to for decompression */ @@ -233,7 +233,7 @@ ENTRY(startup_64) notq %rax andq %rax, %rbp #else - movq $LOAD_PHYSICAL_ADDR, %rbp + movq $____LOAD_PHYSICAL_ADDR, %rbp #endif /* Target address to relocate to for decompression */ diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 51e2407..c05c3f6 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -285,7 +285,7 @@ static void parse_elf(void *output) case PT_LOAD: #ifdef CONFIG_RELOCATABLE dest = output; - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); #else dest = (void *)(phdr->p_paddr); #endif @@ -332,7 +332,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, error("Destination address too large"); #endif #ifndef CONFIG_RELOCATABLE - if ((unsigned long)output != LOAD_PHYSICAL_ADDR) + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) error("Wrong destination address"); #endif diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 5c22812..1138426 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -74,7 +74,7 @@ int main(int argc, char *argv[]) offs = (olen > ilen) ? olen - ilen : 0; offs += olen >> 12; /* Add 8 bytes for each 32K block */ - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */ + offs += 64*1024; /* Add 64K bytes slack */ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ printf(".section \".rodata..compressed\",\"a\",@progbits\n"); diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index 89bbf4e..869908e 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -13,8 +13,11 @@ static void die(char *fmt, ...); +#include "../../../../include/generated/autoconf.h" + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) static Elf32_Ehdr ehdr; +static Elf32_Phdr *phdr; static unsigned long reloc_count, reloc_idx; static unsigned long *relocs; @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp) } } +static void read_phdrs(FILE *fp) +{ + unsigned int i; + + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr)); + if (!phdr) { + die("Unable to allocate %d program headers\n", + ehdr.e_phnum); + } + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { + die("Seek to %d failed: %s\n", + ehdr.e_phoff, strerror(errno)); + } + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { + die("Cannot read ELF program headers: %s\n", + strerror(errno)); + } + for(i = 0; i < ehdr.e_phnum; i++) { + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type); + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset); + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr); + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr); + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz); + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz); + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags); + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align); + } + +} + static void read_shdrs(FILE *fp) { - int i; + 
unsigned int i; Elf32_Shdr shdr; secs = calloc(ehdr.e_shnum, sizeof(struct section)); @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp) static void read_strtabs(FILE *fp) { - int i; + unsigned int i; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_STRTAB) { @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp) static void read_symtabs(FILE *fp) { - int i,j; + unsigned int i,j; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_SYMTAB) { @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp) static void read_relocs(FILE *fp) { - int i,j; + unsigned int i,j; + uint32_t base; + for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL) { @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp) die("Cannot read symbol table: %s\n", strerror(errno)); } + base = 0; + for (j = 0; j < ehdr.e_phnum; j++) { + if (phdr[j].p_type != PT_LOAD ) + continue; + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) + continue; + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; + break; + } for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { Elf32_Rel *rel = &sec->reltab[j]; - rel->r_offset = elf32_to_cpu(rel->r_offset); + rel->r_offset = elf32_to_cpu(rel->r_offset) + base; rel->r_info = elf32_to_cpu(rel->r_info); } } @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp) static void print_absolute_symbols(void) { - int i; + unsigned int i; printf("Absolute symbols\n"); printf(" Num: Value Size Type Bind Visibility Name\n"); for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; Elf32_Sym *sh_symtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; @@ -431,14 +475,14 @@ static void print_absolute_symbols(void) static void print_absolute_relocs(void) { - int i, printed = 0; + unsigned int i, printed = 0; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; struct section *sec_applies, *sec_symtab; char *sym_strtab; Elf32_Sym *sh_symtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_REL) { continue; } @@ -499,13 +543,13 @@ static void print_absolute_relocs(void) static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) { - int i; + unsigned int i; /* Walk through the relocations */ for (i = 0; i < ehdr.e_shnum; i++) { char *sym_strtab; Elf32_Sym *sh_symtab; struct section *sec_applies, *sec_symtab; - int j; + unsigned int j; struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL) { @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) !is_rel_reloc(sym_name(sym_strtab, sym))) { continue; } + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */ + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) + continue; + +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32) + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext")) + continue; + if (!strcmp(sec_name(sym->st_shndx), ".init.text")) + continue; + if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) + continue; + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) + 
continue; +#endif + switch (r_type) { case R_386_NONE: case R_386_PC32: @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb) static void emit_relocs(int as_text) { - int i; + unsigned int i; /* Count how many relocations I have and allocate space for them. */ reloc_count = 0; walk_relocs(count_reloc); @@ -665,6 +725,7 @@ int main(int argc, char **argv) fname, strerror(errno)); } read_ehdr(fp); + read_phdrs(fp); read_shdrs(fp); read_strtabs(fp); read_symtabs(fp); diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4d3ff03..e4972ff 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -74,7 +74,7 @@ static int has_fpu(void) u16 fcw = -1, fsw = -1; u32 cr0; - asm("movl %%cr0,%0" : "=r" (cr0)); + asm volatile("movl %%cr0,%0" : "=r" (cr0)); if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { cr0 &= ~(X86_CR0_EM|X86_CR0_TS); asm volatile("movl %0,%%cr0" : : "r" (cr0)); @@ -90,7 +90,7 @@ static int has_eflag(u32 mask) { u32 f0, f1; - asm("pushfl ; " + asm volatile("pushfl ; " "pushfl ; " "popl %0 ; " "movl %0,%1 ; " @@ -115,7 +115,7 @@ static void get_flags(void) set_bit(X86_FEATURE_FPU, cpu.flags); if (has_eflag(X86_EFLAGS_ID)) { - asm("cpuid" + asm volatile("cpuid" : "=a" (max_intel_level), "=b" (cpu_vendor[0]), "=d" (cpu_vendor[1]), @@ -124,7 +124,7 @@ static void get_flags(void) if (max_intel_level >= 0x00000001 && max_intel_level <= 0x0000ffff) { - asm("cpuid" + asm volatile("cpuid" : "=a" (tfms), "=c" (cpu.flags[4]), "=d" (cpu.flags[0]) @@ -136,7 +136,7 @@ static void get_flags(void) cpu.model += ((tfms >> 16) & 0xf) << 4; } - asm("cpuid" + asm volatile("cpuid" : "=a" (max_amd_level) : "a" (0x80000000) : "ebx", "ecx", "edx"); @@ -144,7 +144,7 @@ static void get_flags(void) if (max_amd_level >= 0x80000001 && max_amd_level <= 0x8000ffff) { u32 eax = 0x80000001; - asm("cpuid" + asm volatile("cpuid" : "+a" (eax), "=c" (cpu.flags[6]), "=d" (cpu.flags[1]) @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) u32 ecx = MSR_K7_HWCR; u32 eax, edx; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax &= ~(1 << 15); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); get_flags(); /* Make sure it really did something */ err = check_flags(); @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) u32 ecx = MSR_VIA_FCR; u32 eax, edx; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax |= (1<<1)|(1<<7); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); set_bit(X86_FEATURE_CX8, cpu.flags); err = check_flags(); @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) u32 eax, edx; u32 level = 1; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); - asm("cpuid" + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); + asm volatile("cpuid" : "+a" (level), "=d" (cpu.flags[0]) : : "ecx", "ebx"); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); err = check_flags(); } diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 93e689f..504ba09 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ 
-224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to # single linked list of # struct setup_data -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) #define VO_INIT_SIZE (VO__end - VO__text) diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index cae3feb..ff8ff2a 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -19,7 +19,7 @@ static int detect_memory_e820(void) { - int count = 0; + unsigned int count = 0; struct biosregs ireg, oreg; struct e820entry *desc = boot_params.e820_map; static struct e820entry buf; /* static so it is zeroed */ diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 11e8c6e..fdbb1ed 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) boot_params.screen_info.vesapm_seg = oreg.es; boot_params.screen_info.vesapm_off = oreg.di; + boot_params.screen_info.vesapm_size = oreg.cx; } /* diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 43eda28..5ab5fdb 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -96,7 +96,7 @@ static void store_mode_params(void) static unsigned int get_entry(void) { char entry_buf[4]; - int i, len = 0; + unsigned int i, len = 0; int key; unsigned int v; diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 0350311..a2de328 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -168,6 +168,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long dump_start, dump_size; struct user32 dump; + memset(&dump, 0, sizeof(dump)); + fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; @@ -217,12 +219,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, dump_size = dump.u_ssize << PAGE_SHIFT; DUMP_WRITE(dump_start, dump_size); } - /* - * Finally dump the task struct. Not be used by gdb, but - * could be useful - */ - set_fs(KERNEL_DS); - DUMP_WRITE(current, sizeof(*current)); end_coredump: set_fs(fs); return has_dumped; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 588a7aa..d49dacb 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, sp -= frame_size; /* Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. 
*/ - sp = ((sp + 4) & -16ul) - 4; + sp = ((sp - 12) & -16ul) - 4; return (void __user *) sp; } @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, - 0, + 0 }; frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); @@ -533,9 +533,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; + else if (current->mm->context.vdso) + /* Return stub is in 32bit vsyscall page */ + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); else - restorer = VDSO32_SYMBOL(current->mm->context.vdso, - rt_sigreturn); + restorer = &frame->retcode; put_user_ex(ptr_to_compat(restorer), &frame->pretcode); /* diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 4f5f71e..392161a 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -13,6 +13,7 @@ #include #include #include +#include #include /* Avoid __ASSEMBLER__'ifying just for this. */ @@ -120,6 +121,11 @@ ENTRY(ia32_sysenter_target) SWAPGS_UNSAFE_STACK movq PER_CPU_VAR(kernel_stack), %rsp addq $(KERNEL_STACK_OFFSET),%rsp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_enter_kernel_user +#endif + /* * No need to follow this irqs on/off section: the syscall * disabled irqs, here we enable it straight after entry: @@ -150,6 +156,12 @@ ENTRY(ia32_sysenter_target) SAVE_ARGS 0,0,1 /* no need to do an access_ok check here because rbp has been 32bit zero extended */ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%r10 + add %r10,%rbp +#endif + 1: movl (%rbp),%ebp .section __ex_table,"a" .quad 1b,ia32_badarg @@ -172,6 +184,11 @@ sysenter_dispatch: testl $_TIF_ALLWORK_MASK,TI_flags(%r10) jnz sysexit_audit sysexit_from_sys_call: + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#endif + andl $~TS_COMPAT,TI_status(%r10) /* clear IF, that popfq doesn't enable interrupts early */ andl $~0x200,EFLAGS-R11(%rsp) @@ -290,6 +307,11 @@ ENTRY(ia32_cstar_target) movl %esp,%r8d CFI_REGISTER rsp,r8 movq PER_CPU_VAR(kernel_stack),%rsp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_enter_kernel_user +#endif + /* * No need to follow this irqs on/off section: the syscall * disabled irqs and here we enable it straight after entry: @@ -311,6 +333,12 @@ ENTRY(ia32_cstar_target) /* no need to do an access_ok check here because r8 has been 32bit zero extended */ /* hardware stack frame is complete now */ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%r10 + add %r10,%r8 +#endif + 1: movl (%r8),%r9d .section __ex_table,"a" .quad 1b,ia32_badarg @@ -333,6 +361,11 @@ cstar_dispatch: testl $_TIF_ALLWORK_MASK,TI_flags(%r10) jnz sysretl_audit sysretl_from_sys_call: + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#endif + andl $~TS_COMPAT,TI_status(%r10) RESTORE_ARGS 1,-ARG_SKIP,1,1,1 movl RIP-ARGOFFSET(%rsp),%ecx @@ -415,6 +448,11 @@ ENTRY(ia32_syscall) CFI_REL_OFFSET rip,RIP-RIP PARAVIRT_ADJUST_EXCEPTION_FRAME SWAPGS + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_enter_kernel_user +#endif + /* * No need to follow this irqs on/off section: the syscall * disabled irqs and here we enable it straight after entry: diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 03b6bb5..ed0fb81 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -91,7 +91,7 @@ static inline int alternatives_text_reserved(void *start, void *end) " .byte 664f-663f\n" /* 
replacementlen */ \ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ ".previous\n" \ - ".section .altinstr_replacement, \"ax\"\n" \ + ".section .altinstr_replacement, \"a\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h index 20370c6..a2eb9b0 100644 --- a/arch/x86/include/asm/apm.h +++ b/arch/x86/include/asm/apm.h @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index b3ed1e1..6b18beb 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -37,6 +37,12 @@ #define _ASM_SI __ASM_REG(si) #define _ASM_DI __ASM_REG(di) +#ifdef CONFIG_X86_32 +#define _ASM_INTO "into" +#else +#define _ASM_INTO "int $4" +#endif + /* Exception table entry */ #ifdef __ASSEMBLY__ # define _ASM_EXTABLE(from,to) \ diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 952a826..c4830ea 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -26,6 +26,17 @@ static inline int atomic_read(const atomic_t *v) } /** + * atomic_read_unchecked - read atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically reads the value of @v. + */ +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return v->counter; +} + +/** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i) } /** + * atomic_set_unchecked - set atomic variable + * @v: pointer of type atomic_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} + +/** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i) */ static inline void atomic_add(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "addl %1,%0" + asm volatile(LOCK_PREFIX "addl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %1,%0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_add_unchecked - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v. 
+ */ +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "addl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v) */ static inline void atomic_sub(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "subl %1,%0" + asm volatile(LOCK_PREFIX "subl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %1,%0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_sub_unchecked - subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %2,%0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) */ static inline void atomic_inc(atomic_t *v) { - asm volatile(LOCK_PREFIX "incl %0" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter)); +} + +/** + * atomic_inc_unchecked - increment atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1. + */ +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "incl %0\n" : "+m" (v->counter)); } @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v) */ static inline void atomic_dec(atomic_t *v) { - asm volatile(LOCK_PREFIX "decl %0" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter)); +} + +/** + * atomic_dec_unchecked - decrement atomic variable + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1. 
+ */ +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decl %0\n" : "+m" (v->counter)); } @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "decl %0; sete %1" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; @@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(atomic_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "incl %0; sete %1" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; @@ -157,7 +291,16 @@ static inline int atomic_add_negative(int i, atomic_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %2,%0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "+m" (v->counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -180,6 +323,46 @@ static inline int atomic_add_return(int i, atomic_t *v) #endif /* Modern 486+ processor */ __i = i; + asm volatile(LOCK_PREFIX "xaddl %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movl %0, %1\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; + +#ifdef CONFIG_M386 +no_xadd: /* Legacy 386 processor */ + local_irq_save(flags); + __i = atomic_read(v); + atomic_set(v, i + __i); + local_irq_restore(flags); + return i + __i; +#endif +} + +/** + * atomic_add_return_unchecked - add integer and return + * @v: pointer of type atomic_unchecked_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + int __i; +#ifdef CONFIG_M386 + unsigned long flags; + if (unlikely(boot_cpu_data.x86 <= 3)) + goto no_xadd; +#endif + /* Modern 486+ processor */ + __i = i; asm volatile(LOCK_PREFIX "xaddl %0, %1" : "+r" (i), "+m" (v->counter) : : "memory"); @@ -208,6 +391,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) } #define atomic_inc_return(v) (atomic_add_return(1, v)) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); +} #define atomic_dec_return(v) (atomic_sub_return(1, v)) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) @@ -231,17 +418,30 @@ static inline int atomic_xchg(atomic_t *v, int new) */ static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "subl %2,%0\n" + _ASM_INTO "\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "ir" (a)); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 2a934aa..5c1a942 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ 
b/arch/x86/include/asm/atomic64_32.h @@ -12,6 +12,14 @@ typedef struct { u64 __aligned(8) counter; } atomic64_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + u64 __aligned(8) counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif + #define ATOMIC64_INIT(val) { (val) } #ifdef CONFIG_X86_CMPXCHG64 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 49fd1ea..6b79575 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v) } /** + * atomic64_read_unchecked - read atomic64 variable + * @v: pointer of type atomic64_unchecked_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. + */ +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return v->counter; +} + +/** * atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: required value @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) } /** + * atomic64_set_unchecked - set atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} + +/** * atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i) */ static inline void atomic64_add(long i, atomic64_t *v) { + asm volatile(LOCK_PREFIX "addq %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subq %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +} + +/** + * atomic64_add_unchecked - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v. + */ +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) : "er" (i), "m" (v->counter)); @@ -56,7 +102,15 @@ static inline void atomic64_add(long i, atomic64_t *v) */ static inline void atomic64_sub(long i, atomic64_t *v) { - asm volatile(LOCK_PREFIX "subq %1,%0" + asm volatile(LOCK_PREFIX "subq %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addq %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "=m" (v->counter) : "er" (i), "m" (v->counter)); } @@ -74,7 +128,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "er" (i), "m" (v->counter) : "memory"); return c; @@ -88,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) */ static inline void atomic64_inc(atomic64_t *v) { + asm volatile(LOCK_PREFIX "incq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_inc_unchecked - increment atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically increments @v by 1. 
+ */ +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) : "m" (v->counter)); @@ -101,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v) */ static inline void atomic64_dec(atomic64_t *v) { - asm volatile(LOCK_PREFIX "decq %0" + asm volatile(LOCK_PREFIX "decq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_dec_unchecked - decrement atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1. + */ +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decq %0\n" : "=m" (v->counter) : "m" (v->counter)); } @@ -118,7 +223,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "decq %0; sete %1" + asm volatile(LOCK_PREFIX "decq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -136,7 +250,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "incq %0; sete %1" + asm volatile(LOCK_PREFIX "incq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -155,7 +278,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) { unsigned char c; - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "=m" (v->counter), "=qm" (c) : "er" (i), "m" (v->counter) : "memory"); return c; @@ -171,7 +303,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) static inline long atomic64_add_return(long i, atomic64_t *v) { long __i = i; - asm volatile(LOCK_PREFIX "xaddq %0, %1;" + asm volatile(LOCK_PREFIX "xaddq %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movq %0, %1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + +/** + * atomic64_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) +{ + long __i = i; + asm volatile(LOCK_PREFIX "xaddq %0, %1" : "+r" (i), "+m" (v->counter) : : "memory"); return i + __i; @@ -183,6 +339,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) } #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_return_unchecked(1, v); +} #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) @@ -206,17 +366,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new) */ static inline int atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + 
(a)); + + asm volatile("add %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "sub %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "ir" (a)); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 3b62ab5..ea86950 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -11,10 +11,15 @@ #include /* Physical address where kernel should be loaded. */ -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) +#ifndef __ASSEMBLY__ +extern unsigned char __LOAD_PHYSICAL_ADDR[]; +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) +#endif + /* Minimum kernel alignment, as a power of two */ #ifdef CONFIG_X86_64 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h index 48f99f1..982b21c 100644 --- a/arch/x86/include/asm/cache.h +++ b/arch/x86/include/asm/cache.h @@ -8,6 +8,7 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define __read_only __attribute__((__section__(".data..read_only"))) #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 63e35ec..718f045 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -66,7 +66,7 @@ static inline unsigned long get_page_memtype(struct page *pg) unsigned long pg_flags = pg->flags & _PGMT_MASK; if (pg_flags == _PGMT_DEFAULT) - return -1; + return ~0UL; else if (pg_flags == _PGMT_WC) return _PAGE_CACHE_WC; else if (pg_flags == _PGMT_UC_MINUS) diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 46fc474..b02b0f9 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. 
@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, int *err_ptr) { might_sleep(); - return csum_partial_copy_generic((__force void *)src, dst, + return csum_partial_copy_generic_from_user((__force void *)src, dst, len, sum, err_ptr, NULL); } @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, + return csum_partial_copy_generic_to_user(src, (__force void *)dst, len, sum, NULL, err_ptr); if (len) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 14e0ee1..c5ff697 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -323,7 +323,7 @@ static __always_inline __pure bool __static_cpu_has(u8 bit) " .byte 4f - 3f\n" /* replacement len */ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */ ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" + ".section .altinstr_replacement,\"a\"\n" "3: movb $1,%0\n" "4:\n" ".previous\n" diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 617bd56..8d4356d 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -4,6 +4,7 @@ #include #include #include +#include #include static inline void fill_ldt(struct desc_struct *desc, @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc, desc->base1 = (info->base_addr & 0x00ff0000) >> 16; desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + desc->type |= info->seg_not_present ^ 1; desc->s = 1; desc->dpl = 0x3; desc->p = info->seg_not_present ^ 1; @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc, } extern struct desc_ptr idt_descr; -extern gate_desc idt_table[]; - -struct gdt_page { - struct desc_struct gdt[GDT_ENTRIES]; -} __attribute__((aligned(PAGE_SIZE))); -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); +extern gate_desc idt_table[256]; +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { - return per_cpu(gdt_page, cpu).gdt; + return cpu_gdt_table[cpu]; } #ifdef CONFIG_X86_64 @@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) { + pax_open_kernel(); memcpy(&idt[entry], gate, sizeof(*gate)); + pax_close_kernel(); } static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) { + pax_open_kernel(); memcpy(&ldt[entry], desc, 8); + pax_close_kernel(); } static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) { unsigned int size; + switch (type) { case DESC_TSS: size = sizeof(tss_desc); @@ -139,7 +142,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, size = sizeof(struct desc_struct); break; } + + pax_open_kernel(); memcpy(&gdt[entry], desc, size); + pax_close_kernel(); } static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, @@ -211,7 +217,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) static inline void native_load_tr_desc(void) { + pax_open_kernel(); asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); + pax_close_kernel(); } static inline void native_load_gdt(const struct desc_ptr *dtr) @@ -246,8 +254,10 @@ static inline void 
native_load_tls(struct thread_struct *t, unsigned int cpu) unsigned int i; struct desc_struct *gdt = get_cpu_gdt_table(cpu); + pax_open_kernel(); for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; + pax_close_kernel(); } #define _LDT_empty(info) \ @@ -309,7 +319,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) desc->limit = (limit >> 16) & 0xf; } -static inline void _set_gate(int gate, unsigned type, void *addr, +static inline void _set_gate(int gate, unsigned type, const void *addr, unsigned dpl, unsigned ist, unsigned seg) { gate_desc s; @@ -327,7 +337,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr, * Pentium F0 0F bugfix can have resulted in the mapped * IDT being write-protected. */ -static inline void set_intr_gate(unsigned int n, void *addr) +static inline void set_intr_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); @@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr) /* * This routine sets up an interrupt gate at directory privilege level 3. */ -static inline void set_system_intr_gate(unsigned int n, void *addr) +static inline void set_system_intr_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); } -static inline void set_system_trap_gate(unsigned int n, void *addr) +static inline void set_system_trap_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); } -static inline void set_trap_gate(unsigned int n, void *addr) +static inline void set_trap_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); @@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigned int n, void *addr) static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) { BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); } -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); } -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); } +#ifdef CONFIG_X86_32 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) +{ + struct desc_struct d; + + if (likely(limit)) + limit = (limit - 1UL) >> PAGE_SHIFT; + pack_descriptor(&d, base, limit, 0xFB, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); +} +#endif + #endif /* _ASM_X86_DESC_H */ diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 029f230..0b9b1b3 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -6,7 +6,7 @@ struct dev_archdata { void *acpi_handle; #endif #ifdef CONFIG_X86_64 -struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; #endif #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 
ac91eed..a3eae19 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -26,9 +26,9 @@ extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { #ifdef CONFIG_X86_32 return dma_ops; @@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) /* Make sure we keep the same behaviour */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -123,7 +123,7 @@ static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *memory; gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); @@ -150,7 +150,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, static inline void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t bus) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); WARN_ON(irqs_disabled()); /* for portability */ diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index ec8a52d..7bbff69 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -69,7 +69,7 @@ struct e820map { #define ISA_START_ADDRESS 0xa0000 #define ISA_END_ADDRESS 0x100000 -#define BIOS_BEGIN 0x000a0000 +#define BIOS_BEGIN 0x000c0000 #define BIOS_END 0x00100000 #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index f2ad216..569e1f7 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -237,7 +237,25 @@ extern int force_personality32; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ +#ifdef CONFIG_PAX_SEGMEXEC +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) +#else #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#ifdef CONFIG_PAX_ASLR +#ifdef CONFIG_X86_32 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL + +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#else +#define PAX_ELF_ET_DYN_BASE 0x400000UL + +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) +#endif +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, @@ -291,8 +309,7 @@ do { \ #define ARCH_DLINFO \ do { \ if (vdso_enabled) \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, \ - (unsigned long)current->mm->context.vdso); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\ } while (0) #define AT_SYSINFO 32 @@ -303,7 +320,7 @@ do { \ #endif /* !CONFIG_X86_32 */ -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) +#define VDSO_CURRENT_BASE (current->mm->context.vdso) #define VDSO_ENTRY \ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, extern int syscall32_setup_pages(struct linux_binprm *, int exstack); #define compat_arch_setup_additional_pages syscall32_setup_pages -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #endif /* _ASM_X86_ELF_H */ diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index 1f11ce4..c8cfd20 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -11,17 +11,54 @@ #include #include +#ifdef CONFIG_X86_32 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ + asm volatile( \ + "movw\t%w6, %%ds\n" \ + "1:\t" insn "\n" \ + "2:\tpushl\t%%ss\n" \ + "\tpopl\t%%ds\n" \ + "\t.section .fixup,\"ax\"\n" \ + "3:\tmov\t%3, %1\n" \ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS)) + +#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ + asm volatile("movw\t%w7, %%es\n" \ + "1:\tmovl\t%%es:%2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ + "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\ + "\tjnz\t1b\n" \ + "3:\tpushl\t%%ss\n" \ + "\tpopl\t%%es\n" \ + "\t.section .fixup,\"ax\"\n" \ + "4:\tmov\t%5, %1\n" \ + "\tjmp\t3b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ + "+m" (*uaddr), "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS)) +#else +#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ + typecheck(u32 *, uaddr); \ asm volatile("1:\t" insn "\n" \ "2:\t.section .fixup,\"ax\"\n" \ "3:\tmov\t%3, %1\n" \ "\tjmp\t2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "=r" (oldval), "=r" (ret), \ + "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\ : "i" (-EFAULT), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ + typecheck(u32 *, uaddr); \ asm volatile("1:\tmovl %2, %0\n" \ "\tmovl\t%0, %3\n" \ "\t" insn "\n" \ @@ -34,10 +71,12 @@ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=&a" (oldval), "=&r" (ret), \ - "+m" (*uaddr), "=&r" (tem) \ + "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\ + "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) +#endif -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) switch (op) { case FUTEX_OP_SET: +#ifdef CONFIG_X86_32 + __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg); +#else __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); +#endif break; case FUTEX_OP_ADD: +#ifdef CONFIG_X86_32 + __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval, + uaddr, 
oparg); +#else __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, uaddr, oparg); +#endif break; case FUTEX_OP_OR: __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); @@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval) { @@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, return -ENOSYS; #endif - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" - "2:\t.section .fixup, \"ax\"\n" + asm volatile( +#ifdef CONFIG_X86_32 + "\tmovw %w5, %%ds\n" + "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n" + "2:\tpushl %%ss\n" + "\tpopl %%ds\n" +#else + "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" + "2:\n" +#endif + "\t.section .fixup, \"ax\"\n" "3:\tmov %2, %0\n" "\tjmp 2b\n" "\t.previous\n" _ASM_EXTABLE(1b, 3b) +#ifdef CONFIG_X86_32 : "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS) +#else + : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)) : "i" (-EFAULT), "r" (newval), "0" (oldval) +#endif : "memory" ); diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index c991b3a..e47dda2 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -77,6 +77,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) { int err; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)fx < PAX_USER_SHADOW_BASE) + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE); +#endif + asm volatile("1: rex64/fxrstor (%[fx])\n\t" "2:\n" ".section .fixup,\"ax\"\n" @@ -127,6 +132,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) { int err; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)fx < PAX_USER_SHADOW_BASE) + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE); +#endif + asm volatile("1: rex64/fxsave (%[fx])\n\t" "2:\n" ".section .fixup,\"ax\"\n" @@ -220,13 +230,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) } /* We need a safe address that is cheap to find and that is already - in L1 during context switch. The best choices are unfortunately - different for UP and SMP */ -#ifdef CONFIG_SMP -#define safe_address (__per_cpu_offset[0]) -#else -#define safe_address (kstat_cpu(0).cpustat.user) -#endif + in L1 during context switch. */ +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0) /* * These must be called with preempt disabled diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 6a45ec4..2104674 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -214,6 +214,17 @@ extern void set_iounmap_nonlazy(void); #include +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +static inline int valid_phys_addr_range(unsigned long addr, size_t count) +{ + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; +} + +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) +{ + return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 
1 : 0; +} + /* * Convert a virtual cached pointer to an uncached pointer */ diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 345c99c..7938698 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H -extern struct dma_map_ops nommu_dma_ops; +extern const struct dma_map_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 9e2b952..557206e 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void) sti; \ sysexit +#define GET_CR0_INTO_RDI mov %cr0, %rdi +#define SET_RDI_INTO_CR0 mov %rdi, %cr0 +#define GET_CR3_INTO_RDI mov %cr3, %rdi +#define SET_RDI_INTO_CR3 mov %rdi, %cr3 + #else #define INTERRUPT_RETURN iret #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 76f5483..ae8e300 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -536,7 +536,7 @@ struct kvm_x86_ops { const struct trace_print_flags *exit_reasons_str; }; -extern struct kvm_x86_ops *kvm_x86_ops; +extern const struct kvm_x86_ops *kvm_x86_ops; int kvm_mmu_module_init(void); void kvm_mmu_module_exit(void); diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 2e99724..1fcf079 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -18,26 +18,90 @@ typedef struct { static inline void local_inc(local_t *l) { - asm volatile(_ASM_INC "%0" + asm volatile(_ASM_INC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_DEC "%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { - asm volatile(_ASM_DEC "%0" + asm volatile(_ASM_DEC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_INC "%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { - asm volatile(_ASM_ADD "%1,%0" + asm volatile(_ASM_ADD "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_SUB "%1,%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+m" (l->a.counter) : "ir" (i)); } static inline void local_sub(long i, local_t *l) { - asm volatile(_ASM_SUB "%1,%0" + asm volatile(_ASM_SUB "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_ADD "%1,%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+m" (l->a.counter) : "ir" (i)); } @@ -55,7 +119,24 @@ static inline int local_sub_and_test(long i, local_t *l) { unsigned char c; - asm volatile(_ASM_SUB "%2,%0; sete %1" + asm volatile(_ASM_SUB "%2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_ADD "%2,%0\n" + "jmp 0b\n" + ".popsection\n" 
+ _ASM_EXTABLE(0b, 1b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -73,7 +154,24 @@ static inline int local_dec_and_test(local_t *l) { unsigned char c; - asm volatile(_ASM_DEC "%0; sete %1" + asm volatile(_ASM_DEC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_INC "%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : : "memory"); return c != 0; @@ -91,7 +189,24 @@ static inline int local_inc_and_test(local_t *l) { unsigned char c; - asm volatile(_ASM_INC "%0; sete %1" + asm volatile(_ASM_INC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_DEC "%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : : "memory"); return c != 0; @@ -110,7 +225,24 @@ static inline int local_add_negative(long i, local_t *l) { unsigned char c; - asm volatile(_ASM_ADD "%2,%0; sets %1" + asm volatile(_ASM_ADD "%2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_SUB "%2,%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + "sets %1\n" : "+m" (l->a.counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -133,7 +265,23 @@ static inline long local_add_return(long i, local_t *l) #endif /* Modern 486+ processor */ __i = i; - asm volatile(_ASM_XADD "%0, %1;" + asm volatile(_ASM_XADD "%0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + _ASM_MOV "%0,%1\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+r" (i), "+m" (l->a.counter) : : "memory"); return i + __i; diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h index 01fdf56..3bb1b14 100644 --- a/arch/x86/include/asm/mc146818rtc.h +++ b/arch/x86/include/asm/mc146818rtc.h @@ -81,8 +81,8 @@ static inline unsigned char current_lock_cmos_reg(void) #else #define lock_cmos_prefix(reg) do {} while (0) #define lock_cmos_suffix(reg) do {} while (0) -#define lock_cmos(reg) -#define unlock_cmos() +#define lock_cmos(reg) do {} while (0) +#define unlock_cmos() do {} while (0) #define do_i_have_lock_cmos() 0 #define current_lock_cmos_reg() 0 #endif diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index ef51b50..514ba37 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -12,13 +12,13 @@ struct device; enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; struct microcode_ops { - enum ucode_state (*request_microcode_user) (int cpu, + enum ucode_state (* const request_microcode_user) (int cpu, const void __user *buf, size_t size); - enum ucode_state (*request_microcode_fw) (int cpu, + enum ucode_state (* const request_microcode_fw) (int cpu, struct device *device); - void (*microcode_fini_cpu) (int cpu); + void (* const microcode_fini_cpu) (int cpu); /* * The generic 'microcode_core' part guarantees that @@ -38,18 +38,18 @@ struct ucode_cpu_info { extern struct ucode_cpu_info ucode_cpu_info[]; #ifdef CONFIG_MICROCODE_INTEL -extern struct microcode_ops * __init init_intel_microcode(void); +extern const struct 
microcode_ops * __init init_intel_microcode(void); #else -static inline struct microcode_ops * __init init_intel_microcode(void) +static inline const struct microcode_ops * __init init_intel_microcode(void) { return NULL; } #endif /* CONFIG_MICROCODE_INTEL */ #ifdef CONFIG_MICROCODE_AMD -extern struct microcode_ops * __init init_amd_microcode(void); +extern const struct microcode_ops * __init init_amd_microcode(void); #else -static inline struct microcode_ops * __init init_amd_microcode(void) +static inline const struct microcode_ops * __init init_amd_microcode(void) { return NULL; } diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h index 593e51d..fa69c9a 100644 --- a/arch/x86/include/asm/mman.h +++ b/arch/x86/include/asm/mman.h @@ -5,4 +5,14 @@ #include +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#ifdef CONFIG_X86_32 +#define arch_mmap_check i386_mmap_check +int i386_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags); +#endif +#endif +#endif + #endif /* _ASM_X86_MMAN_H */ diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 80a1dee..239c67d 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -9,10 +9,23 @@ * we put the segment information here. */ typedef struct { - void *ldt; + struct desc_struct *ldt; int size; struct mutex lock; - void *vdso; + unsigned long vdso; + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + unsigned long user_cs_base; + unsigned long user_cs_limit; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpumask_t cpu_user_cs_mask; +#endif + +#endif +#endif + } mm_context_t; #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 4a2d4e0..a896757 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + unsigned int i; + pgd_t *pgd; + + pax_open_kernel(); + pgd = get_cpu_pgd(smp_processor_id()); + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) + if (paravirt_enabled()) + set_pgd(pgd+i, native_make_pgd(0)); + else + pgd[i] = native_make_pgd(0); + pax_close_kernel(); +#endif + #ifdef CONFIG_SMP if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); @@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) + int tlbstate = TLBSTATE_OK; +#endif if (likely(prev != next)) { /* stop flush ipis for the previous mm */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); #ifdef CONFIG_SMP +#ifdef CONFIG_X86_32 + tlbstate = percpu_read(cpu_tlbstate.state); +#endif percpu_write(cpu_tlbstate.state, TLBSTATE_OK); percpu_write(cpu_tlbstate.active_mm, next); #endif cpumask_set_cpu(cpu, mm_cpumask(next)); /* Re-load page tables */ +#ifdef CONFIG_PAX_PER_CPU_PGD + pax_open_kernel(); + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); + pax_close_kernel(); + load_cr3(get_cpu_pgd(cpu)); +#else load_cr3(next->pgd); +#endif /* * load the LDT, if the LDT is different: */ if (unlikely(prev->context.ldt != next->context.ldt)) load_LDT_nolock(&next->context); - } + +#if 
defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + if (!(__supported_pte_mask & _PAGE_NX)) { + smp_mb__before_clear_bit(); + cpu_clear(cpu, prev->context.cpu_user_cs_mask); + smp_mb__after_clear_bit(); + cpu_set(cpu, next->context.cpu_user_cs_mask); + } +#endif + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || + prev->context.user_cs_limit != next->context.user_cs_limit)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); #ifdef CONFIG_SMP + else if (unlikely(tlbstate != TLBSTATE_OK)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif +#endif + + } else { + +#ifdef CONFIG_PAX_PER_CPU_PGD + pax_open_kernel(); + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); + pax_close_kernel(); + load_cr3(get_cpu_pgd(cpu)); +#endif + +#ifdef CONFIG_SMP percpu_write(cpu_tlbstate.state, TLBSTATE_OK); BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); @@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. */ + +#ifndef CONFIG_PAX_PER_CPU_PGD load_cr3(next->pgd); +#endif + load_LDT_nolock(&next->context); + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (!(__supported_pte_mask & _PAGE_NX)) + cpu_set(cpu, next->context.cpu_user_cs_mask); +#endif + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) +#ifdef CONFIG_PAX_PAGEEXEC + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) +#endif + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif + } - } #endif + } } #define activate_mm(prev, next) \ diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 3e2ce58..51d8f7e 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -59,13 +59,31 @@ #error unknown processor family #endif +#ifdef CONFIG_PAX_MEMORY_UDEREF +#define MODULE_PAX_UDEREF "UDEREF " +#else +#define MODULE_PAX_UDEREF "" +#endif + #ifdef CONFIG_X86_32 # ifdef CONFIG_4KSTACKS # define MODULE_STACKSIZE "4KSTACKS " # else # define MODULE_STACKSIZE "" # endif -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE +# ifdef CONFIG_PAX_KERNEXEC +# define MODULE_PAX_KERNEXEC "KERNEXEC " +# else +# define MODULE_PAX_KERNEXEC "" +# endif +# ifdef CONFIG_GRKERNSEC +# define MODULE_GRSEC "GRSECURITY " +# else +# define MODULE_GRSEC "" +# endif +# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF +#else +# define MODULE_ARCH_VERMAGIC MODULE_PAX_UDEREF #endif #endif /* _ASM_X86_MODULE_H */ diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index 6f1b733..fe54285 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -15,6 +15,10 @@ */ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +#ifdef CONFIG_PAX_PAGEEXEC +#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1 +#endif + #ifdef CONFIG_4KSTACKS #define THREAD_ORDER 0 #else diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5653f43..122e562 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -720,6 +720,21 
@@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, pv_mmu_ops.set_fixmap(idx, phys, flags); } +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long pax_open_kernel(void) +{ + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); +} + +static inline unsigned long pax_close_kernel(void) +{ + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); +} +#else +static inline unsigned long pax_open_kernel(void) { return 0; } +static inline unsigned long pax_close_kernel(void) { return 0; } +#endif + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) static inline int arch_spin_is_locked(struct arch_spinlock *lock) @@ -936,7 +951,7 @@ extern void default_banner(void); #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) -#define PARA_INDIRECT(addr) *%cs:addr +#define PARA_INDIRECT(addr) *%ss:addr #endif #define INTERRUPT_RETURN \ @@ -1013,6 +1028,21 @@ extern void default_banner(void); PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ CLBR_NONE, \ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) + +#define GET_CR0_INTO_RDI \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ + mov %rax,%rdi + +#define SET_RDI_INTO_CR0 \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) + +#define GET_CR3_INTO_RDI \ + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ + mov %rax,%rdi + +#define SET_RDI_INTO_CR3 \ + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index db9ef55..7578058 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -312,6 +312,12 @@ struct pv_mmu_ops { an mfn. We can tell which is which from the index. 
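[Annotation, not part of the patch] The pax_open_kernel()/pax_close_kernel() pair introduced here (as paravirt ops above, and as native CR0.WP toggles further down in pgtable.h) brackets every direct page-table store under KERNEXEC. A minimal user-space analogue of that bracketing discipline, with made-up names (wp_enabled stands in for CR0.WP; this is a sketch, not kernel code):

#include <assert.h>
#include <stdio.h>

static int wp_enabled = 1;               /* models CR0.WP being set */

static void open_kernel(void)            /* models pax_open_kernel() */
{
        assert(wp_enabled);              /* the patch BUG()s on an unbalanced open */
        wp_enabled = 0;
}

static void close_kernel(void)           /* models pax_close_kernel() */
{
        assert(!wp_enabled);
        wp_enabled = 1;
}

static void protected_store(unsigned long *slot, unsigned long val)
{
        assert(!wp_enabled);             /* would be a write-protect fault in the kernel */
        *slot = val;
}

int main(void)
{
        unsigned long pmd_slot = 0;

        open_kernel();
        protected_store(&pmd_slot, 0x1234);
        close_kernel();
        printf("stored %#lx with write protection re-enabled\n", pmd_slot);
        return 0;
}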
*/ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags); + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long (*pax_open_kernel)(void); + unsigned long (*pax_close_kernel)(void); +#endif + }; struct arch_spinlock; diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index cd2a31d..f199fa7 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -91,16 +91,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev); extern void (*pcibios_disable_irq)(struct pci_dev *dev); struct pci_raw_ops { - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 val); }; -extern struct pci_raw_ops *raw_pci_ops; -extern struct pci_raw_ops *raw_pci_ext_ops; +extern const struct pci_raw_ops *raw_pci_ops; +extern const struct pci_raw_ops *raw_pci_ext_ops; -extern struct pci_raw_ops pci_direct_conf1; +extern const struct pci_raw_ops pci_direct_conf1; extern bool port_cf9_safe; /* arch_initcall level */ diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 271de94..ef944d6 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); +} + +static inline void pmd_populate_user(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); } diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 2334982..70bc412 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); *pmdp = pmd; + pax_close_kernel(); } static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 177b016..a526e3c 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); + pax_close_kernel(); } static inline void native_set_pud(pud_t *pudp, pud_t pud) { + pax_open_kernel(); set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); + pax_close_kernel(); } /* diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a34c785..5df98f8 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -76,12 +76,51 @@ extern struct list_head pgd_list; #define arch_end_context_switch(prev) do {} while(0) +#define pax_open_kernel() native_pax_open_kernel() +#define pax_close_kernel() native_pax_close_kernel() #endif /* CONFIG_PARAVIRT */ +#define __HAVE_ARCH_PAX_OPEN_KERNEL +#define __HAVE_ARCH_PAX_CLOSE_KERNEL + +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long native_pax_open_kernel(void) +{ + 
unsigned long cr0; + + preempt_disable(); + barrier(); + cr0 = read_cr0() ^ X86_CR0_WP; + BUG_ON(unlikely(cr0 & X86_CR0_WP)); + write_cr0(cr0); + return cr0 ^ X86_CR0_WP; +} + +static inline unsigned long native_pax_close_kernel(void) +{ + unsigned long cr0; + + cr0 = read_cr0() ^ X86_CR0_WP; + BUG_ON(unlikely(!(cr0 & X86_CR0_WP))); + write_cr0(cr0); + barrier(); + preempt_enable_no_resched(); + return cr0 ^ X86_CR0_WP; +} +#else +static inline unsigned long native_pax_open_kernel(void) { return 0; } +static inline unsigned long native_pax_close_kernel(void) { return 0; } +#endif + /* * The following only work if pte_present() is true. * Undefined behaviour if not.. */ +static inline int pte_user(pte_t pte) +{ + return pte_val(pte) & _PAGE_USER; +} + static inline int pte_dirty(pte_t pte) { return pte_flags(pte) & _PAGE_DIRTY; @@ -169,9 +208,29 @@ static inline pte_t pte_wrprotect(pte_t pte) return pte_clear_flags(pte, _PAGE_RW); } +static inline pte_t pte_mkread(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_USER); +} + static inline pte_t pte_mkexec(pte_t pte) { - return pte_clear_flags(pte, _PAGE_NX); +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + return pte_clear_flags(pte, _PAGE_NX); + else +#endif + return pte_set_flags(pte, _PAGE_USER); +} + +static inline pte_t pte_exprotect(pte_t pte) +{ +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + return pte_set_flags(pte, _PAGE_NX); + else +#endif + return pte_clear_flags(pte, _PAGE_USER); } static inline pte_t pte_mkdirty(pte_t pte) @@ -304,6 +363,15 @@ pte_t *populate_extra_pte(unsigned long vaddr); #endif #ifndef __ASSEMBLY__ + +#ifdef CONFIG_PAX_PER_CPU_PGD +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD]; +static inline pgd_t *get_cpu_pgd(unsigned int cpu) +{ + return cpu_pgd[cpu]; +} +#endif + #include static inline int pte_none(pte_t pte) @@ -474,7 +542,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) @@ -497,7 +565,12 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +#ifdef CONFIG_PAX_PER_CPU_PGD +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address)) +#endif + /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's @@ -508,6 +581,20 @@ static inline int pgd_none(pgd_t pgd) #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) +#ifdef CONFIG_X86_32 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY +#else +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) + +#ifdef CONFIG_PAX_MEMORY_UDEREF +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT) +#else +#define PAX_USER_SHADOW_BASE (_AC(0,UL)) +#endif + +#endif + #ifndef __ASSEMBLY__ extern int direct_gbpages; @@ -613,11 +700,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. 
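[Annotation, not part of the patch] The pte_mkexec()/pte_exprotect() changes above pick between the hardware NX bit and the USER bit depending on whether NX is supported; on legacy 32-bit CPUs the USER bit then feeds the exec-limit tracking. A small stand-alone restatement of just that flag logic (bit positions follow the usual x86 PTE layout, but the program is illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PTE_USER ((uint64_t)1 << 2)
#define PTE_NX   ((uint64_t)1 << 63)

static bool have_nx;                     /* stands in for __supported_pte_mask & _PAGE_NX */

static uint64_t pte_mkexec(uint64_t pte)
{
        return have_nx ? (pte & ~PTE_NX) : (pte | PTE_USER);
}

static uint64_t pte_exprotect(uint64_t pte)
{
        return have_nx ? (pte | PTE_NX) : (pte & ~PTE_USER);
}

int main(void)
{
        for (int nx = 0; nx <= 1; nx++) {
                have_nx = nx;
                uint64_t pte = PTE_NX;   /* start from a non-executable PTE */
                printf("nx=%d exec=%#llx noexec=%#llx\n", nx,
                       (unsigned long long)pte_mkexec(pte),
                       (unsigned long long)pte_exprotect(pte));
        }
        return 0;
}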
*/ -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) { - memcpy(dst, src, count * sizeof(pgd_t)); + pax_open_kernel(); + while (count--) + *dst++ = *src++; + pax_close_kernel(); } +#ifdef CONFIG_PAX_PER_CPU_PGD +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count); +#endif + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count); +#else +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {} +#endif #include #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index f686f49..e16ed74 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -25,9 +25,6 @@ struct mm_struct; struct vm_area_struct; -extern pgd_t swapper_pg_dir[1024]; -extern pgd_t trampoline_pg_dir[1024]; - static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } void paging_init(void); @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); # include #endif +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD]; +#ifdef CONFIG_X86_PAE +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; +#endif + #if defined(CONFIG_HIGHPTE) #define __KM_PTE \ (in_nmi() ? KM_NMI_PTE : \ @@ -72,7 +75,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); /* Clear a kernel PTE and flush it from the TLB */ #define kpte_clear_flush(ptep, vaddr) \ do { \ + pax_open_kernel(); \ pte_clear(&init_mm, (vaddr), (ptep)); \ + pax_close_kernel(); \ __flush_tlb_one((vaddr)); \ } while (0) @@ -84,6 +89,9 @@ do { \ #endif /* !__ASSEMBLY__ */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + /* * kern_addr_valid() is (1) for FLATMEM and (0) for * SPARSEMEM and DISCONTIGMEM diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index ed5903b..c7fe163 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -8,7 +8,7 @@ */ #ifdef CONFIG_X86_PAE # include -# define PMD_SIZE (1UL << PMD_SHIFT) +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE - 1)) #else # include @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) #endif +#ifdef CONFIG_PAX_KERNEXEC +#ifndef __ASSEMBLY__ +extern unsigned char MODULES_EXEC_VADDR[]; +extern unsigned char MODULES_EXEC_END[]; +#endif +#include +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) +#else +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) +#endif + #define MODULES_VADDR VMALLOC_START #define MODULES_END VMALLOC_END #define MODULES_LEN (MODULES_VADDR - MODULES_END) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 181be52..7ab9c31 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -16,10 +16,13 @@ extern pud_t level3_kernel_pgt[512]; extern pud_t level3_ident_pgt[512]; +extern pud_t level3_vmalloc_pgt[512]; +extern pud_t level3_vmemmap_pgt[512]; +extern pud_t level2_vmemmap_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; -extern pmd_t level2_ident_pgt[512]; -extern pgd_t 
init_level4_pgt[]; +extern pmd_t level2_ident_pgt[512*2]; +extern pgd_t init_level4_pgt[512]; #define swapper_pg_dir init_level4_pgt @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); *pmdp = pmd; + pax_close_kernel(); } static inline void native_pmd_clear(pmd_t *pmd) @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_t *pud) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { + pax_open_kernel(); *pgdp = pgd; + pax_close_kernel(); } static inline void native_pgd_clear(pgd_t *pgd) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 766ea16..5b96cb3 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t; #define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) +#define MODULES_EXEC_VADDR MODULES_VADDR +#define MODULES_EXEC_END MODULES_END + +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index d1f4a76..e1f9b51 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -16,12 +16,11 @@ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ /* If _PAGE_BIT_PRESENT is clear, we use these: */ @@ -39,7 +38,6 @@ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) @@ -55,8 +53,10 @@ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) -#else +#elif defined(CONFIG_KMEMCHECK) #define _PAGE_NX (_AT(pteval_t, 0)) +#else +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) #endif #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) @@ -93,6 +93,9 @@ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ _PAGE_ACCESSED) +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#define PAGE_SHARED_NOEXEC PAGE_SHARED + #define __PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) @@ -103,8 +106,8 @@ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) -#define 
__PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) @@ -163,8 +166,8 @@ * bits are combined, this will alow user to access the high address mapped * VDSO in the presence of CONFIG_COMPAT_VDSO */ -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd) { return native_pgd_val(pgd) & PTE_FLAGS_MASK; } +#endif +#if PAGETABLE_LEVELS == 3 +#include +#endif + +#if PAGETABLE_LEVELS == 2 +#include +#endif + +#ifndef __ASSEMBLY__ #if PAGETABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud) return pud.pud; } #else -#include - static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.pgd); @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) return pmd.pmd; } #else -#include - static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.pgd); @@ -278,7 +287,6 @@ typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern void set_nx(void); -extern int nx_enabled; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7e5c6a6..7093da3 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -269,7 +269,7 @@ struct tss_struct { } ____cacheline_aligned; -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); +extern struct tss_struct init_tss[NR_CPUS]; /* * Save the original ist values for checking stack pointers during debugging @@ -884,8 +884,15 @@ static inline void spin_lock_prefetch(const void *x) */ #define TASK_SIZE PAGE_OFFSET #define TASK_SIZE_MAX TASK_SIZE + +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) +#else #define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP +#endif + +#define STACK_TOP_MAX TASK_SIZE #define INIT_THREAD { \ .sp0 = sizeof(init_stack) + (long)&init_stack, \ @@ -902,7 +909,7 @@ static inline void spin_lock_prefetch(const void *x) */ #define INIT_TSS { \ .x86_tss = { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ @@ -913,11 +920,7 @@ static inline void spin_lock_prefetch(const void *x) extern unsigned long thread_saved_pc(struct task_struct *tsk); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) -#define KSTK_TOP(info) \ -({ \ - unsigned long *__ptr = (unsigned long *)(info); \ - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ -}) +#define KSTK_TOP(info) ((info)->task.thread.sp0) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. 
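[Annotation, not part of the patch] The SEGMEXEC definitions above halve the 32-bit task size and move STACK_TOP down for tasks carrying MF_PAX_SEGMEXEC. A back-of-the-envelope check of those numbers, assuming the common 3 GB PAGE_OFFSET (values only, no kernel code):

#include <stdio.h>

#define PAGE_OFFSET        0xc0000000ul
#define TASK_SIZE          PAGE_OFFSET
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

int main(void)
{
        int segmexec = 1;   /* pretend MF_PAX_SEGMEXEC is set for this task */
        unsigned long stack_top = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;

        printf("TASK_SIZE          = %#lx\n", TASK_SIZE);
        printf("SEGMEXEC_TASK_SIZE = %#lx\n", SEGMEXEC_TASK_SIZE);
        printf("STACK_TOP          = %#lx\n", stack_top);
        return 0;
}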
@@ -932,7 +935,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ + __regs__ = (struct pt_regs *)((task)->thread.sp0); \ __regs__ - 1; \ }) @@ -942,13 +945,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ - 0xc0000000 : 0xFFFFe000) + 0xc0000000 : 0xFFFFf000) #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ IA32_PAGE_OFFSET : TASK_SIZE_MAX) @@ -985,6 +988,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) +#endif + #define KSTK_EIP(task) (task_pt_regs(task)->ip) /* Get/set a process' ability to use the timestamp counter instruction */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 78cd1ea..37bd171 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -152,28 +152,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) } /* - * user_mode_vm(regs) determines whether a register set came from user mode. + * user_mode(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can + * be used. 
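[Annotation, not part of the patch] The ptrace.h hunk below swaps the names so that user_mode() becomes the full check (RPL or VM86) and user_mode_novm() the cheaper RPL-only one. A quick user-space demo of why the two differ, using the kernel's constant values (the program itself is illustrative):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      0x00020000   /* EFLAGS.VM */

static int user_mode_novm(unsigned long cs, unsigned long flags)
{
        (void)flags;
        return (cs & SEGMENT_RPL_MASK) == USER_RPL;
}

static int user_mode(unsigned long cs, unsigned long flags)
{
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        /* ordinary ring-3 code segment, no VM86 */
        printf("ring3: novm=%d full=%d\n",
               user_mode_novm(0x73, 0), user_mode(0x73, 0));
        /* VM86 task: CS may carry RPL 0 but EFLAGS.VM is set */
        printf("vm86 : novm=%d full=%d\n",
               user_mode_novm(0x00, X86_VM_MASK), user_mode(0x00, X86_VM_MASK));
        return 0;
}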
*/ -static inline int user_mode(struct pt_regs *regs) +static inline int user_mode_novm(struct pt_regs *regs) { #ifdef CONFIG_X86_32 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; #else - return !!(regs->cs & 3); + return !!(regs->cs & SEGMENT_RPL_MASK); #endif } -static inline int user_mode_vm(struct pt_regs *regs) +static inline int user_mode(struct pt_regs *regs) { #ifdef CONFIG_X86_32 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; #else - return user_mode(regs); + return user_mode_novm(regs); #endif } diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 562d4fd..cb2d6ba 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -18,7 +18,7 @@ extern struct machine_ops machine_ops; void native_machine_crash_shutdown(struct pt_regs *regs); void native_machine_shutdown(void); -void machine_real_restart(const unsigned char *code, int length); +void machine_real_restart(const unsigned char *code, unsigned int length); typedef void (*nmi_shootdown_cb)(int, struct die_args*); void nmi_shootdown_cpus(nmi_shootdown_cb callback); diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index 606ede1..45b2044 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -118,10 +118,26 @@ static inline void __down_read(struct rw_semaphore *sem) { asm volatile("# beginning down_read\n\t" LOCK_PREFIX _ASM_INC "(%1)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + LOCK_PREFIX _ASM_DEC "(%1)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + /* adds 0x00000001, returns the old value */ - " jns 1f\n" + " jns 2f\n" " call call_rwsem_down_read_failed\n" - "1:\n\t" + "2:\n\t" "# ending down_read\n\t" : "+m" (sem->count) : "a" (sem) @@ -136,13 +152,29 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) rwsem_count_t result, tmp; asm volatile("# beginning __down_read_trylock\n\t" " mov %0,%1\n\t" - "1:\n\t" + "2:\n\t" " mov %1,%2\n\t" " add %3,%2\n\t" - " jle 2f\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + "sub %3,%2\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + " jle 3f\n\t" LOCK_PREFIX " cmpxchg %2,%0\n\t" - " jnz 1b\n\t" - "2:\n\t" + " jnz 2b\n\t" + "3:\n\t" "# ending __down_read_trylock\n\t" : "+m" (sem->count), "=&a" (result), "=&r" (tmp) : "i" (RWSEM_ACTIVE_READ_BIAS) @@ -160,12 +192,28 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) tmp = RWSEM_ACTIVE_WRITE_BIAS; asm volatile("# beginning down_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + "mov %1,(%2)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + /* subtract 0x0000ffff, returns the old value */ " test %1,%1\n\t" /* was the count 0 before? 
*/ - " jz 1f\n" + " jz 2f\n" " call call_rwsem_down_write_failed\n" - "1:\n" + "2:\n" "# ending down_write" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (tmp) @@ -198,10 +246,26 @@ static inline void __up_read(struct rw_semaphore *sem) rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; asm volatile("# beginning __up_read\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + "mov %1,(%2)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + /* subtracts 1, returns the old value */ - " jns 1f\n\t" + " jns 2f\n\t" " call call_rwsem_wake\n" - "1:\n" + "2:\n" "# ending __up_read\n" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (tmp) @@ -216,11 +280,27 @@ static inline void __up_write(struct rw_semaphore *sem) rwsem_count_t tmp; asm volatile("# beginning __up_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + "mov %1,(%2)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + /* tries to transition 0xffff0001 -> 0x00000000 */ - " jz 1f\n" + " jz 2f\n" " call call_rwsem_wake\n" - "1:\n\t" + "2:\n\t" "# ending __up_write\n" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) @@ -234,13 +314,29 @@ static inline void __downgrade_write(struct rw_semaphore *sem) { asm volatile("# beginning __downgrade_write\n\t" LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + LOCK_PREFIX _ASM_SUB "%2,(%1)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + /* * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) */ - " jns 1f\n\t" + " jns 2f\n\t" " call call_rwsem_downgrade_wake\n" - "1:\n\t" + "2:\n\t" "# ending __downgrade_write\n" : "+m" (sem->count) : "a" (sem), "er" (-RWSEM_WAITING_BIAS) @@ -253,7 +349,23 @@ static inline void __downgrade_write(struct rw_semaphore *sem) static inline void rwsem_atomic_add(rwsem_count_t delta, struct rw_semaphore *sem) { - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + LOCK_PREFIX _ASM_SUB "%1,%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+m" (sem->count) : "er" (delta)); } @@ -266,7 +378,23 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, { rwsem_count_t tmp = delta; - asm volatile(LOCK_PREFIX "xadd %0,%1" + asm volatile(LOCK_PREFIX "xadd %0,%1\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + "mov %0,%1\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+r" (tmp), "+m" (sem->count) : : "memory"); diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 14e0ed8..7bad9b6 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -62,8 +62,8 @@ * 26 - ESPFIX small SS * 27 - per-cpu [ offset to per-cpu data area ] * 28 - stack_canary-20 [ for stack protector ] - * 29 - unused - * 30 - unused + * 29 - PCI BIOS CS + * 30 - PCI BIOS DS * 
31 - TSS for double fault handler */ #define GDT_ENTRY_TLS_MIN 6 @@ -77,6 +77,8 @@ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) @@ -88,7 +90,7 @@ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) #ifdef CONFIG_SMP #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) #else @@ -102,6 +104,12 @@ #define __KERNEL_STACK_CANARY 0 #endif +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17) +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) + +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18) +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* @@ -139,7 +147,7 @@ */ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) #else @@ -163,6 +171,8 @@ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3) #define __USER32_DS __USER_DS +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 + #define GDT_ENTRY_TSS 8 /* needs two entries */ #define GDT_ENTRY_LDT 10 /* needs two entries */ #define GDT_ENTRY_TLS_MIN 12 @@ -183,6 +193,7 @@ #endif #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3) #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4c2f63c..06ded5b 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -24,7 +24,7 @@ extern unsigned int num_processors; DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(int, cpu_number); +DECLARE_PER_CPU(unsigned int, cpu_number); static inline struct cpumask *cpu_sibling_mask(int cpu) { diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 3089f70..8a618b0 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -249,18 +249,50 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock) static inline void arch_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" - "jns 1f\n" - "call __read_lock_failed\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" "1:\n" + LOCK_PREFIX " addl $1,(%0)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + "jns 2f\n" + "call __read_lock_failed\n\t" + "2:\n" ::LOCK_PTR_REG (rw) : "memory"); } static inline void arch_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" - "jz 1f\n" - "call __write_lock_failed\n\t" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" "1:\n" + LOCK_PREFIX " addl %1,(%0)\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + "jz 2f\n" + "call __write_lock_failed\n\t" + "2:\n" ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } @@ -286,12 +318,45 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) 
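[Annotation, not part of the patch] The PAX_REFCOUNT asm repeated throughout the rwsem, spinlock and local_t hunks boils down to: perform the arithmetic, trap if the signed result overflowed, and let the fixup undo the operation before execution continues. A plain-C approximation of that idea using __builtin_add_overflow (GCC/clang), with no inline asm and purely hypothetical function names:

#include <stdio.h>

static int refcount_inc_checked(int *count)
{
        int newval;

        if (__builtin_add_overflow(*count, 1, &newval)) {
                /* the kernel raises an overflow trap here; the fixup then
                 * undoes the increment, so the counter never wraps */
                fprintf(stderr, "refcount overflow detected, undoing\n");
                return -1;
        }
        *count = newval;
        return 0;
}

int main(void)
{
        int c = 0x7ffffffe;

        refcount_inc_checked(&c);        /* ok: reaches 0x7fffffff */
        refcount_inc_checked(&c);        /* would wrap: rejected   */
        printf("count = %#x\n", c);
        return 0;
}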
static inline void arch_read_unlock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + LOCK_PREFIX "decl %0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + + :"+m" (rw->lock) : : "memory"); } static inline void arch_write_unlock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "addl %1, %0" + asm volatile(LOCK_PREFIX "addl %1, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT +#ifdef CONFIG_X86_32 + "into\n0:\n" +#else + "jno 0f\n" + "int $4\n0:\n" +#endif + ".pushsection .fixup,\"ax\"\n" + "1:\n" + LOCK_PREFIX "subl %1,%0\n" + "jmp 0b\n" + ".popsection\n" + _ASM_EXTABLE(0b, 1b) +#endif + : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index e7f4d33..0bef582 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -202,7 +202,7 @@ static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); - return __limit + 1; + return __limit; } static inline void native_clts(void) @@ -342,7 +342,7 @@ void enable_hlt(void); void cpu_idle_wait(void); -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index abd3e0e..cbddafc 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -8,12 +8,15 @@ #include #include #include +#include #include #include #define VERIFY_READ 0 #define VERIFY_WRITE 1 +extern void check_object_size(const void *ptr, unsigned long n, bool to); + /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with @@ -29,7 +32,12 @@ #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) +#ifdef CONFIG_X86_32 +void __set_fs(mm_segment_t x, int cpu); +void set_fs(mm_segment_t x); +#else #define set_fs(x) (current_thread_info()->addr_limit = (x)) +#endif #define segment_eq(a, b) ((a).seg == (b).seg) @@ -77,7 +85,33 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
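[Annotation, not part of the patch] The system.h hunk below replaces arch_align_stack() with a plain 16-byte alignment macro, presumably because PaX applies its own stack randomization elsewhere. What the macro does to an arbitrary stack pointer value:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
        unsigned long sp = 0xbffff3c7UL;

        printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));   /* 0xbffff3c0 */
        return 0;
}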
*/ -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) +#define access_ok(type, addr, size) \ +({ \ + long __size = size; \ + unsigned long __addr = (unsigned long)addr; \ + unsigned long __addr_ao = __addr & PAGE_MASK; \ + unsigned long __end_ao = __addr + __size - 1; \ + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \ + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ + while(__addr_ao <= __end_ao) { \ + char __c_ao; \ + __addr_ao += PAGE_SIZE; \ + if (__size > PAGE_SIZE) \ + cond_resched(); \ + if (__get_user(__c_ao, (char __user *)__addr)) \ + break; \ + if (type != VERIFY_WRITE) { \ + __addr = __addr_ao; \ + continue; \ + } \ + if (__put_user(__c_ao, (char __user *)__addr)) \ + break; \ + __addr = __addr_ao; \ + } \ + } \ + __ret_ao; \ +}) /* * The exception table consists of pairs of addresses: the first is the @@ -183,13 +217,21 @@ extern int __get_user_bad(void); asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") - +#ifdef CONFIG_X86_32 +#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n" +#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n" +#else +#define _ASM_LOAD_USER_DS(ds) +#define _ASM_LOAD_KERNEL_DS +#endif #ifdef CONFIG_X86_32 #define __put_user_asm_u64(x, addr, err, errret) \ - asm volatile("1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ + asm volatile(_ASM_LOAD_USER_DS(5) \ + "1: movl %%eax,%%ds:0(%2)\n" \ + "2: movl %%edx,%%ds:4(%2)\n" \ "3:\n" \ + _ASM_LOAD_KERNEL_DS \ ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ @@ -197,15 +239,18 @@ extern int __get_user_bad(void); _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=r" (err) \ - : "A" (x), "r" (addr), "i" (errret), "0" (err)) + : "A" (x), "r" (addr), "i" (errret), "0" (err), \ + "r"(__USER_DS)) #define __put_user_asm_ex_u64(x, addr) \ - asm volatile("1: movl %%eax,0(%1)\n" \ - "2: movl %%edx,4(%1)\n" \ + asm volatile(_ASM_LOAD_USER_DS(2) \ + "1: movl %%eax,%%ds:0(%1)\n" \ + "2: movl %%edx,%%ds:4(%1)\n" \ "3:\n" \ + _ASM_LOAD_KERNEL_DS \ _ASM_EXTABLE(1b, 2b - 1b) \ _ASM_EXTABLE(2b, 3b - 2b) \ - : : "A" (x), "r" (addr)) + : : "A" (x), "r" (addr), "r"(__USER_DS)) #define __put_user_x8(x, ptr, __ret_pu) \ asm volatile("call __put_user_8" : "=a" (__ret_pu) \ @@ -374,16 +419,18 @@ do { \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - asm volatile("1: mov"itype" %2,%"rtype"1\n" \ + asm volatile(_ASM_LOAD_USER_DS(5) \ + "1: mov"itype" %%ds:%2,%"rtype"1\n" \ "2:\n" \ + _ASM_LOAD_KERNEL_DS \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "=r" (err), ltype(x) \ - : "m" (__m(addr)), "i" (errret), "0" (err)) + : "=r" (err), ltype (x) \ + : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS)) #define __get_user_size_ex(x, ptr, size) \ do { \ @@ -407,10 +454,12 @@ do { \ } while (0) #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ - asm volatile("1: mov"itype" %1,%"rtype"0\n" \ + asm volatile(_ASM_LOAD_USER_DS(2) \ + "1: mov"itype" %%ds:%1,%"rtype"0\n" \ "2:\n" \ + _ASM_LOAD_KERNEL_DS \ _ASM_EXTABLE(1b, 2b - 1b) \ - : ltype(x) : "m" (__m(addr))) + : ltype(x) : "m" (__m(addr)), "r"(__USER_DS)) #define __put_user_nocheck(x, ptr, size) \ ({ \ @@ -424,13 +473,24 @@ do { \ int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ - (x) = 
(__force __typeof__(*(ptr)))__gu_val; \ + (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) /* FIXME: this hack is definitely wrong -AK */ struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct __user *)(x)) +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define ____m(x) \ +({ \ + unsigned long ____x = (unsigned long)(x); \ + if (____x < PAX_USER_SHADOW_BASE) \ + ____x += PAX_USER_SHADOW_BASE; \ + (void __user *)____x; \ +}) +#else +#define ____m(x) (x) +#endif +#define __m(x) (*(struct __large_struct __user *)____m(x)) /* * Tell gcc we read from memory instead of writing: this is because @@ -438,21 +498,26 @@ struct __large_struct { unsigned long buf[100]; }; * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - asm volatile("1: mov"itype" %"rtype"1,%2\n" \ + asm volatile(_ASM_LOAD_USER_DS(5) \ + "1: mov"itype" %"rtype"1,%%ds:%2\n" \ "2:\n" \ + _ASM_LOAD_KERNEL_DS \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(1b, 3b) \ : "=r"(err) \ - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\ + "r"(__USER_DS)) #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ - asm volatile("1: mov"itype" %"rtype"0,%1\n" \ + asm volatile(_ASM_LOAD_USER_DS(2) \ + "1: mov"itype" %"rtype"0,%%ds:%1\n" \ "2:\n" \ + _ASM_LOAD_KERNEL_DS \ _ASM_EXTABLE(1b, 2b - 1b) \ - : : ltype(x), "m" (__m(addr))) + : : ltype(x), "m" (__m(addr)), "r"(__USER_DS)) /* * uaccess_try and catch @@ -530,7 +595,7 @@ struct __large_struct { unsigned long buf[100]; }; #define get_user_ex(x, ptr) do { \ unsigned long __gue_val; \ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ - (x) = (__force __typeof__(*(ptr)))__gue_val; \ + (x) = (__typeof__(*(ptr)))__gue_val; \ } while (0) #ifdef CONFIG_X86_WP_WORKS_OK @@ -567,6 +632,7 @@ extern struct movsl_mask { #define ARCH_HAS_NOCACHE_UACCESS 1 +#define ARCH_HAS_SORT_EXTABLE #ifdef CONFIG_X86_32 # include "uaccess_32.h" #else diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 088d09f..c719d0a 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero static __always_inline unsigned long __must_check __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + if (__builtin_constant_p(n)) { unsigned long ret; @@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) return ret; } } + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_to_user_ll(to, from, n); } @@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n) static __always_inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + /* Avoid zeroing the tail if the copy fails.. 
* If 'n' is constant and 1, 2, or 4, we do still zero on a failure, * but as the zeroing behaviour is only significant when n is not @@ -138,6 +146,10 @@ static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + if (__builtin_constant_p(n)) { unsigned long ret; @@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) return ret; } } + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); return __copy_from_user_ll(to, from, n); } @@ -160,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, const void __user *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + if (__builtin_constant_p(n)) { unsigned long ret; @@ -182,15 +200,19 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { - return __copy_from_user_ll_nocache_nozero(to, from, n); -} + if ((long)n < 0) + return n; -unsigned long __must_check copy_to_user(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check _copy_from_user(void *to, - const void __user *from, - unsigned long n); + return __copy_from_user_ll_nocache_nozero(to, from, n); +} +extern void copy_to_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + __compiletime_error("copy_to_user() buffer size is not provably correct") +#else + __compiletime_warning("copy_to_user() buffer size is not provably correct") +#endif +; extern void copy_from_user_overflow(void) #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS @@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void) #endif ; -static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) +/** + * copy_to_user: - Copy a block of data into user space. + * @to: Destination address, in user space. + * @from: Source address, in kernel space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from kernel space to user space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +static inline unsigned long __must_check +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + int sz = __compiletime_object_size(from); + + if (unlikely(sz != -1 && sz < n)) + copy_to_user_overflow(); + else if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +} + +/** + * copy_from_user: - Copy a block of data from user space. + * @to: Destination address, in kernel space. + * @from: Source address, in user space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from user space to kernel space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. 
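[Annotation, not part of the patch] The hardened 32-bit copy_from_user() below keeps the compile-time object-size check, performs access_ok() itself, and zero-fills the kernel buffer when the user range is rejected, so callers never read stale stack contents. A plain-C restatement of that failure-path behaviour with user-space stand-ins for the kernel helpers (fake_access_ok/fake_raw_copy are invented names):

#include <string.h>
#include <stdio.h>

static int fake_access_ok(const void *uaddr, unsigned long n)
{ (void)uaddr; (void)n; return 0; }          /* pretend the range is bad */

static unsigned long fake_raw_copy(void *to, const void *from, unsigned long n)
{ memcpy(to, from, n); return 0; }

static unsigned long demo_copy_from_user(void *to, const void *from, unsigned long n)
{
        if (fake_access_ok(from, n))
                n = fake_raw_copy(to, from, n);  /* normal path */
        else if ((long)n > 0)
                memset(to, 0, n);                /* rejected: zero the kernel buffer */
        return n;                                /* bytes not copied */
}

int main(void)
{
        char buf[8] = "garbage";
        unsigned long left = demo_copy_from_user(buf, (void *)0xdeadbeef, sizeof(buf));

        printf("uncopied=%lu first byte=%d\n", left, buf[0]);   /* 8, 0 */
        return 0;
}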
+ */ +static inline unsigned long __must_check +copy_from_user(void *to, const void __user *from, unsigned long n) { int sz = __compiletime_object_size(to); - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); - else + if (unlikely(sz != -1 && sz < n)) copy_from_user_overflow(); - + else if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else if ((long)n > 0) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + memset(to, 0, n); + } return n; } diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 316708d..8e13510 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -11,6 +11,9 @@ #include #include #include +#include + +#define set_fs(x) (current_thread_info()->addr_limit = (x)) /* * Copy To/From Userspace @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *from, unsigned len) return ret; } -__must_check unsigned long -_copy_to_user(void __user *to, const void *from, unsigned len); -__must_check unsigned long -_copy_from_user(void *to, const void __user *from, unsigned len); +static __always_inline __must_check unsigned long +__copy_to_user(void __user *to, const void *from, unsigned len); +static __always_inline __must_check unsigned long +__copy_from_user(void *to, const void __user *from, unsigned len); __must_check unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, - unsigned long n) + unsigned n) { - int sz = __compiletime_object_size(to); - might_fault(); - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); -#ifdef CONFIG_DEBUG_VM - else - WARN(1, "Buffer overflow detected!\n"); -#endif + + if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else if ((int)n > 0) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + memset(to, 0, n); + } return n; } @@ -65,17 +68,35 @@ int copy_to_user(void __user *dst, const void *src, unsigned size) { might_fault(); - return _copy_to_user(dst, src, size); + if (access_ok(VERIFY_WRITE, dst, size)) + size = __copy_to_user(dst, src, size); + return size; } static __always_inline __must_check -int __copy_from_user(void *dst, const void __user *src, unsigned size) +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size) { - int ret = 0; + int sz = __compiletime_object_size(dst); + unsigned ret = 0; might_fault(); - if (!__builtin_constant_p(size)) + + if ((int)size < 0) + return size; + + if (unlikely(sz != -1 && sz < size)) { +#ifdef CONFIG_DEBUG_VM + WARN(1, "Buffer overflow detected!\n"); +#endif + return size; + } + + if (!__builtin_constant_p(size)) { + check_object_size(dst, size, false); + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; return copy_user_generic(dst, (__force void *)src, size); + } switch (size) { case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, ret, "b", "b", "=q", 1); @@ -108,18 +129,36 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size) ret, "q", "", "=r", 8); return ret; default: + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; return copy_user_generic(dst, (__force void *)src, size); } } static __always_inline __must_check -int __copy_to_user(void __user *dst, const void *src, unsigned size) +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size) { - int ret = 0; + int sz = 
__compiletime_object_size(src); + unsigned ret = 0; might_fault(); - if (!__builtin_constant_p(size)) + + if ((int)size < 0) + return size; + + if (unlikely(sz != -1 && sz < size)) { +#ifdef CONFIG_DEBUG_VM + WARN(1, "Buffer overflow detected!\n"); +#endif + return size; + } + + if (!__builtin_constant_p(size)) { + check_object_size(src, size, true); + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; return copy_user_generic((__force void *)dst, src, size); + } switch (size) { case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, ret, "b", "b", "iq", 1); @@ -152,19 +191,30 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) ret, "q", "", "er", 8); return ret; default: + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; return copy_user_generic((__force void *)dst, src, size); } } static __always_inline __must_check -int __copy_in_user(void __user *dst, const void __user *src, unsigned size) +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size) { - int ret = 0; + unsigned ret = 0; might_fault(); - if (!__builtin_constant_p(size)) + + if ((int)size < 0) + return size; + + if (!__builtin_constant_p(size)) { + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; return copy_user_generic((__force void *)dst, (__force void *)src, size); + } switch (size) { case 1: { u8 tmp; @@ -204,6 +254,10 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) return ret; } default: + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; return copy_user_generic((__force void *)dst, (__force void *)src, size); } @@ -222,33 +276,45 @@ __must_check unsigned long __clear_user(void __user *mem, unsigned long len); static __must_check __always_inline int __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) { + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; return copy_user_generic(dst, (__force const void *)src, size); } -static __must_check __always_inline int +static __must_check __always_inline unsigned long __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) { + if ((int)size < 0) + return size; + + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; return copy_user_generic((__force void *)dst, src, size); } -extern long __copy_user_nocache(void *dst, const void __user *src, +extern unsigned long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); -static inline int -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) { might_sleep(); + + if ((int)size < 0) + return size; + return __copy_user_nocache(dst, src, size, 1); } -static inline int -__copy_from_user_inatomic_nocache(void *dst, const void __user *src, +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) { + if ((int)size < 0) + return size; + return __copy_user_nocache(dst, src, size, 0); } -unsigned long +extern unsigned long copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); #endif /* _ASM_X86_UACCESS_64_H */ diff --git a/arch/x86/include/asm/vgtod.h 
b/arch/x86/include/asm/vgtod.h index 3d61e20..9507180 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -14,6 +14,7 @@ struct vsyscall_gtod_data { int sysctl_enabled; struct timezone sys_tz; struct { /* extract of a clocksource struct */ + char name[8]; cycle_t (*vread)(void); cycle_t cycle_last; cycle_t mask; diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h index 61e08c0..b0da582 100644 --- a/arch/x86/include/asm/vmi.h +++ b/arch/x86/include/asm/vmi.h @@ -191,6 +191,7 @@ struct vrom_header { u8 reserved[96]; /* Reserved for headers */ char vmi_init[8]; /* VMI_Init jump point */ char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ + char rom_data[8048]; /* rest of the option ROM */ } __attribute__((packed)); struct pnp_header { diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index d0983d2..1f7c9e9 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -15,9 +15,10 @@ enum vsyscall_num { #ifdef __KERNEL__ #include +#include +#include #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) /* Definitions for CONFIG_GENERIC_TIME definitions */ #define __section_vsyscall_gtod_data __attribute__ \ @@ -31,7 +32,6 @@ enum vsyscall_num { #define VGETCPU_LSL 2 extern int __vgetcpu_mode; -extern volatile unsigned long __jiffies; /* kernel space (writeable) */ extern int vgetcpu_mode; @@ -39,6 +39,9 @@ extern struct timezone sys_tz; extern void map_vsyscall(void); +extern int vgettimeofday(struct timeval * tv, struct timezone * tz); +extern time_t vtime(time_t *t); +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache); #endif /* __KERNEL__ */ #endif /* _ASM_X86_VSYSCALL_H */ diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 2c4390c..0dda6eb 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -59,6 +59,12 @@ static inline int fpu_xrstor_checking(struct fpu *fpu) static inline int xsave_user(struct xsave_struct __user *buf) { int err; + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)buf < PAX_USER_SHADOW_BASE) + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE); +#endif + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" "2:\n" ".section .fixup,\"ax\"\n" @@ -85,6 +91,11 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) u32 lmask = mask; u32 hmask = mask >> 32; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE) + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE); +#endif + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" "2:\n" ".section .fixup,\"ax\"\n" diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c05872a..eb10001 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1472,7 +1472,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), }, }, - {} + { NULL, NULL, {{0, {0}}}, NULL} }; /* diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S index 580b4e2..e0a70ff 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.S @@ -104,7 +104,7 @@ _start: movl %eax, %ecx orl %edx, 
%ecx jz 1f - movl $0xc0000080, %ecx + mov $MSR_EFER, %ecx wrmsr 1: diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index fcc3c61..13001d3 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -11,11 +11,12 @@ #include #include #include +#include #include "realmode/wakeup.h" #include "sleep.h" -unsigned long acpi_wakeup_address; +unsigned long acpi_wakeup_address = 0x2000; unsigned long acpi_realmode_flags; /* address in low memory of the wakeup routine. */ @@ -96,8 +97,12 @@ int acpi_save_state_mem(void) header->trampoline_segment = setup_trampoline() >> 4; #ifdef CONFIG_SMP stack_start.sp = temp_stack + sizeof(temp_stack); + + pax_open_kernel(); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); + pax_close_kernel(); + initial_gs = per_cpu_offset(smp_processor_id()); #endif initial_code = (unsigned long)wakeup_long64; diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 13ab720..95d5442 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -30,13 +30,11 @@ wakeup_pmode_return: # and restore the stack ... but you need gdt for this to work movl saved_context_esp, %esp - movl %cs:saved_magic, %eax - cmpl $0x12345678, %eax + cmpl $0x12345678, saved_magic jne bogus_magic # jump to place where we left off - movl saved_eip, %eax - jmp *%eax + jmp *(saved_eip) bogus_magic: jmp bogus_magic diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 7023773..a29b372 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -247,7 +247,7 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, if (!*poff || ptr < text || ptr >= text_end) continue; /* turn DS segment override prefix into lock prefix */ - if (*ptr == 0x3e) + if (*ktla_ktva(ptr) == 0x3e) text_poke(ptr, ((unsigned char []){0xf0}), 1); }; mutex_unlock(&text_mutex); @@ -268,7 +268,7 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, if (!*poff || ptr < text || ptr >= text_end) continue; /* turn lock prefix into DS segment override prefix */ - if (*ptr == 0xf0) + if (*ktla_ktva(ptr) == 0xf0) text_poke(ptr, ((unsigned char []){0x3E}), 1); }; mutex_unlock(&text_mutex); @@ -436,7 +436,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ - memcpy(insnbuf, p->instr, p->len); + memcpy(insnbuf, ktla_ktva(p->instr), p->len); used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, (unsigned long)p->instr, p->len); @@ -504,7 +504,7 @@ void __init alternative_instructions(void) if (smp_alt_once) free_init_pages("SMP alternatives", (unsigned long)__smp_locks, - (unsigned long)__smp_locks_end); + PAGE_ALIGN((unsigned long)__smp_locks_end)); restart_nmi(); } @@ -521,13 +521,17 @@ void __init alternative_instructions(void) * instructions. And on the local CPU you need to be protected again NMI or MCE * handlers seeing an inconsistent instruction while you patch. */ -static void *__init_or_module text_poke_early(void *addr, const void *opcode, +static void *__kprobes text_poke_early(void *addr, const void *opcode, size_t len) { unsigned long flags; local_irq_save(flags); - memcpy(addr, opcode, len); + + pax_open_kernel(); + memcpy(ktla_ktva(addr), opcode, len); sync_core(); + pax_close_kernel(); + local_irq_restore(flags); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. 
*/ @@ -549,36 +553,22 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode, */ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) { - unsigned long flags; - char *vaddr; + unsigned char *vaddr = ktla_ktva(addr); struct page *pages[2]; - int i; + size_t i; if (!core_kernel_text((unsigned long)addr)) { - pages[0] = vmalloc_to_page(addr); - pages[1] = vmalloc_to_page(addr + PAGE_SIZE); + pages[0] = vmalloc_to_page(vaddr); + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); } else { - pages[0] = virt_to_page(addr); + pages[0] = virt_to_page(vaddr); WARN_ON(!PageReserved(pages[0])); - pages[1] = virt_to_page(addr + PAGE_SIZE); + pages[1] = virt_to_page(vaddr + PAGE_SIZE); } BUG_ON(!pages[0]); - local_irq_save(flags); - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); - if (pages[1]) - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); - clear_fixmap(FIX_TEXT_POKE0); - if (pages[1]) - clear_fixmap(FIX_TEXT_POKE1); - local_flush_tlb(); - sync_core(); - /* Could also do a CLFLUSH here to speed up CPU recovery; but - that causes hangs on some VIA CPUs. */ + text_poke_early(addr, opcode, len); for (i = 0; i < len; i++) - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); - local_irq_restore(flags); + BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]); return addr; } diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 4424c73..e90c367 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -2286,7 +2286,7 @@ static void prealloc_protection_domains(void) } } -static struct dma_map_ops amd_iommu_dma_ops = { +static const struct dma_map_ops amd_iommu_dma_ops = { .alloc_coherent = alloc_coherent, .free_coherent = free_coherent, .map_page = map_page, diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ad1515d..96832b6 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -696,7 +696,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, GFP_ATOMIC); if (!ioapic_entries) - return 0; + return NULL; for (apic = 0; apic < nr_ioapics; apic++) { ioapic_entries[apic] = @@ -713,7 +713,7 @@ nomem: kfree(ioapic_entries[apic]); kfree(ioapic_entries); - return 0; + return NULL; } /* @@ -1123,7 +1123,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, } EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); -void lock_vector_lock(void) +void lock_vector_lock(void) __acquires(vector_lock) { /* Used to the online set of cpus does not change * during assign_irq_vector. @@ -1131,7 +1131,7 @@ void lock_vector_lock(void) raw_spin_lock(&vector_lock); } -void unlock_vector_lock(void) +void unlock_vector_lock(void) __releases(vector_lock) { raw_spin_unlock(&vector_lock); } diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index c4f9182..9e252a4 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex); * This is for buggy BIOS's that refer to (real mode) segment 0x40 * even though they are called in protected mode. 
*/ -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); static const char driver_version[] = "1.16ac"; /* no spaces */ @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call) BUG_ON(cpu != 0); gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; + + pax_open_kernel(); gdt[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); apm_irq_save(flags); APM_DO_SAVE_SEGS; @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call) &call->esi); APM_DO_RESTORE_SEGS; apm_irq_restore(flags); + + pax_open_kernel(); gdt[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); return call->eax & 0xff; @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call) BUG_ON(cpu != 0); gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; + + pax_open_kernel(); gdt[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); apm_irq_save(flags); APM_DO_SAVE_SEGS; @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call) &call->eax); APM_DO_RESTORE_SEGS; apm_irq_restore(flags); + + pax_open_kernel(); gdt[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); return error; } @@ -975,7 +989,7 @@ recalc: static void apm_power_off(void) { - unsigned char po_bios_call[] = { + const unsigned char po_bios_call[] = { 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ 0x8e, 0xd0, /* movw ax,ss */ 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ @@ -1931,7 +1945,10 @@ static const struct file_operations apm_bios_fops = { static struct miscdevice apm_device = { APM_MINOR_DEV, "apm_bios", - &apm_bios_fops + &apm_bios_fops, + {NULL, NULL}, + NULL, + NULL }; @@ -2252,7 +2269,7 @@ static struct dmi_system_id __initdata apm_dmi_table[] = { { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL} }; /* @@ -2355,12 +2372,15 @@ static int __init apm_init(void) * code to that CPU. 
*/ gdt = get_cpu_gdt_table(0); + + pax_open_kernel(); set_desc_base(&gdt[APM_CS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); set_desc_base(&gdt[APM_CS_16 >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); + pax_close_kernel(); proc_create("apm", 0, NULL, &apm_file_ops); diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index dfdbf64..5b9f997 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -115,6 +115,11 @@ void foo(void) OFFSET(PV_CPU_iret, pv_cpu_ops, iret); OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); + +#ifdef CONFIG_PAX_KERNEXEC + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); +#endif + #endif #ifdef CONFIG_XEN diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 4a6aeed..31b7fb8 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -63,6 +63,18 @@ int main(void) OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); + +#ifdef CONFIG_PAX_KERNEXEC + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd); +#endif + #endif @@ -115,6 +127,7 @@ int main(void) ENTRY(cr8); BLANK(); #undef ENTRY + DEFINE(TSS_size, sizeof(struct tss_struct)); DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist)); BLANK(); DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 3a785da..68d7133 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_perf_event.o = -pg endif -# Make sure load_percpu_segment has no stackprotector -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_common.o := $(nostackp) - obj-y := intel_cacheinfo.o addon_cpuid_features.o obj-y += proc.o capflags.o powerflags.o common.o obj-y += vmware.o hypervisor.o sched.o mshyperv.o diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d938871..1e97c91 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = { static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { -#ifdef CONFIG_X86_64 - /* - * We need valid kernel segments for data and code in long mode too - * IRET will check the segment types kkeil 2000/10/28 - * Also sysret mandates a special GDT layout - * - * TLS descriptors are currently at a different place compared to i386. - * Hopefully nobody expects them at a fixed place (Wine?) 
- */ - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), -#else - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), - /* - * Segments used for calling PnP BIOS have byte granularity. - * They code segments and data segments have fixed 64k limits, - * the transfer segment sizes are set at run time. - */ - /* 32-bit code */ - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), - /* 16-bit code */ - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), - /* - * The APM segments have byte granularity and their bases - * are set at run time. All have 64k limits. - */ - /* 32-bit code */ - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), - /* 16-bit code */ - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), - /* data */ - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), - - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - GDT_STACK_CANARY_INIT -#endif -} }; -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); - static int __init x86_xsave_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu) { struct desc_ptr gdt_descr; - gdt_descr.address = (long)get_cpu_gdt_table(cpu); + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); /* Reload the per-cpu base */ @@ -802,6 +748,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) /* Filter out anything that depends on CPUID levels we don't have */ filter_cpuid_features(c, true); +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32)) + setup_clear_cpu_cap(X86_FEATURE_SEP); +#endif + /* If the model name is still unset, do table lookup. 
*/ if (!c->x86_model_id[0]) { const char *p; @@ -1117,7 +1067,7 @@ void __cpuinit cpu_init(void) int i; cpu = stack_smp_processor_id(); - t = &per_cpu(init_tss, cpu); + t = init_tss + cpu; oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA @@ -1143,7 +1093,7 @@ void __cpuinit cpu_init(void) switch_to_new_gdt(cpu); loadsegment(fs, 0); - load_idt((const struct desc_ptr *)&idt_descr); + load_idt(&idt_descr); memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); syscall_init(); @@ -1205,7 +1155,7 @@ void __cpuinit cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct *t = &per_cpu(init_tss, cpu); + struct tss_struct *t = init_tss + cpu; struct thread_struct *thread = &curr->thread; if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 5384b04..0c28c59 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -484,7 +484,7 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 9b1ff37..3c1bac4 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -226,7 +226,7 @@ static struct cpu_model models[] = { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL }, { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL }, - { NULL, } + { NULL, NULL, 0, NULL} }; #undef _BANIAS #undef BANIAS diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b438944..aa576fa 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug(void) * Update the IDT descriptor and reload the IDT so that * it uses the read-only mapped virtual address. */ - idt_descr.address = fix_to_virt(FIX_F00F_IDT); + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); load_idt(&idt_descr); } #endif diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 18cc425..ebfbac1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -219,7 +219,7 @@ static void print_mce(struct mce *m) !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", m->cs, m->ip); - if (m->cs == __KERNEL_CS) + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) print_symbol("{%s}", m->ip); pr_cont("\n"); } @@ -1471,14 +1471,14 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) */ static DEFINE_SPINLOCK(mce_state_lock); -static int open_count; /* #times opened */ +static atomic_t open_count; /* #times opened */ static int open_exclu; /* already open exclusive? 
*/ static int mce_open(struct inode *inode, struct file *file) { spin_lock(&mce_state_lock); - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { + if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) { spin_unlock(&mce_state_lock); return -EBUSY; @@ -1486,7 +1486,7 @@ static int mce_open(struct inode *inode, struct file *file) if (file->f_flags & O_EXCL) open_exclu = 1; - open_count++; + atomic_inc(&open_count); spin_unlock(&mce_state_lock); @@ -1497,7 +1497,7 @@ static int mce_release(struct inode *inode, struct file *file) { spin_lock(&mce_state_lock); - open_count--; + atomic_dec(&open_count); open_exclu = 0; spin_unlock(&mce_state_lock); @@ -1683,6 +1683,7 @@ static struct miscdevice mce_log_device = { MISC_MCELOG_MINOR, "mcelog", &mce_chrdev_ops, + {NULL, NULL}, NULL, NULL }; /* diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fd31a44..e4817d8 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -28,7 +28,7 @@ static struct fixed_range_block fixed_range_blocks[] = { { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ - {} + { 0, 0 } }; static unsigned long smp_changes_mask; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 01c0f3e..0e18a24 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -61,7 +61,7 @@ static DEFINE_MUTEX(mtrr_mutex); u64 size_or_mask, size_and_mask; static bool mtrr_aps_delayed_init; -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; const struct mtrr_ops *mtrr_if; diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index df5e41f..d94607c 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h @@ -12,19 +12,19 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; struct mtrr_ops { - u32 vendor; - u32 use_intel_if; - void (*set)(unsigned int reg, unsigned long base, + const u32 vendor; + const u32 use_intel_if; + void (* const set)(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type); - void (*set_all)(void); + void (* const set_all)(void); - void (*get)(unsigned int reg, unsigned long *base, + void (* const get)(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type); - int (*get_free_region)(unsigned long base, unsigned long size, + int (* const get_free_region)(unsigned long base, unsigned long size, int replace_reg); - int (*validate_add_page)(unsigned long base, unsigned long size, + int (* const validate_add_page)(unsigned long base, unsigned long size, unsigned int type); - int (*have_wrcomb)(void); + int (* const have_wrcomb)(void); }; extern int generic_get_free_region(unsigned long base, unsigned long size, diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5db5b7d..1e1f17e 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1685,7 +1685,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) break; callchain_store(entry, frame.return_address); - fp = frame.next_frame; + fp = (__force const void __user *)frame.next_frame; } } diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index fb329e9..ab40f2d 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ 
b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk { /* Interface defining a CPU specific perfctr watchdog */ struct wd_ops { - int (*reserve)(void); - void (*unreserve)(void); - int (*setup)(unsigned nmi_hz); - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); - void (*stop)(void); + int (* const reserve)(void); + void (* const unreserve)(void); + int (* const setup)(unsigned nmi_hz); + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); + void (* const stop)(void); unsigned perfctr; unsigned evntsel; u64 checkbit; @@ -634,6 +634,7 @@ static const struct wd_ops p4_wd_ops = { #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK +/* cannot be const, see probe_nmi_watchdog */ static struct wd_ops intel_arch_wd_ops; static int setup_intel_arch_watchdog(unsigned nmi_hz) @@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) return 1; } +/* cannot be const */ static struct wd_ops intel_arch_wd_ops __read_mostly = { .reserve = single_msr_reserve, .unreserve = single_msr_unreserve, diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index ebd4c51..a345a28 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -40,7 +40,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args) regs = args->regs; #ifdef CONFIG_X86_32 - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index 37250fe..bf2ec74 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c @@ -11,7 +11,7 @@ #define DOUBLEFAULT_STACKSIZE (1024) static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) @@ -21,7 +21,7 @@ static void doublefault_fn(void) unsigned long gdt, tss; store_gdt(&gdt_desc); - gdt = gdt_desc.address; + gdt = (unsigned long)gdt_desc.address; printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = { /* 0x2 bit is always set */ .flags = X86_EFLAGS_SF | 0x2, .sp = STACK_START, - .es = __USER_DS, + .es = __KERNEL_DS, .cs = __KERNEL_CS, .ss = __KERNEL_DS, - .ds = __USER_DS, + .ds = __KERNEL_DS, .fs = __KERNEL_PERCPU, .__cr3 = __pa_nodebug(swapper_pg_dir), diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c89a386..10eb36b 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -207,7 +207,7 @@ void dump_stack(void) #endif printk("Pid: %d, comm: %.20s %s %s %.*s\n", - current->pid, current->comm, print_tainted(), + task_pid_nr(current), current->comm, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); @@ -263,7 +263,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); - do_exit(signr); + do_group_exit(signr); } int __kprobes __die(const char *str, struct pt_regs *regs, long err) @@ -290,7 +290,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) show_registers(regs); 
#ifdef CONFIG_X86_32 - if (user_mode_vm(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; } else { @@ -318,7 +318,7 @@ void die(const char *str, struct pt_regs *regs, long err) unsigned long flags = oops_begin(); int sig = SIGSEGV; - if (!user_mode_vm(regs)) + if (!user_mode(regs)) report_bug(regs->ip, regs); if (__die(str, regs, err)) diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 11540a1..285b0ef 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs) * When in-kernel, we also print out the stack and code at the * time of the fault.. */ - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_len = code_bytes; unsigned char c; u8 *ip; + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]); printk(KERN_EMERG "Stack:\n"); show_stack_log_lvl(NULL, regs, &regs->sp, @@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs) printk(KERN_EMERG "Code: "); - ip = (u8 *)regs->ip - code_prologue; + ip = (u8 *)regs->ip - code_prologue + cs_base; if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { /* try starting at IP */ - ip = (u8 *)regs->ip; + ip = (u8 *)regs->ip + cs_base; code_len = code_len - code_prologue + 1; } for (i = 0; i < code_len; i++, ip++) { @@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs) printk(" Bad EIP value."); break; } - if (ip == (u8 *)regs->ip) + if (ip == (u8 *)regs->ip + cs_base) printk("<%02x> ", c); else printk("%02x ", c); @@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip) { unsigned short ud2; + ip = ktla_ktva(ip); if (ip < PAGE_OFFSET) return 0; if (probe_kernel_address((unsigned short *)ip, ud2)) diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c index 5cab48e..abf8ca2 100644 --- a/arch/x86/kernel/efi_32.c +++ b/arch/x86/kernel/efi_32.c @@ -38,70 +38,38 @@ */ static unsigned long efi_rt_eflags; -static pgd_t efi_bak_pg_dir_pointer[2]; +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS]; -void efi_call_phys_prelog(void) +void __init efi_call_phys_prelog(void) { - unsigned long cr4; - unsigned long temp; struct desc_ptr gdt_descr; local_irq_save(efi_rt_eflags); - /* - * If I don't have PAE, I should just duplicate two entries in page - * directory. If I have PAE, I just need to duplicate one entry in - * page directory. - */ - cr4 = read_cr4_safe(); - if (cr4 & X86_CR4_PAE) { - efi_bak_pg_dir_pointer[0].pgd = - swapper_pg_dir[pgd_index(0)].pgd; - swapper_pg_dir[0].pgd = - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; - } else { - efi_bak_pg_dir_pointer[0].pgd = - swapper_pg_dir[pgd_index(0)].pgd; - efi_bak_pg_dir_pointer[1].pgd = - swapper_pg_dir[pgd_index(0x400000)].pgd; - swapper_pg_dir[pgd_index(0)].pgd = - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; - temp = PAGE_OFFSET + 0x400000; - swapper_pg_dir[pgd_index(0x400000)].pgd = - swapper_pg_dir[pgd_index(temp)].pgd; - } + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS); + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); /* * After the lock is released, the original page table is restored.
*/ __flush_tlb_all(); - gdt_descr.address = __pa(get_cpu_gdt_table(0)); + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0)); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); } -void efi_call_phys_epilog(void) +void __init efi_call_phys_epilog(void) { - unsigned long cr4; struct desc_ptr gdt_descr; - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); + gdt_descr.address = get_cpu_gdt_table(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); - cr4 = read_cr4_safe(); - - if (cr4 & X86_CR4_PAE) { - swapper_pg_dir[pgd_index(0)].pgd = - efi_bak_pg_dir_pointer[0].pgd; - } else { - swapper_pg_dir[pgd_index(0)].pgd = - efi_bak_pg_dir_pointer[0].pgd; - swapper_pg_dir[pgd_index(0x400000)].pgd = - efi_bak_pg_dir_pointer[1].pgd; - } + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS); /* * After the lock is released, the original page table is restored. diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S index fbe66e6..1f61a01 100644 --- a/arch/x86/kernel/efi_stub_32.S +++ b/arch/x86/kernel/efi_stub_32.S @@ -6,6 +6,7 @@ */ #include +#include #include /* @@ -20,7 +21,7 @@ * service functions will comply with gcc calling convention, too. */ -.text +__INIT ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been @@ -36,9 +37,7 @@ ENTRY(efi_call_phys) * The mapping of lower virtual memory has been created in prelog and * epilog. */ - movl $1f, %edx - subl $__PAGE_OFFSET, %edx - jmp *%edx + jmp 1f-__PAGE_OFFSET 1: /* @@ -47,14 +46,8 @@ ENTRY(efi_call_phys) * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ - popl %edx - movl %edx, saved_return_addr - /* get the function pointer into ECX*/ - popl %ecx - movl %ecx, efi_rt_function_ptr - movl $2f, %edx - subl $__PAGE_OFFSET, %edx - pushl %edx + popl (saved_return_addr) + popl (efi_rt_function_ptr) /* * 3. Clear PG bit in %CR0. @@ -73,9 +66,8 @@ ENTRY(efi_call_phys) /* * 5. Call the physical function. */ - jmp *%ecx + call *(efi_rt_function_ptr-__PAGE_OFFSET) -2: /* * 6. After EFI runtime service returns, control will return to * following instruction. We'd better readjust stack pointer first. @@ -88,35 +80,28 @@ ENTRY(efi_call_phys) movl %cr0, %edx orl $0x80000000, %edx movl %edx, %cr0 - jmp 1f -1: + /* * 8. Now restore the virtual mode from flat mode by * adding EIP with PAGE_OFFSET. */ - movl $1f, %edx - jmp *%edx + jmp 1f+__PAGE_OFFSET 1: /* * 9. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. */ - leal efi_rt_function_ptr, %edx - movl (%edx), %ecx - pushl %ecx + pushl (efi_rt_function_ptr) /* - * 10. Push the saved return address onto the stack and return. + * 10. Return to the saved return address. 
*/ - leal saved_return_addr, %edx - movl (%edx), %ecx - pushl %ecx - ret + jmpl *(saved_return_addr) ENDPROC(efi_call_phys) .previous -.data +__INITDATA saved_return_addr: .long 0 efi_rt_function_ptr: diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index cd49141..cc0b142 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -192,7 +192,67 @@ #endif /* CONFIG_X86_32_LAZY_GS */ -.macro SAVE_ALL +.macro PAX_EXIT_KERNEL +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_PARAVIRT + push %eax; push %ecx; +#endif + mov %cs, %esi + cmp $__KERNEXEC_KERNEL_CS, %esi + jnz 2f +#ifdef CONFIG_PARAVIRT + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); + mov %eax, %esi +#else + mov %cr0, %esi +#endif + btr $16, %esi + ljmp $__KERNEL_CS, $1f +1: +#ifdef CONFIG_PARAVIRT + mov %esi, %eax + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); +#else + mov %esi, %cr0 +#endif +2: +#ifdef CONFIG_PARAVIRT + pop %ecx; pop %eax +#endif +#endif +.endm + +.macro PAX_ENTER_KERNEL +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_PARAVIRT + push %eax; push %ecx; + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) + mov %eax, %esi +#else + mov %cr0, %esi +#endif + bts $16, %esi + jnc 1f + mov %cs, %esi + cmp $__KERNEL_CS, %esi + jz 3f + ljmp $__KERNEL_CS, $3f +1: ljmp $__KERNEXEC_KERNEL_CS, $2f +2: +#ifdef CONFIG_PARAVIRT + mov %esi, %eax + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) +#else + mov %esi, %cr0 +#endif +3: +#ifdef CONFIG_PARAVIRT + pop %ecx; pop %eax +#endif +#endif +.endm + +.macro __SAVE_ALL _DS cld PUSH_GS pushl %fs @@ -225,7 +285,7 @@ pushl %ebx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebx, 0 - movl $(__USER_DS), %edx + movl $\_DS, %edx movl %edx, %ds movl %edx, %es movl $(__KERNEL_PERCPU), %edx @@ -233,6 +293,15 @@ SET_KERNEL_GS %edx .endm +.macro SAVE_ALL +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + __SAVE_ALL __KERNEL_DS + PAX_ENTER_KERNEL +#else + __SAVE_ALL __USER_DS +#endif +.endm + .macro RESTORE_INT_REGS popl %ebx CFI_ADJUST_CFA_OFFSET -4 @@ -357,7 +426,15 @@ check_userspace: movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax cmpl $USER_RPL, %eax + +#ifdef CONFIG_PAX_KERNEXEC + jae resume_userspace + + PAX_EXIT_KERNEL + jmp resume_kernel +#else jb resume_kernel # not returning to v8086 or userspace +#endif ENTRY(resume_userspace) LOCKDEP_SYS_EXIT @@ -423,10 +500,9 @@ sysenter_past_esp: /*CFI_REL_OFFSET cs, 0*/ /* * Push current_thread_info()->sysenter_return to the stack. - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words - * pushed above; +8 corresponds to copy_thread's esp0 setting. */ - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) + GET_THREAD_INFO(%ebp) + pushl TI_sysenter_return(%ebp) CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET eip, 0 @@ -439,9 +515,19 @@ sysenter_past_esp: * Load the potential sixth argument from user stack. * Careful about security. 
*/ + movl PT_OLDESP(%esp),%ebp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov PT_OLDSS(%esp),%ds +1: movl %ds:(%ebp),%ebp + push %ss + pop %ds +#else cmpl $__PAGE_OFFSET-3,%ebp jae syscall_fault 1: movl (%ebp),%ebp +#endif + movl %ebp,PT_EBP(%esp) .section __ex_table,"a" .align 4 @@ -464,12 +550,23 @@ sysenter_do_call: testl $_TIF_ALLWORK_MASK, %ecx jne sysexit_audit sysenter_exit: + +#ifdef CONFIG_PAX_RANDKSTACK + pushl %eax + CFI_ADJUST_CFA_OFFSET 4 + call pax_randomize_kstack + popl %eax + CFI_ADJUST_CFA_OFFSET -4 +#endif + /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx movl PT_OLDESP(%esp), %ecx xorl %ebp,%ebp TRACE_IRQS_ON 1: mov PT_FS(%esp), %fs +2: mov PT_DS(%esp), %ds +3: mov PT_ES(%esp), %es PTGS_TO_GS ENABLE_INTERRUPTS_SYSEXIT @@ -513,11 +610,17 @@ sysexit_audit: CFI_ENDPROC .pushsection .fixup,"ax" -2: movl $0,PT_FS(%esp) +4: movl $0,PT_FS(%esp) + jmp 1b +5: movl $0,PT_DS(%esp) + jmp 1b +6: movl $0,PT_ES(%esp) jmp 1b .section __ex_table,"a" .align 4 - .long 1b,2b + .long 1b,4b + .long 2b,5b + .long 3b,6b .popsection PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) @@ -551,6 +654,10 @@ syscall_exit: testl $_TIF_ALLWORK_MASK, %ecx # current->work jne syscall_exit_work +#ifdef CONFIG_PAX_RANDKSTACK + call pax_randomize_kstack +#endif + restore_all: TRACE_IRQS_IRET restore_all_notrace: @@ -615,7 +722,13 @@ ldt_ss: mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov %dx, %ax /* eax: new kernel esp */ sub %eax, %edx /* offset (low word is 0) */ - PER_CPU(gdt_page, %ebx) +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx + shll $PAGE_SHIFT_asm, %ebx + addl $cpu_gdt_table, %ebx +#else + movl $cpu_gdt_table, %ebx +#endif shr $16, %edx mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */ mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */ @@ -655,25 +768,19 @@ work_resched: work_notifysig: # deal with pending signals and # notify-resume requests + movl %esp, %eax #ifdef CONFIG_VM86 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) - movl %esp, %eax - jne work_notifysig_v86 # returning to kernel-space or + jz 1f # returning to kernel-space or # vm86-space - xorl %edx, %edx - call do_notify_resume - jmp resume_userspace_sig - ALIGN -work_notifysig_v86: pushl %ecx # save ti_flags for do_notify_resume CFI_ADJUST_CFA_OFFSET 4 call save_v86_state # %eax contains pt_regs pointer popl %ecx CFI_ADJUST_CFA_OFFSET -4 movl %eax, %esp -#else - movl %esp, %eax +1: #endif xorl %edx, %edx call do_notify_resume @@ -708,6 +815,10 @@ END(syscall_exit_work) RING0_INT_FRAME # can't unwind into user space anyway syscall_fault: +#ifdef CONFIG_PAX_MEMORY_UDEREF + push %ss + pop %ds +#endif GET_THREAD_INFO(%ebp) movl $-EFAULT,PT_EAX(%esp) jmp resume_userspace @@ -791,7 +902,13 @@ ptregs_clone: * normal stack and adjusts ESP with the matching offset. 
*/ /* fixup the stack */ - PER_CPU(gdt_page, %ebx) +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx + shll $PAGE_SHIFT_asm, %ebx + addl $cpu_gdt_table, %ebx +#else + movl $cpu_gdt_table, %ebx +#endif mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */ mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */ shl $16, %eax @@ -1273,7 +1390,6 @@ return_to_handler: jmp *%ecx #endif -.section .rodata,"a" #include "syscall_table_32.S" syscall_table_size=(.-sys_call_table) @@ -1330,9 +1446,12 @@ error_code: movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart REG_TO_PTGS %ecx SET_KERNEL_GS %ecx - movl $(__USER_DS), %ecx + movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es + + PAX_ENTER_KERNEL + TRACE_IRQS_OFF movl %esp,%eax # pt_regs pointer call *%edi @@ -1426,6 +1545,9 @@ nmi_stack_correct: xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer call do_nmi + + PAX_EXIT_KERNEL + jmp restore_all_notrace CFI_ENDPROC @@ -1466,6 +1588,9 @@ nmi_espfix_stack: FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx,%edx # zero error code call do_nmi + + PAX_EXIT_KERNEL + RESTORE_REGS lss 12+4(%esp), %esp # back to espfix stack CFI_ADJUST_CFA_OFFSET -24 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 4db7c4d..1f56a44 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -53,6 +53,7 @@ #include #include #include +#include /* Avoid __ASSEMBLER__'ifying just for this. */ #include @@ -174,6 +175,189 @@ ENTRY(native_usergs_sysret64) ENDPROC(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ + .macro ljmpq sel, off +#if defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) + .byte 0x48; ljmp *1234f(%rip) + .pushsection .rodata + .align 16 + 1234: .quad \off; .word \sel + .popsection +#else + push $\sel + push $\off + lretq +#endif + .endm + +ENTRY(pax_enter_kernel) + +#ifdef CONFIG_PAX_KERNEXEC + push %rdi + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + GET_CR0_INTO_RDI + bts $16,%rdi + jnc 1f + mov %cs,%edi + cmp $__KERNEL_CS,%edi + jz 3f + ljmpq __KERNEL_CS,3f +1: ljmpq __KERNEXEC_KERNEL_CS,2f +2: SET_RDI_INTO_CR0 +3: + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + pop %rdi +#endif + + retq +ENDPROC(pax_enter_kernel) + +ENTRY(pax_exit_kernel) + +#ifdef CONFIG_PAX_KERNEXEC + push %rdi + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + mov %cs,%rdi + cmp $__KERNEXEC_KERNEL_CS,%edi + jnz 2f + GET_CR0_INTO_RDI + btr $16,%rdi + ljmpq __KERNEL_CS,1f +1: SET_RDI_INTO_CR0 +2: + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI); +#endif + + pop %rdi +#endif + + retq +ENDPROC(pax_exit_kernel) + +ENTRY(pax_enter_kernel_user) + +#ifdef CONFIG_PAX_MEMORY_UDEREF + push %rdi + push %rbx + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + GET_CR3_INTO_RDI + mov %rdi,%rbx + add $__START_KERNEL_map,%rbx + sub phys_base(%rip),%rbx + +#ifdef CONFIG_PARAVIRT + push %rdi + cmpl $0, pv_info+PARAVIRT_enabled + jz 1f + i = 0 + .rept USER_PGD_PTRS + mov i*8(%rbx),%rsi + mov $0,%sil + lea i*8(%rbx),%rdi + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd) + i = i + 1 + .endr + jmp 2f +1: +#endif + + i = 0 + .rept USER_PGD_PTRS + movb $0,i*8(%rbx) + i = i + 1 + .endr + +#ifdef CONFIG_PARAVIRT +2: pop %rdi +#endif + SET_RDI_INTO_CR3 + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + bts $16,%rdi + SET_RDI_INTO_CR0 +#endif + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + pop %rbx + pop %rdi +#endif + + retq +ENDPROC(pax_enter_kernel_user) + +ENTRY(pax_exit_kernel_user) + +#ifdef 
CONFIG_PAX_MEMORY_UDEREF + push %rdi + +#ifdef CONFIG_PARAVIRT + push %rbx + PV_SAVE_REGS(CLBR_RDI) +#endif + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + btr $16,%rdi + SET_RDI_INTO_CR0 +#endif + + GET_CR3_INTO_RDI + add $__START_KERNEL_map,%rdi + sub phys_base(%rip),%rdi + +#ifdef CONFIG_PARAVIRT + cmpl $0, pv_info+PARAVIRT_enabled + jz 1f + mov %rdi,%rbx + i = 0 + .rept USER_PGD_PTRS + mov i*8(%rbx),%rsi + mov $0x67,%sil + lea i*8(%rbx),%rdi + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd) + i = i + 1 + .endr + jmp 2f +1: +#endif + + i = 0 + .rept USER_PGD_PTRS + movb $0x67,i*8(%rdi) + i = i + 1 + .endr + +#ifdef CONFIG_PARAVIRT +2: PV_RESTORE_REGS(CLBR_RDI) + pop %rbx +#endif + + pop %rdi +#endif + + retq +ENDPROC(pax_exit_kernel_user) .macro TRACE_IRQS_IRETQ offset=ARGOFFSET #ifdef CONFIG_TRACE_IRQFLAGS @@ -317,7 +501,7 @@ ENTRY(save_args) leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ movq_cfi rbp, 8 /* push %rbp */ leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ - testl $3, CS(%rdi) + testb $3, CS(%rdi) je 1f SWAPGS /* @@ -409,7 +593,7 @@ ENTRY(ret_from_fork) RESTORE_REST - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread? je int_ret_from_sys_call testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET @@ -468,6 +652,11 @@ ENTRY(system_call_after_swapgs) movq %rsp,PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack),%rsp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_enter_kernel_user +#endif + /* * No need to follow this irqs off/on section - it's straight * and short: @@ -502,6 +691,11 @@ sysret_check: andl %edi,%edx jnz sysret_careful CFI_REMEMBER_STATE + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#endif + /* * sysretq will re-enable interrupts: */ @@ -613,7 +807,7 @@ tracesys: GLOBAL(int_ret_from_sys_call) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - testl $3,CS-ARGOFFSET(%rsp) + testb $3,CS-ARGOFFSET(%rsp) je retint_restore_args movl $_TIF_ALLWORK_MASK,%edi /* edi: mask to check */ @@ -800,6 +994,16 @@ END(interrupt) CFI_ADJUST_CFA_OFFSET 10*8 call save_args PARTIAL_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rdi) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif call \func .endm @@ -826,7 +1030,7 @@ ret_from_intr: CFI_ADJUST_CFA_OFFSET -8 exit_intr: GET_THREAD_INFO(%rcx) - testl $3,CS-ARGOFFSET(%rsp) + testb $3,CS-ARGOFFSET(%rsp) je retint_kernel /* Interrupt came from user space */ @@ -848,12 +1052,18 @@ retint_swapgs: /* return to user-space */ * The iretq could re-enable interrupts: */ DISABLE_INTERRUPTS(CLBR_ANY) + +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#endif + TRACE_IRQS_IRETQ SWAPGS jmp restore_args retint_restore_args: /* return to kernel space */ DISABLE_INTERRUPTS(CLBR_ANY) + call pax_exit_kernel /* * The iretq could re-enable interrupts: */ @@ -1040,6 +1250,16 @@ ENTRY(\sym) CFI_ADJUST_CFA_OFFSET 15*8 call error_entry DEFAULT_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ call \do_sym @@ -1057,6 +1277,16 @@ ENTRY(\sym) subq $15*8, %rsp call save_paranoid TRACE_IRQS_OFF +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ xorl 
%esi,%esi /* no error code */ call \do_sym @@ -1074,9 +1304,24 @@ ENTRY(\sym) subq $15*8, %rsp call save_paranoid TRACE_IRQS_OFF +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ - PER_CPU(init_tss, %r12) +#ifdef CONFIG_SMP + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d + lea init_tss(%r12), %r12 +#else + lea init_tss(%rip), %r12 +#endif subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) call \do_sym addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) @@ -1093,6 +1338,16 @@ ENTRY(\sym) CFI_ADJUST_CFA_OFFSET 15*8 call error_entry DEFAULT_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ @@ -1112,6 +1367,16 @@ ENTRY(\sym) call save_paranoid DEFAULT_FRAME 0 TRACE_IRQS_OFF +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ @@ -1370,14 +1635,27 @@ ENTRY(paranoid_exit) TRACE_IRQS_OFF testl %ebx,%ebx /* swapgs needed? */ jnz paranoid_restore - testl $3,CS(%rsp) + testb $3,CS(%rsp) jnz paranoid_userspace +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel + TRACE_IRQS_IRETQ 0 + SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return +#endif paranoid_swapgs: +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#else + call pax_exit_kernel +#endif TRACE_IRQS_IRETQ 0 SWAPGS_UNSAFE_STACK RESTORE_ALL 8 jmp irq_return paranoid_restore: + call pax_exit_kernel TRACE_IRQS_IRETQ 0 RESTORE_ALL 8 jmp irq_return @@ -1435,7 +1713,7 @@ ENTRY(error_entry) movq_cfi r14, R14+8 movq_cfi r15, R15+8 xorl %ebx,%ebx - testl $3,CS+8(%rsp) + testb $3,CS+8(%rsp) je error_kernelspace error_swapgs: SWAPGS @@ -1499,6 +1777,16 @@ ENTRY(nmi) CFI_ADJUST_CFA_OFFSET 15*8 call save_paranoid DEFAULT_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + call pax_enter_kernel + jmp 2f +1: call pax_enter_kernel_user +2: +#else + call pax_enter_kernel +#endif /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp,%rdi movq $-1,%rsi @@ -1509,11 +1797,12 @@ ENTRY(nmi) DISABLE_INTERRUPTS(CLBR_NONE) testl %ebx,%ebx /* swapgs needed? 
*/ jnz nmi_restore - testl $3,CS(%rsp) + testb $3,CS(%rsp) jnz nmi_userspace nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: + call pax_exit_kernel RESTORE_ALL 8 jmp irq_return nmi_userspace: diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index cd37469..1a535d2 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -174,7 +174,9 @@ void ftrace_nmi_enter(void) if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { smp_rmb(); + pax_open_kernel(); ftrace_mod_code(); + pax_close_kernel(); atomic_inc(&nmi_update_count); } /* Must have previous changes seen before executions */ @@ -260,7 +262,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only; static unsigned char *ftrace_nop_replace(void) { @@ -273,6 +275,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, { unsigned char replaced[MCOUNT_INSN_SIZE]; + ip = ktla_ktva(ip); + /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting @@ -329,7 +333,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) unsigned char old[MCOUNT_INSN_SIZE], *new; int ret; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); ret = ftrace_modify_code(ip, old, new); @@ -382,15 +386,15 @@ int __init ftrace_dyn_arch_init(void *data) switch (faulted) { case 0: pr_info("converting mcount calls to 0f 1f 44 00 00\n"); - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE); break; case 1: pr_info("converting mcount calls to 66 66 66 66 90\n"); - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE); break; case 2: pr_info("converting mcount calls to jmp . + 5\n"); - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE); break; } @@ -411,6 +415,8 @@ static int ftrace_mod_jmp(unsigned long ip, { unsigned char code[MCOUNT_INSN_SIZE]; + ip = ktla_ktva(ip); + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index b2e2460..cd2698e 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -17,6 +17,7 @@ #include #include #include +#include static void __init i386_default_early_setup(void) { @@ -40,7 +41,7 @@ void __init i386_start_kernel(void) "EX TRAMPOLINE"); #endif - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 75e3981..5674277 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -25,6 +25,12 @@ /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) +#ifdef CONFIG_PAX_KERNEXEC +#define ta(X) (X) +#else +#define ta(X) ((X) - __PAGE_OFFSET) +#endif + /* * References to members of the new_cpu_data structure. 
*/ @@ -54,11 +60,7 @@ * and small than max_low_pfn, otherwise will waste some page table entries */ -#if PTRS_PER_PMD > 1 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) -#else -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) -#endif +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) /* Enough space to fit pagetables for the low memory linear map */ MAPPING_BEYOND_END = \ @@ -75,6 +77,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* + * Real beginning of normal "text" segment + */ +ENTRY(stext) +ENTRY(_stext) + +/* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. * CS and DS must be 4 GB flat segments, but we don't depend on @@ -82,6 +90,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) * can. */ __HEAD + +#ifdef CONFIG_PAX_KERNEXEC + jmp startup_32 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ +.fill PAGE_SIZE-5,1,0xcc +#endif + ENTRY(startup_32) /* test KEEP_SEGMENTS flag to see if the bootloader is asking us to not reload segments */ @@ -99,6 +114,55 @@ ENTRY(startup_32) movl %eax,%gs 2: +#ifdef CONFIG_SMP + movl $pa(cpu_gdt_table),%edi + movl $__per_cpu_load,%eax + movw %ax,__KERNEL_PERCPU + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_PERCPU + 4(%edi) + movb %ah,__KERNEL_PERCPU + 7(%edi) + movl $__per_cpu_end - 1,%eax + subl $__per_cpu_start,%eax + movw %ax,__KERNEL_PERCPU + 0(%edi) +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + movl $NR_CPUS,%ecx + movl $pa(cpu_gdt_table),%edi +1: + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) + addl $PAGE_SIZE_asm,%edi + loop 1b +#endif + +#ifdef CONFIG_PAX_KERNEXEC + movl $pa(boot_gdt),%edi + movl $__LOAD_PHYSICAL_ADDR,%eax + movw %ax,__BOOT_CS + 2(%edi) + rorl $16,%eax + movb %al,__BOOT_CS + 4(%edi) + movb %ah,__BOOT_CS + 7(%edi) + rorl $16,%eax + + ljmp $(__BOOT_CS),$1f +1: + + movl $NR_CPUS,%ecx + movl $pa(cpu_gdt_table),%edi + addl $__PAGE_OFFSET,%eax +1: + movw %ax,__KERNEL_CS + 2(%edi) + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_CS + 4(%edi) + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi) + movb %ah,__KERNEL_CS + 7(%edi) + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi) + rorl $16,%eax + addl $PAGE_SIZE_asm,%edi + loop 1b +#endif + /* * Clear BSS first so that there are no surprises... */ @@ -142,9 +206,7 @@ ENTRY(startup_32) cmpl $num_subarch_entries, %eax jae bad_subarch - movl pa(subarch_entries)(,%eax,4), %eax - subl $__PAGE_OFFSET, %eax - jmp *%eax + jmp *pa(subarch_entries)(,%eax,4) bad_subarch: WEAK(lguest_entry) @@ -156,10 +218,10 @@ WEAK(xen_entry) __INITDATA subarch_entries: - .long default_entry /* normal x86/PC */ - .long lguest_entry /* lguest hypervisor */ - .long xen_entry /* Xen hypervisor */ - .long default_entry /* Moorestown MID */ + .long ta(default_entry) /* normal x86/PC */ + .long ta(lguest_entry) /* lguest hypervisor */ + .long ta(xen_entry) /* Xen hypervisor */ + .long ta(default_entry) /* Moorestown MID */ num_subarch_entries = (. 
- subarch_entries) / 4 .previous #endif /* CONFIG_PARAVIRT */ @@ -220,8 +282,11 @@ default_entry: movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) +#ifdef CONFIG_COMPAT_VDSO + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8) +#else + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8) +#endif #else /* Not PAE */ page_pde_offset = (__PAGE_OFFSET >> 20); @@ -251,8 +316,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(swapper_pg_dir+0xffc) +#ifdef CONFIG_COMPAT_VDSO + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc) +#else + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc) +#endif #endif jmp 3f /* @@ -299,6 +367,7 @@ ENTRY(startup_32_smp) orl %edx,%eax movl %eax,%cr4 +#ifdef CONFIG_X86_PAE testb $X86_CR4_PAE, %al # check if PAE is enabled jz 6f @@ -323,6 +392,9 @@ ENTRY(startup_32_smp) /* Make changes effective */ wrmsr + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) +#endif + 6: /* @@ -348,9 +420,7 @@ ENTRY(startup_32_smp) #ifdef CONFIG_SMP cmpb $0, ready - jz 1f /* Initial CPU cleans BSS */ - jmp checkCPUtype -1: + jnz checkCPUtype /* Initial CPU cleans BSS */ #endif /* CONFIG_SMP */ /* @@ -428,7 +498,7 @@ is386: movl $2,%ecx # set MP 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. - movl $(__USER_DS),%eax # DS/ES contains default USER segment +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment movl %eax,%ds movl %eax,%es @@ -442,8 +512,11 @@ is386: movl $2,%ecx # set MP */ cmpb $0,ready jne 1f - movl $gdt_page,%eax + movl $cpu_gdt_table,%eax movl $stack_canary,%ecx +#ifdef CONFIG_SMP + addl $__per_cpu_load,%ecx +#endif movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) @@ -461,10 +534,6 @@ is386: movl $2,%ecx # set MP #ifdef CONFIG_SMP movb ready, %cl movb $1, ready - cmpb $0,%cl # the first CPU calls start_kernel - je 1f - movl (stack_start), %esp -1: #endif /* CONFIG_SMP */ jmp *(initial_code) @@ -550,22 +619,22 @@ early_page_fault: jmp early_fault early_fault: - cld #ifdef CONFIG_PRINTK + cmpl $1,%ss:early_recursion_flag + je hlt_loop + incl %ss:early_recursion_flag + cld pusha movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es - cmpl $2,early_recursion_flag - je hlt_loop - incl early_recursion_flag movl %cr2,%eax pushl %eax pushl %edx /* trapno */ pushl $fault_msg call printk +; call dump_stack #endif - call dump_stack hlt_loop: hlt jmp hlt_loop @@ -573,8 +642,11 @@ hlt_loop: /* This is the default interrupt "handler" :-) */ ALIGN ignore_int: - cld #ifdef CONFIG_PRINTK + cmpl $2,%ss:early_recursion_flag + je hlt_loop + incl %ss:early_recursion_flag + cld pushl %eax pushl %ecx pushl %edx @@ -583,9 +655,6 @@ ignore_int: movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es - cmpl $2,early_recursion_flag - je hlt_loop - incl early_recursion_flag pushl 16(%esp) pushl 24(%esp) pushl 32(%esp) @@ -614,31 +683,47 @@ ENTRY(initial_page_table) /* * BSS section */ -__PAGE_ALIGNED_BSS - .align PAGE_SIZE_asm #ifdef CONFIG_X86_PAE +.section .swapper_pg_pmd,"a",@progbits swapper_pg_pmd: .fill 1024*KPMDS,4,0 #else +.section .swapper_pg_dir,"a",@progbits ENTRY(swapper_pg_dir) .fill 1024,4,0 #endif 
+.section .swapper_pg_fixmap,"a",@progbits swapper_pg_fixmap: .fill 1024,4,0 #ifdef CONFIG_X86_TRAMPOLINE +.section .trampoline_pg_dir,"a",@progbits ENTRY(trampoline_pg_dir) +#ifdef CONFIG_X86_PAE + .fill 4,8,0 +#else .fill 1024,4,0 #endif +#endif + +.section .empty_zero_page,"a",@progbits ENTRY(empty_zero_page) .fill 4096,1,0 /* + * The IDT has to be page-aligned to simplify the Pentium + * F0 0F bug workaround.. We have a special link segment + * for this. + */ +.section .idt,"a",@progbits +ENTRY(idt_table) + .fill 256,8,0 + +/* * This starts the data section. */ #ifdef CONFIG_X86_PAE -__PAGE_ALIGNED_DATA - /* Page-aligned for the benefit of paravirt? */ - .align PAGE_SIZE_asm +.section .swapper_pg_dir,"a",@progbits + ENTRY(swapper_pg_dir) .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 @@ -657,15 +742,24 @@ ENTRY(swapper_pg_dir) # error "Kernel PMDs should be 1, 2 or 3" # endif .align PAGE_SIZE_asm /* needs to be page-sized too */ + +#ifdef CONFIG_PAX_PER_CPU_PGD +ENTRY(cpu_pgd) + .rept NR_CPUS + .fill 4,8,0 + .endr +#endif + #endif .data ENTRY(stack_start) - .long init_thread_union+THREAD_SIZE + .long init_thread_union+THREAD_SIZE-8 .long __BOOT_DS ready: .byte 0 +.section .rodata,"a",@progbits early_recursion_flag: .long 0 @@ -701,7 +795,7 @@ fault_msg: .word 0 # 32 bit align gdt_desc.address boot_gdt_descr: .word __BOOT_DS+7 - .long boot_gdt - __PAGE_OFFSET + .long pa(boot_gdt) .word 0 # 32-bit align idt_desc.address idt_descr: @@ -712,7 +806,7 @@ idt_descr: .word 0 # 32 bit align gdt_desc.address ENTRY(early_gdt_descr) .word GDT_ENTRIES*8-1 - .long gdt_page /* Overwritten for secondary CPUs */ + .long cpu_gdt_table /* Overwritten for secondary CPUs */ /* * The boot_gdt must mirror the equivalent in setup.S and is @@ -721,5 +815,65 @@ ENTRY(early_gdt_descr) .align L1_CACHE_BYTES ENTRY(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ + + .align PAGE_SIZE_asm +ENTRY(cpu_gdt_table) + .rept NR_CPUS + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x0000000000000000 /* 0x0b reserved */ + .quad 0x0000000000000000 /* 0x13 reserved */ + .quad 0x0000000000000000 /* 0x1b reserved */ + +#ifdef CONFIG_PAX_KERNEXEC + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ +#else + .quad 0x0000000000000000 /* 0x20 unused */ +#endif + + .quad 0x0000000000000000 /* 0x28 unused */ + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ + .quad 0x0000000000000000 /* 0x4b reserved */ + .quad 0x0000000000000000 /* 0x53 reserved */ + .quad 0x0000000000000000 /* 0x5b reserved */ + + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ + + .quad 0x0000000000000000 /* 0x80 TSS descriptor */ + .quad 0x0000000000000000 /* 0x88 LDT descriptor */ + + /* + * Segments used for calling PnP BIOS have byte granularity. + * The code segments and data segments have fixed 64k limits, + * the transfer segment sizes are set at run time. 
+ */ + .quad 0x00409b000000ffff /* 0x90 32-bit code */ + .quad 0x00009b000000ffff /* 0x98 16-bit code */ + .quad 0x000093000000ffff /* 0xa0 16-bit data */ + .quad 0x0000930000000000 /* 0xa8 16-bit data */ + .quad 0x0000930000000000 /* 0xb0 16-bit data */ + + /* + * The APM segments have byte granularity and their bases + * are set at run time. All have 64k limits. + */ + .quad 0x00409b000000ffff /* 0xb8 APM CS code */ + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ + .quad 0x004093000000ffff /* 0xc8 APM DS data */ + + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */ + .quad 0x0040930000000000 /* 0xd8 - PERCPU */ + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */ + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ + + /* Be sure this is zeroed to avoid false validations in Xen */ + .fill PAGE_SIZE_asm - GDT_SIZE,1,0 + .endr diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 3d1e6f1..708f920 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_PARAVIRT #include @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) L4_START_KERNEL = pgd_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map) +L4_VMALLOC_START = pgd_index(VMALLOC_START) +L3_VMALLOC_START = pud_index(VMALLOC_START) +L4_VMEMMAP_START = pgd_index(VMEMMAP_START) +L3_VMEMMAP_START = pud_index(VMEMMAP_START) .text __HEAD @@ -85,35 +90,22 @@ startup_64: */ addq %rbp, init_level4_pgt + 0(%rip) addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) addq %rbp, level3_ident_pgt + 0(%rip) +#ifndef CONFIG_XEN + addq %rbp, level3_ident_pgt + 8(%rip) +#endif - addq %rbp, level3_kernel_pgt + (510*8)(%rip) - addq %rbp, level3_kernel_pgt + (511*8)(%rip) - - addq %rbp, level2_fixmap_pgt + (506*8)(%rip) - - /* Add an Identity mapping if I am above 1G */ - leaq _text(%rip), %rdi - andq $PMD_PAGE_MASK, %rdi - - movq %rdi, %rax - shrq $PUD_SHIFT, %rax - andq $(PTRS_PER_PUD - 1), %rax - jz ident_complete + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx - leaq level3_ident_pgt(%rip), %rbx - movq %rdx, 0(%rbx, %rax, 8) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) - movq %rdi, %rax - shrq $PMD_SHIFT, %rax - andq $(PTRS_PER_PMD - 1), %rax - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx - leaq level2_spare_pgt(%rip), %rbx - movq %rdx, 0(%rbx, %rax, 8) -ident_complete: + addq %rbp, level2_fixmap_pgt + (506*8)(%rip) + addq %rbp, level2_fixmap_pgt + (507*8)(%rip) /* * Fixup the kernel text+data virtual addresses. Note that @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64) * after the boot processor executes this code. */ - /* Enable PAE mode and PGE */ - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax + /* Enable PAE mode and PSE/PGE */ + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax movq %rax, %cr4 /* Setup early boot stage 4 level pagetables. */ @@ -184,9 +176,14 @@ ENTRY(secondary_startup_64) movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ - btl $20,%edi /* No Execute supported? 
*/ + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */ jnc 1f btsl $_EFER_NX, %eax + leaq init_level4_pgt(%rip), %rdi + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi) + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi) + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi) + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ @@ -271,7 +268,7 @@ ENTRY(secondary_startup_64) bad_address: jmp bad_address - .section ".init.text","ax" + __INIT #ifdef CONFIG_EARLY_PRINTK .globl early_idt_handlers early_idt_handlers: @@ -316,18 +313,23 @@ ENTRY(early_idt_handler) #endif /* EARLY_PRINTK */ 1: hlt jmp 1b + .previous #ifdef CONFIG_EARLY_PRINTK + __INITDATA early_recursion_flag: .long 0 + .previous + .section .rodata,"a",@progbits early_idt_msg: .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" early_idt_ripmsg: .asciz "RIP %s\n" -#endif /* CONFIG_EARLY_PRINTK */ .previous +#endif /* CONFIG_EARLY_PRINTK */ + .section .rodata,"a",@progbits #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ ENTRY(name) @@ -351,13 +353,36 @@ NEXT_PAGE(init_level4_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_VMALLOC_START*8, 0 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_VMEMMAP_START*8, 0 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE +#ifdef CONFIG_PAX_PER_CPU_PGD +NEXT_PAGE(cpu_pgd) + .rept NR_CPUS + .fill 512,8,0 + .endr +#endif + NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE +#ifdef CONFIG_XEN .fill 511,8,0 +#else + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE + .fill 510,8,0 +#endif + +NEXT_PAGE(level3_vmalloc_pgt) + .fill 512,8,0 + +NEXT_PAGE(level3_vmemmap_pgt) + .fill L3_VMEMMAP_START,8,0 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 @@ -365,20 +390,23 @@ NEXT_PAGE(level3_kernel_pgt) .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE +NEXT_PAGE(level2_vmemmap_pgt) + .fill 512,8,0 + NEXT_PAGE(level2_fixmap_pgt) - .fill 506,8,0 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ - .fill 5,8,0 + .fill 507,8,0 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ + .fill 4,8,0 -NEXT_PAGE(level1_fixmap_pgt) +NEXT_PAGE(level1_vsyscall_pgt) .fill 512,8,0 -NEXT_PAGE(level2_ident_pgt) - /* Since I easily can, map the first 1G. + /* Since I easily can, map the first 2G. * Don't set NX because code runs from these pages. */ - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +NEXT_PAGE(level2_ident_pgt) + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD) NEXT_PAGE(level2_kernel_pgt) /* @@ -391,33 +419,55 @@ NEXT_PAGE(level2_kernel_pgt) * If you want to increase this then increase MODULES_VADDR * too.) 
*/ - PMDS(0, __PAGE_KERNEL_LARGE_EXEC, - KERNEL_IMAGE_SIZE/PMD_SIZE) - -NEXT_PAGE(level2_spare_pgt) - .fill 512, 8, 0 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) #undef PMDS #undef NEXT_PAGE - .data + .align PAGE_SIZE +ENTRY(cpu_gdt_table) + .rept NR_CPUS + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ + .quad 0x00af9b000000ffff /* __KERNEL_CS */ + .quad 0x00cf93000000ffff /* __KERNEL_DS */ + .quad 0x00cffb000000ffff /* __USER32_CS */ + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ + .quad 0x00affb000000ffff /* __USER_CS */ + +#ifdef CONFIG_PAX_KERNEXEC + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ +#else + .quad 0x0 /* unused */ +#endif + + .quad 0,0 /* TSS */ + .quad 0,0 /* LDT */ + .quad 0,0,0 /* three TLS descriptors */ + .quad 0x0000f40000000000 /* node/CPU stored in limit */ + /* asm/segment.h:GDT_ENTRIES must match this */ + + /* zero the remaining page */ + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 + .endr + .align 16 .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 early_gdt_descr_base: - .quad INIT_PER_CPU_VAR(gdt_page) + .quad cpu_gdt_table ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 #include "../../x86/xen/xen-head.S" - - .section .bss, "aw", @nobits + + .section .rodata,"a",@progbits .align L1_CACHE_BYTES ENTRY(idt_table) - .skip IDT_ENTRIES * 16 + .fill 512,8,0 __PAGE_ALIGNED_BSS .align PAGE_SIZE diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 9c3bd4a..e1d9b35 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void); EXPORT_SYMBOL(cmpxchg8b_emu); #endif +EXPORT_SYMBOL_GPL(cpu_gdt_table); + /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_generic); +EXPORT_SYMBOL(csum_partial_copy_generic_to_user); +EXPORT_SYMBOL(csum_partial_copy_generic_from_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(empty_zero_page); + +#ifdef CONFIG_PAX_KERNEXEC +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); +#endif diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index 43e9ccf..c40716b 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c @@ -38,5 +38,5 @@ EXPORT_SYMBOL(init_task); * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; - +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... 
NR_CPUS-1] = INIT_TSS }; +EXPORT_SYMBOL(init_tss); diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 8eec0ec..ef34e2d 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) return -EINVAL; +#ifdef CONFIG_GRKERNSEC_IO + if (turn_on && grsec_disable_privio) { + gr_handle_ioperm(); + return -EPERM; + } +#endif if (turn_on && !capable(CAP_SYS_RAWIO)) return -EPERM; @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); @@ -112,6 +119,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs) return -EINVAL; /* Trying to gain more privileges? */ if (level > old) { +#ifdef CONFIG_GRKERNSEC_IO + if (grsec_disable_privio) { + gr_handle_iopl(); + return -EPERM; + } +#endif if (!capable(CAP_SYS_RAWIO)) return -EPERM; } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 10709f2..eff1abb 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) return 0; /* build the stack frame on the IRQ stack */ - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); irqctx->tinfo.task = curctx->tinfo.task; irqctx->tinfo.previous_esp = current_stack_pointer; @@ -175,7 +175,7 @@ asmlinkage void do_softirq(void) irqctx->tinfo.previous_esp = current_stack_pointer; /* build the stack frame on the softirq stack */ - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); call_on_stack(__do_softirq, isp); /* diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 01ab17a..5512ff4 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -77,7 +77,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) gdb_regs[GDB_CS] = regs->cs; gdb_regs[GDB_FS] = 0xFFFF; gdb_regs[GDB_GS] = 0xFFFF; - if (user_mode_vm(regs)) { + if (user_mode(regs)) { gdb_regs[GDB_SS] = regs->ss; gdb_regs[GDB_SP] = regs->sp; } else { @@ -720,7 +720,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->ip = ip; } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, .flags = KGDB_HW_BREAKPOINT, diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 675879b..8c44581 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -114,9 +114,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) s32 raddr; } __attribute__((packed)) *insn; - insn = (struct __arch_relative_insn *)from; + insn = (struct __arch_relative_insn *)(ktla_ktva(from)); + + pax_open_kernel(); insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); insn->op = op; + pax_close_kernel(); } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ @@ -315,7 +318,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) } } insn_get_length(&insn); + pax_open_kernel(); memcpy(dest, insn.kaddr, insn.length); + pax_close_kernel(); #ifdef 
CONFIG_X86_64 if (insn_rip_relative(&insn)) { @@ -339,7 +344,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) (u8 *) dest; BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ disp = (u8 *) dest + insn_offset_displacement(&insn); + pax_open_kernel(); *(s32 *) disp = (s32) newdisp; + pax_close_kernel(); } #endif return insn.length; @@ -353,12 +360,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) */ __copy_instruction(p->ainsn.insn, p->addr, 0); - if (can_boost(p->addr)) + if (can_boost(ktla_ktva(p->addr))) p->ainsn.boostable = 0; else p->ainsn.boostable = -1; - p->opcode = *p->addr; + p->opcode = *(ktla_ktva(p->addr)); } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -475,7 +482,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, * nor set current_kprobe, because it doesn't use single * stepping. */ - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); preempt_enable_no_resched(); return; } @@ -494,7 +501,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, if (p->opcode == BREAKPOINT_INSTRUCTION) regs->ip = (unsigned long)p->addr; else - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); } /* @@ -573,7 +580,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) setup_singlestep(p, regs, kcb, 0); return 1; } - } else if (*addr != BREAKPOINT_INSTRUCTION) { + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed @@ -799,7 +806,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = stack_addr(regs); - unsigned long copy_ip = (unsigned long)p->ainsn.insn; + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); unsigned long orig_ip = (unsigned long)p->addr; kprobe_opcode_t *insn = p->ainsn.insn; @@ -982,7 +989,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = data; int ret = NOTIFY_DONE; - if (args->regs && user_mode_vm(args->regs)) + if (args->regs && user_mode(args->regs)) return ret; switch (val) { diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ea69726..604d066 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) if (reload) { #ifdef CONFIG_SMP preempt_disable(); - load_LDT(pc); + load_LDT_nolock(pc); if (!cpumask_equal(mm_cpumask(current->mm), cpumask_of(smp_processor_id()))) smp_call_function(flush_ldt, current->mm, 1); preempt_enable(); #else - load_LDT(pc); + load_LDT_nolock(pc); #endif } if (oldsize) { @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) return err; for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); + write_ldt_entry(new->ldt, i, old->ldt + i); return 0; } @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) retval = copy_ldt(&mm->context, &old_mm->context); mutex_unlock(&old_mm->context.lock); } + + if (tsk == current) { + mm->context.vdso = 0; + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + mm->context.user_cs_base = 0UL; + mm->context.user_cs_limit = ~0UL; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + 
cpus_clear(mm->context.cpu_user_cs_mask); +#endif + +#endif +#endif + + } + return retval; } @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) } } +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { + error = -EINVAL; + goto out_unlock; + } +#endif + fill_ldt(&ldt, &ldt_info); if (oldmode) ldt.avl = 0; diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index a3fa43b..8966f4c 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -27,7 +27,7 @@ #include #include -static void set_idt(void *newidt, __u16 limit) +static void set_idt(struct desc_struct *newidt, __u16 limit) { struct desc_ptr curidt; @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit) } -static void set_gdt(void *newgdt, __u16 limit) +static void set_gdt(struct desc_struct *newgdt, __u16 limit) { struct desc_ptr curgdt; @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image) } control_page = page_address(image->control_code_page); - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); relocate_kernel_ptr = control_page; page_list[PA_CONTROL_PAGE] = __pa(control_page); diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index e1af7c0..db0032e 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -331,7 +331,7 @@ static void microcode_fini_cpu_amd(int cpu) uci->mc = NULL; } -static struct microcode_ops microcode_amd_ops = { +static const struct microcode_ops microcode_amd_ops = { .request_microcode_user = request_microcode_user, .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info_amd, @@ -339,7 +339,7 @@ static struct microcode_ops microcode_amd_ops = { .microcode_fini_cpu = microcode_fini_cpu_amd, }; -struct microcode_ops * __init init_amd_microcode(void) +const struct microcode_ops * __init init_amd_microcode(void) { return &microcode_amd_ops; } diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index fa6551d..43662ff 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -92,7 +92,7 @@ MODULE_LICENSE("GPL"); #define MICROCODE_VERSION "2.00" -static struct microcode_ops *microcode_ops; +static const struct microcode_ops *microcode_ops; /* * Synchronization.
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 3561702..f4807b7 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c @@ -446,13 +446,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) static int get_ucode_user(void *to, const void *from, size_t n) { - return copy_from_user(to, from, n); + return copy_from_user(to, (__force const void __user *)from, n); } static enum ucode_state request_microcode_user(int cpu, const void __user *buf, size_t size) { - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user); } static void microcode_fini_cpu(int cpu) @@ -463,7 +463,7 @@ static void microcode_fini_cpu(int cpu) uci->mc = NULL; } -static struct microcode_ops microcode_intel_ops = { +static const struct microcode_ops microcode_intel_ops = { .request_microcode_user = request_microcode_user, .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, @@ -471,7 +471,7 @@ static struct microcode_ops microcode_intel_ops = { .microcode_fini_cpu = microcode_fini_cpu, }; -struct microcode_ops * __init init_intel_microcode(void) +const struct microcode_ops * __init init_intel_microcode(void) { return &microcode_intel_ops; } diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index e0bc186..0c3f714 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -35,7 +35,7 @@ #define DEBUGP(fmt...) #endif -void *module_alloc(unsigned long size) +static void *__module_alloc(unsigned long size, pgprot_t prot) { struct vm_struct *area; @@ -49,8 +49,18 @@ void *module_alloc(unsigned long size) if (!area) return NULL; - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM, - PAGE_KERNEL_EXEC); + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot); +} + +void *module_alloc(unsigned long size) +{ + +#ifdef CONFIG_PAX_KERNEXEC + return __module_alloc(size, PAGE_KERNEL); +#else + return __module_alloc(size, PAGE_KERNEL_EXEC); +#endif + } /* Free memory returned from module_alloc */ @@ -59,6 +69,40 @@ void module_free(struct module *mod, void *module_region) vfree(module_region); } +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_X86_32 +void *module_alloc_exec(unsigned long size) +{ + struct vm_struct *area; + + if (size == 0) + return NULL; + + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); + return area ? area->addr : NULL; +} +EXPORT_SYMBOL(module_alloc_exec); + +void module_free_exec(struct module *mod, void *module_region) +{ + vunmap(module_region); +} +EXPORT_SYMBOL(module_free_exec); +#else +void module_free_exec(struct module *mod, void *module_region) +{ + module_free(mod, module_region); +} +EXPORT_SYMBOL(module_free_exec); + +void *module_alloc_exec(unsigned long size) +{ + return __module_alloc(size, PAGE_KERNEL_RX); +} +EXPORT_SYMBOL(module_alloc_exec); +#endif +#endif + /* We don't need anything special.
*/ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, @@ -78,14 +122,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, unsigned int i; Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; - uint32_t *location; + uint32_t *plocation, location; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; + location = (uint32_t)plocation; + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) + plocation = ktla_ktva((void *)plocation); /* This is the symbol it is referring to. Note that all undefined symbols have been resolved. */ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr @@ -94,11 +140,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, switch (ELF32_R_TYPE(rel[i].r_info)) { case R_386_32: /* We add the value into the location given */ - *location += sym->st_value; + pax_open_kernel(); + *plocation += sym->st_value; + pax_close_kernel(); break; case R_386_PC32: /* Add the value, subtract its postition */ - *location += sym->st_value - (uint32_t)location; + pax_open_kernel(); + *plocation += sym->st_value - location; + pax_close_kernel(); break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", @@ -154,21 +204,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_X86_64_NONE: break; case R_X86_64_64: + pax_open_kernel(); *(u64 *)loc = val; + pax_close_kernel(); break; case R_X86_64_32: + pax_open_kernel(); *(u32 *)loc = val; + pax_close_kernel(); if (val != *(u32 *)loc) goto overflow; break; case R_X86_64_32S: + pax_open_kernel(); *(s32 *)loc = val; + pax_close_kernel(); if ((s64)val != *(s32 *)loc) goto overflow; break; case R_X86_64_PC32: val -= (u64)loc; + pax_open_kernel(); *(u32 *)loc = val; + pax_close_kernel(); + #if 0 if ((s64)val != *(s32 *)loc) goto overflow; diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 676b8c7..870ba04 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) arch_spin_lock(lock); } -struct pv_lock_ops pv_lock_ops = { +struct pv_lock_ops pv_lock_ops __read_only = { #ifdef CONFIG_SMP .spin_is_locked = __ticket_spin_is_locked, .spin_is_contended = __ticket_spin_is_contended, diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1db183e..d5174a5 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, * corresponding structure. 
*/ static void *get_call_destination(u8 type) { - struct paravirt_patch_template tmpl = { + const struct paravirt_patch_template tmpl = { .pv_init_ops = pv_init_ops, .pv_time_ops = pv_time_ops, .pv_cpu_ops = pv_cpu_ops, @@ -145,14 +145,14 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, if (opfunc == NULL) /* If there's no function, patch it with a ud2a (BUG) */ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); - else if (opfunc == _paravirt_nop) + else if (opfunc == (void *)_paravirt_nop) /* If the operation is a nop, then nop the callsite */ ret = paravirt_patch_nop(); /* identity functions just return their single argument */ - else if (opfunc == _paravirt_ident_32) + else if (opfunc == (void *)_paravirt_ident_32) ret = paravirt_patch_ident_32(insnbuf, len); - else if (opfunc == _paravirt_ident_64) + else if (opfunc == (void *)_paravirt_ident_64) ret = paravirt_patch_ident_64(insnbuf, len); else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || @@ -178,7 +178,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len, if (insn_len > len || start == NULL) insn_len = len; else - memcpy(insnbuf, start, insn_len); + memcpy(insnbuf, ktla_ktva(start), insn_len); return insn_len; } @@ -294,22 +294,22 @@ void arch_flush_lazy_mmu_mode(void) preempt_enable(); } -struct pv_info pv_info = { +struct pv_info pv_info __read_only = { .name = "bare hardware", .paravirt_enabled = 0, .kernel_rpl = 0, .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ }; -struct pv_init_ops pv_init_ops = { +struct pv_init_ops pv_init_ops __read_only = { .patch = native_patch, }; -struct pv_time_ops pv_time_ops = { +struct pv_time_ops pv_time_ops __read_only = { .sched_clock = native_sched_clock, }; -struct pv_irq_ops pv_irq_ops = { +struct pv_irq_ops pv_irq_ops __read_only = { .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), @@ -321,7 +321,7 @@ struct pv_irq_ops pv_irq_ops = { #endif }; -struct pv_cpu_ops pv_cpu_ops = { +struct pv_cpu_ops pv_cpu_ops __read_only = { .cpuid = native_cpuid, .get_debugreg = native_get_debugreg, .set_debugreg = native_set_debugreg, @@ -382,7 +382,7 @@ struct pv_cpu_ops pv_cpu_ops = { .end_context_switch = paravirt_nop, }; -struct pv_apic_ops pv_apic_ops = { +struct pv_apic_ops pv_apic_ops __read_only = { #ifdef CONFIG_X86_LOCAL_APIC .startup_ipi_hook = paravirt_nop, #endif @@ -396,7 +396,7 @@ struct pv_apic_ops pv_apic_ops = { #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) #endif -struct pv_mmu_ops pv_mmu_ops = { +struct pv_mmu_ops pv_mmu_ops __read_only = { .read_cr2 = native_read_cr2, .write_cr2 = native_write_cr2, @@ -463,6 +463,12 @@ struct pv_mmu_ops pv_mmu_ops = { }, .set_fixmap = native_set_fixmap, + +#ifdef CONFIG_PAX_KERNEXEC + .pax_open_kernel = native_pax_open_kernel, + .pax_close_kernel = native_pax_close_kernel, +#endif + }; EXPORT_SYMBOL_GPL(pv_time_ops); diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 078d4ec..f4b9b3c 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -475,7 +475,7 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } -static struct dma_map_ops calgary_dma_ops = { +static const struct dma_map_ops calgary_dma_ops = { .alloc_coherent = calgary_alloc_coherent, .free_coherent = calgary_free_coherent, .map_sg = calgary_map_sg, diff --git 
a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 4b7e3d8..0f9c3e9 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -16,7 +16,7 @@ static int forbid_dac __read_mostly; -struct dma_map_ops *dma_ops = &nommu_dma_ops; +const struct dma_map_ops *dma_ops = &nommu_dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -248,7 +248,7 @@ early_param("iommu", iommu_setup); int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 0f7f130..ab480fd 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -699,7 +699,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) return -1; } -static struct dma_map_ops gart_dma_ops = { +static const struct dma_map_ops gart_dma_ops = { .map_sg = gart_map_sg, .unmap_sg = gart_unmap_sg, .map_page = gart_map_page, diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 3af4af8..7950f48 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(struct device *dev, flush_write_buffers(); } -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc_coherent = dma_generic_alloc_coherent, .free_coherent = nommu_free_coherent, .map_sg = nommu_map_sg, diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index a5bc528..29def6f 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); } -static struct dma_map_ops swiotlb_dma_ops = { +static const struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, .alloc_coherent = x86_swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e7e3521..c8f0251 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -73,7 +73,7 @@ void exit_thread(void) unsigned long *bp = t->io_bitmap_ptr; if (bp) { - struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); + struct tss_struct *tss = init_tss + get_cpu(); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); @@ -107,7 +107,7 @@ void show_regs_common(void) printk(KERN_CONT "\n"); printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", - current->pid, current->comm, print_tainted(), + task_pid_nr(current), current->comm, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version, board, product); @@ -117,6 +117,9 @@ void flush_thread(void) { struct task_struct *tsk = current; +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) + loadsegment(gs, 0); +#endif flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* @@ -279,8 +282,8 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) regs.di = (unsigned long) arg; #ifdef CONFIG_X86_32 - regs.ds = __USER_DS; - regs.es = __USER_DS; + regs.ds = __KERNEL_DS; + regs.es = __KERNEL_DS; regs.fs = __KERNEL_PERCPU; regs.gs = __KERNEL_STACK_CANARY; #else @@ -689,17 +692,3 @@ static int __init idle_setup(char *str) return 0; } early_param("idle", idle_setup); - 
-unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long range_end = mm->brk + 0x02000000; - return randomize_range(mm->brk, range_end, 0) ? : mm->brk; -} - diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 8d12878..350b125 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); unsigned long thread_saved_pc(struct task_struct *tsk) { return ((unsigned long *)tsk->thread.sp)[3]; +//XXX return tsk->thread.eip; } #ifndef CONFIG_SMP @@ -126,7 +127,7 @@ void __show_regs(struct pt_regs *regs, int all) unsigned long sp; unsigned short ss, gs; - if (user_mode_vm(regs)) { + if (user_mode(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; gs = get_user_gs(regs); @@ -196,7 +197,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, struct task_struct *tsk; int err; - childregs = task_pt_regs(p); + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; *childregs = *regs; childregs->ax = 0; childregs->sp = sp; @@ -230,6 +231,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) +//XXX needs set_fs()? err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); @@ -293,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; bool preload_fpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -328,6 +330,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ lazy_save_gs(prev->gs); +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit)) + __set_fs(task_thread_info(next_p)->addr_limit, cpu); +#endif + /* * Load the per-thread Thread-Local Storage descriptor. 
*/ @@ -404,3 +411,27 @@ unsigned long get_wchan(struct task_struct *p) return 0; } +#ifdef CONFIG_PAX_RANDKSTACK +asmlinkage void pax_randomize_kstack(void) +{ + struct thread_struct *thread = &current->thread; + unsigned long time; + + if (!randomize_va_space) + return; + + rdtscl(time); + + /* P4 seems to return a 0 LSB, ignore it */ +#ifdef CONFIG_MPENTIUM4 + time &= 0x1EUL; + time <<= 2; +#else + time &= 0xFUL; + time <<= 3; +#endif + + thread->sp0 ^= time; + load_sp0(init_tss + smp_processor_id(), thread); +} +#endif diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c2422a..2904e2d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -87,7 +87,7 @@ static void __exit_idle(void) void exit_idle(void) { /* idle loop has pid 0 */ - if (current->pid) + if (task_pid_nr(current)) return; __exit_idle(); } @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; unsigned fsindex, gsindex; bool preload_fpu; @@ -528,12 +528,11 @@ unsigned long get_wchan(struct task_struct *p) if (!p || p == current || p->state == TASK_RUNNING) return 0; stack = (unsigned long)task_stack_page(p); - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-8-sizeof(u64)) return 0; fp = *(u64 *)(p->thread.sp); do { - if (fp < (unsigned long)stack || - fp >= (unsigned long)stack+THREAD_SIZE) + if (fp < stack || fp > stack+THREAD_SIZE-8-sizeof(u64)) return 0; ip = *(u64 *)(fp+8); if (!in_sched_functions(ip)) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 70c4872..25fb80f 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -804,7 +804,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */ long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret; - unsigned long __user *datap = (unsigned long __user *)data; + unsigned long __user *datap = (__force unsigned long __user *)data; switch (request) { /* read the word at location addr in the USER area. */ @@ -891,14 +891,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) if (addr < 0) return -EIO; ret = do_get_thread_area(child, addr, - (struct user_desc __user *) data); + (__force struct user_desc __user *) data); break; case PTRACE_SET_THREAD_AREA: if (addr < 0) return -EIO; ret = do_set_thread_area(child, addr, - (struct user_desc __user *) data, 0); + (__force struct user_desc __user *) data, 0); break; #endif @@ -1315,7 +1315,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; + info->si_addr = user_mode(regs) ?
(__force void __user *)regs->ip : NULL; } void user_single_step_siginfo(struct task_struct *tsk, diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 76a0d71..0df1a0c 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -33,7 +33,7 @@ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); static const struct desc_ptr no_idt = {}; -static int reboot_mode; +static unsigned short reboot_mode; enum reboot_type reboot_type = BOOT_KBD; int reboot_force; @@ -284,7 +284,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, - { } + { NULL, NULL, {{0, {0}}}, NULL} }; static int __init reboot_init(void) @@ -300,12 +300,12 @@ core_initcall(reboot_init); controller to pulse the CPU reset line, which is more thorough, but doesn't work with at least one type of 486 motherboard. It is easy to stop this code working; hence the copious comments. */ -static const unsigned long long -real_mode_gdt_entries [3] = +static struct desc_struct +real_mode_gdt_entries [3] __read_only = { - 0x0000000000000000ULL, /* Null descriptor */ - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */ + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */ + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */ }; static const struct desc_ptr @@ -354,7 +354,7 @@ static const unsigned char jump_to_bios [] = * specified by the code and length parameters. * We assume that length will aways be less that 100! */ -void machine_real_restart(const unsigned char *code, int length) +void machine_real_restart(const unsigned char *code, unsigned int length) { local_irq_disable(); @@ -374,8 +374,8 @@ void machine_real_restart(const unsigned char *code, int length) /* Remap the kernel at virtual address zero, as well as offset zero from the kernel segment. This assumes the kernel segment starts at virtual address PAGE_OFFSET. */ - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS); + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); /* * Use `swapper_pg_dir' as our page directory. @@ -387,16 +387,15 @@ void machine_real_restart(const unsigned char *code, int length) boot)". This seems like a fairly standard thing that gets set by REBOOT.COM programs, and the previous reset routine did this too. */ - *((unsigned short *)0x472) = reboot_mode; + *(unsigned short *)(__va(0x472)) = reboot_mode; /* For the switch to real mode, copy some code to low memory. It has to be in the first 64k because it is running in 16-bit mode, and it has to have the same physical and virtual address, because it turns off paging. Copy it near the end of the first page, out of the way of BIOS variables. */ - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100), - real_mode_switch, sizeof (real_mode_switch)); - memcpy((void *)(0x1000 - 100), code, length); + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch)); + memcpy(__va(0x1000 - 100), code, length); /* Set up the IDT for real mode. 
*/ load_idt(&real_mode_idt); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6600cfd..46a5ea3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -704,7 +704,7 @@ static void __init trim_bios_range(void) * area (640->1Mb) as ram even though it is not. * take them out. */ - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } @@ -791,14 +791,14 @@ void __init setup_arch(char **cmdline_p) if (!boot_params.hdr.root_flags) root_mountflags &= ~MS_RDONLY; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; + init_mm.start_code = ktla_ktva((unsigned long) _text); + init_mm.end_code = ktla_ktva((unsigned long) _etext); init_mm.end_data = (unsigned long) _edata; init_mm.brk = _brk_end; - code_resource.start = virt_to_phys(_text); - code_resource.end = virt_to_phys(_etext)-1; - data_resource.start = virt_to_phys(_etext); + code_resource.start = virt_to_phys(ktla_ktva(_text)); + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1; + data_resource.start = virt_to_phys(_sdata); data_resource.end = virt_to_phys(_edata)-1; bss_resource.start = virt_to_phys(&__bss_start); bss_resource.end = virt_to_phys(&__bss_stop)-1; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a60df9a..0083b50 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -21,19 +21,17 @@ #include #include -DEFINE_PER_CPU(int, cpu_number); +#ifdef CONFIG_SMP +DEFINE_PER_CPU(unsigned int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); +#endif -#ifdef CONFIG_X86_64 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) -#else -#define BOOT_PERCPU_OFFSET 0 -#endif DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; EXPORT_PER_CPU_SYMBOL(this_cpu_off); -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { +unsigned long __per_cpu_offset[NR_CPUS] __read_only = { [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET, }; EXPORT_SYMBOL(__per_cpu_offset); @@ -161,10 +159,10 @@ static inline void setup_percpu_segment(int cpu) { #ifdef CONFIG_X86_32 struct desc_struct gdt; + unsigned long base = per_cpu_offset(cpu); - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, - 0x2 | DESCTYPE_S, 0x8); - gdt.s = 1; + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, + 0x83 | DESCTYPE_S, 0xC); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); #endif @@ -213,6 +211,11 @@ void __init setup_per_cpu_areas(void) /* alrighty, percpu areas up and running */ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { +#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_x86_32 + unsigned long canary = per_cpu(stack_canary, cpu); +#endif +#endif per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; @@ -249,6 +252,12 @@ void __init setup_per_cpu_areas(void) set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); #endif #endif +#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_x86_32 + if (cpu == boot_cpu_id) + per_cpu(stack_canary, cpu) = canary; +#endif +#endif /* * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. 
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4fd173c..9dc1de7 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp) * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ - sp = ((sp + 4) & -16ul) - 4; + sp = ((sp - 12) & -16ul) - 4; #else /* !CONFIG_X86_32 */ sp = round_down(sp, 16) - 8; #endif @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, * Return an always-bogus address instead so we will die with SIGSEGV. */ if (onsigstack && !likely(on_sig_stack(sp))) - return (void __user *)-1L; + return (__force void __user *)-1L; /* save i387 state */ if (used_math() && save_i387_xstate(*fpstate) < 0) - return (void __user *)-1L; + return (__force void __user *)-1L; return (void __user *)sp; } @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, } if (current->mm->context.vdso) - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else - restorer = &frame->retcode; + restorer = (void __user *)&frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); if (err) return -EFAULT; @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. */ - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + if (current->mm->context.vdso) + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + else + restorer = (void __user *)&frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; put_user_ex(restorer, &frame->pretcode); @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); } put_user_catch(err); if (err) @@ -780,7 +783,7 @@ static void do_signal(struct pt_regs *regs) * X86_32: vm86 regs switched out by assembly code before reaching * here, so testing against kernel CS suffices. 
*/ - if (!user_mode(regs)) + if (!user_mode_novm(regs)) return; if (current_thread_info()->status & TS_RESTORE_SIGMASK) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 821ee1b..e29b497 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -786,7 +786,11 @@ do_rest: (unsigned long)task_stack_page(c_idle.idle) - KERNEL_STACK_OFFSET + THREAD_SIZE; #endif + + pax_open_kernel(); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + pax_close_kernel(); + initial_code = (unsigned long)start_secondary; stack_start.sp = (void *) c_idle.idle->thread.sp; @@ -926,6 +930,12 @@ int __cpuinit native_cpu_up(unsigned int cpu) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +#endif + err = do_boot_cpu(apicid, cpu); if (err) { diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 58de45e..43bc689 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re struct desc_struct *desc; unsigned long base; - seg &= ~7UL; + seg >>= 3; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(seg >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { desc = child->mm->context.ldt + seg; @@ -53,6 +53,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); + if (addr == -EINVAL) + return 0; + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { @@ -74,7 +77,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) #ifdef CONFIG_X86_64 case 0x40 ... 
0x4f: - if (regs->cs != __USER_CS) + if ((regs->cs & 0xffff) != __USER_CS) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 196552b..90850f1 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c @@ -24,6 +24,228 @@ #include +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) +{ + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > pax_task_size || addr > pax_task_size - len) + return -EINVAL; + + return 0; +} + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long start_addr, pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + + if (addr) { + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + return addr; + } + } + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + } + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) { + start_addr = 0x00110000UL; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + start_addr += mm->delta_mmap & 0x03FFF000UL; +#endif + + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base) + start_addr = addr = mm->mmap_base; + else + addr = start_addr; + } +#endif + +full_search: + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (pax_task_size - len < addr) { + /* + * Start a new search - just in case we missed + * some holes. 
+ */ + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } + if (check_heap_stack_gap(vma, addr, len)) + break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + if (mm->start_brk <= addr && addr < mm->mmap_base) { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + } + + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; +} + +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + /* requested length too big for entire address space */ + if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) + goto bottomup; +#endif + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + return addr; + } + } + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); + if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + + if (mm->mmap_base < len) + goto bottomup; + + addr = mm->mmap_base-len; + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; + } while (len < vma->vm_start); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->mmap_base = base; + mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +} + /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index ff14a50..a00edfd 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -32,8 +32,8 @@ out: return error; } -static void find_start_end(unsigned long flags, unsigned long *begin, - unsigned long *end) +static void find_start_end(struct mm_struct *mm, unsigned long flags, + unsigned long *begin, unsigned long *end) { if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { unsigned long new_begin; @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = TASK_UNMAPPED_BASE; + *begin = mm->mmap_base; *end = TASK_SIZE; } } @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (flags & MAP_FIXED) return addr; - find_start_end(flags, &begin, &end); + find_start_end(mm, flags, &begin, &end); if (len > end) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) @@ -106,7 +109,7 @@ full_search: } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* * Remember the place where we stopped the search: */ @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - unsigned long addr = addr0; + unsigned long base = mm->mmap_base, addr = addr0; /* requested length too big for entire address space */ if (len > TASK_SIZE) @@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) goto bottomup; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } @@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* make sure it can fit in the remaining address space */ if (addr > len) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) + if (check_heap_stack_gap(vma, addr - len, len)) /* remember the address as a hint for next time */ return mm->free_area_cache = addr-len; } @@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, * return with success: */ vma = find_vma(mm, addr); - if (!vma || addr+len <= 
vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) /* remember the address as a hint for next time */ return mm->free_area_cache = addr; @@ -198,13 +204,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. */ + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; - mm->free_area_cache = TASK_UNMAPPED_BASE; addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); /* * Restore the topdown base: */ - mm->free_area_cache = mm->mmap_base; + mm->mmap_base = base; + mm->free_area_cache = base; mm->cached_hole_size = ~0UL; return addr; diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 8b37293..520368b 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -1,3 +1,4 @@ +.section .rodata,"a",@progbits ENTRY(sys_call_table) .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ .long sys_exit diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index fb5cc5e1..b457784 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -26,17 +26,13 @@ int timer_ack; #endif -#ifdef CONFIG_X86_64 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; -#endif - unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); - if (!user_mode_vm(regs) && in_lock_functions(pc)) { + if (!user_mode(regs) && in_lock_functions(pc)) { #ifdef CONFIG_FRAME_POINTER - return *(unsigned long *)(regs->bp + sizeof(long)); + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); #else unsigned long *sp = (unsigned long *)kernel_stack_pointer(regs); @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs) * or above a saved flags. Eflags has bits 22-31 zero, * kernel addresses don't. */ + +#ifdef CONFIG_PAX_KERNEXEC + return ktla_ktva(sp[0]); +#else if (sp[0] >> 22) return sp[0]; if (sp[1] >> 22) return sp[1]; #endif + +#endif } return pc; } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 6bb7b85..dd853e1 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx, if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; +#ifdef CONFIG_PAX_SEGMEXEC + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) + return -EINVAL; +#endif + set_tls_desc(p, idx, &info, 1); return 0; diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S index 8508237..229b664 100644 --- a/arch/x86/kernel/trampoline_32.S +++ b/arch/x86/kernel/trampoline_32.S @@ -32,6 +32,12 @@ #include #include +#ifdef CONFIG_PAX_KERNEXEC +#define ta(X) (X) +#else +#define ta(X) ((X) - __PAGE_OFFSET) +#endif + /* We can free up trampoline after bootup if cpu hotplug is not supported. */ __CPUINITRODATA .code16 @@ -60,7 +66,7 @@ r_base = . 
inc %ax # protected mode (PE) bit lmsw %ax # into protected mode # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET) + ljmpl $__BOOT_CS, $ta(startup_32_smp) # These need to be in the same 64K segment as the above; # hence we don't use the boot_gdt_descr defined in head.S diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S index 3af2dff..3b2d3ad 100644 --- a/arch/x86/kernel/trampoline_64.S +++ b/arch/x86/kernel/trampoline_64.S @@ -91,7 +91,7 @@ startup_32: movl $__KERNEL_DS, %eax # Initialize the %ds segment register movl %eax, %ds - movl $X86_CR4_PAE, %eax + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax movl %eax, %cr4 # Enable PAE mode # Setup trampoline 4 level pagetables @@ -138,7 +138,7 @@ tidt: # so the kernel can live anywhere .balign 4 tgdt: - .short tgdt_end - tgdt # gdt limit + .short tgdt_end - tgdt - 1 # gdt limit .long tgdt - r_base .short 0 .quad 0x00cf9b000000ffff # __KERNEL32_CS diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4d0f3ed..b680aa1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -70,12 +70,6 @@ asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? */ char ignore_fpu_irq; - -/* - * The IDT has to be page-aligned to simplify the Pentium - * F0 0F bug workaround. - */ -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; #endif DECLARE_BITMAP(used_vectors, NR_VECTORS); @@ -110,13 +104,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) } static void __kprobes -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { struct task_struct *tsk = current; #ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) { + if (v8086_mode(regs)) { /* * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. * On nmi (interrupt 2), do_trap should not be called. 
@@ -127,7 +121,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, } #endif - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto kernel_trap; #ifdef CONFIG_X86_32 @@ -150,7 +144,7 @@ trap_signal: printk_ratelimit()) { printk(KERN_INFO "%s[%d] trap %s ip:%lx sp:%lx error:%lx", - tsk->comm, tsk->pid, str, + tsk->comm, task_pid_nr(tsk), str, regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); printk("\n"); @@ -167,8 +161,20 @@ kernel_trap: if (!fixup_exception(regs)) { tsk->thread.error_code = error_code; tsk->thread.trap_no = trapnr; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) + str = "PAX: suspicious stack segment fault"; +#endif + die(str, regs, error_code); } + +#ifdef CONFIG_PAX_REFCOUNT + if (trapnr == 4) + pax_report_refcount_overflow(regs); +#endif + return; #ifdef CONFIG_X86_32 @@ -257,14 +263,30 @@ do_general_protection(struct pt_regs *regs, long error_code) conditional_sti(regs); #ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) + if (v8086_mode(regs)) goto gp_in_vm86; #endif tsk = current; - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto gp_in_kernel; +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { + struct mm_struct *mm = tsk->mm; + unsigned long limit; + + down_write(&mm->mmap_sem); + limit = mm->context.user_cs_limit; + if (limit < TASK_SIZE) { + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); + up_write(&mm->mmap_sem); + return; + } + up_write(&mm->mmap_sem); + } +#endif + tsk->thread.error_code = error_code; tsk->thread.trap_no = 13; @@ -297,6 +319,13 @@ gp_in_kernel: if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 13, SIGSEGV) == NOTIFY_STOP) return; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) + die("PAX: suspicious general protection fault", regs, error_code); + else +#endif + die("general protection fault", regs, error_code); } @@ -565,7 +594,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) /* It's safe to allow irq's after DR6 has been saved */ preempt_conditional_sti(regs); - if (regs->flags & X86_VM_MASK) { + if (v8086_mode(regs)) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); preempt_conditional_cli(regs); @@ -579,7 +608,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) * We already checked v86 mode above, so we can check for kernel mode * by just checking the CPL of CS. 
*/ - if ((dr6 & DR_STEP) && !user_mode(regs)) { + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) { tsk->thread.debugreg6 &= ~DR_STEP; set_tsk_thread_flag(tsk, TIF_SINGLESTEP); regs->flags &= ~X86_EFLAGS_TF; @@ -608,7 +637,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr) return; conditional_sti(regs); - if (!user_mode_vm(regs)) + if (!user_mode(regs)) { if (!fixup_exception(regs)) { task->thread.error_code = error_code; diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 4094ae0..8ba3527 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -833,7 +833,7 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "2635FA0"), }, }, - {} + { NULL, NULL, {{0, {0}}}, NULL} }; static void __init check_system_tsc_reliable(void) diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 61fb985..5401452 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) do_exit(SIGSEGV); } - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); current->thread.sp0 = current->thread.saved_sp0; current->thread.sysenter_cs = __KERNEL_CS; load_sp0(tss, &current->thread); @@ -207,6 +208,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs) struct task_struct *tsk; int tmp, ret = -EPERM; +#ifdef CONFIG_GRKERNSEC_VM86 + if (!capable(CAP_SYS_RAWIO)) { + gr_handle_vm86(); + goto out; + } +#endif + tsk = current; if (tsk->thread.saved_sp0) goto out; @@ -237,6 +245,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs) int tmp, ret; struct vm86plus_struct __user *v86; +#ifdef CONFIG_GRKERNSEC_VM86 + if (!capable(CAP_SYS_RAWIO)) { + gr_handle_vm86(); + ret = -EPERM; + goto out; + } +#endif + tsk = current; switch (cmd) { case VM86_REQUEST_IRQ: @@ -323,7 +339,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk tsk->thread.saved_fs = info->regs32->fs; tsk->thread.saved_gs = get_user_gs(info->regs32); - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; @@ -528,7 +544,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) goto cannot_handle; - intr_ptr = (unsigned long __user *) (i << 2); + intr_ptr = (__force unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index ce9fbac..26bc0c8 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -46,12 +46,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int); #define call_vrom_func(rom,func) \ - (((VROMFUNC *)(rom->func))()) + (((VROMFUNC *)(ktva_ktla(rom.func)))()) #define call_vrom_long_func(rom,func,arg) \ - (((VROMLONGFUNC *)(rom->func)) (arg)) - -static struct vrom_header *vmi_rom; +({\ + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\ + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\ + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\ + __reloc;\ +}) + +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"),
__aligned__(PAGE_SIZE))); static int disable_pge; static int disable_pse; static int disable_sep; @@ -78,10 +83,10 @@ static struct { void (*set_initial_ap_state)(int, int); void (*halt)(void); void (*set_lazy_mode)(int mode); -} vmi_ops; +} vmi_ops __read_only; /* Cached VMI operations */ -struct vmi_timer_ops vmi_timer_ops; +struct vmi_timer_ops vmi_timer_ops __read_only; /* * VMI patching routines. @@ -96,7 +101,7 @@ struct vmi_timer_ops vmi_timer_ops; static inline void patch_offset(void *insnbuf, unsigned long ip, unsigned long dest) { - *(unsigned long *)(insnbuf+1) = dest-ip-5; + *(unsigned long *)(insnbuf+1) = dest-ip-5; } static unsigned patch_internal(int call, unsigned len, void *insnbuf, @@ -104,6 +109,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf, { u64 reloc; struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; + reloc = call_vrom_long_func(vmi_rom, get_reloc, call); switch(rel->type) { case VMI_RELOCATION_CALL_REL: @@ -382,13 +388,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval) static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - const pte_t pte = { .pte = 0 }; + const pte_t pte = __pte(0ULL); vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); } static void vmi_pmd_clear(pmd_t *pmd) { - const pte_t pte = { .pte = 0 }; + const pte_t pte = __pte(0ULL); vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); } #endif @@ -416,8 +422,8 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, ap.ss = __KERNEL_DS; ap.esp = (unsigned long) start_esp; - ap.ds = __USER_DS; - ap.es = __USER_DS; + ap.ds = __KERNEL_DS; + ap.es = __KERNEL_DS; ap.fs = __KERNEL_PERCPU; ap.gs = __KERNEL_STACK_CANARY; @@ -464,6 +470,18 @@ static void vmi_leave_lazy_mmu(void) paravirt_leave_lazy_mmu(); } +#ifdef CONFIG_PAX_KERNEXEC +static unsigned long vmi_pax_open_kernel(void) +{ + return 0; +} + +static unsigned long vmi_pax_close_kernel(void) +{ + return 0; +} +#endif + static inline int __init check_vmi_rom(struct vrom_header *rom) { struct pci_header *pci; @@ -476,6 +494,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom) return 0; if (rom->vrom_signature != VMI_SIGNATURE) return 0; + if (rom->rom_length * 512 > sizeof(*rom)) { + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512); + return 0; + } if (rom->api_version_maj != VMI_API_REV_MAJOR || rom->api_version_min+1 < VMI_API_REV_MINOR+1) { printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n", @@ -540,7 +562,7 @@ static inline int __init probe_vmi_rom(void) struct vrom_header *romstart; romstart = (struct vrom_header *)isa_bus_to_virt(base); if (check_vmi_rom(romstart)) { - vmi_rom = romstart; + vmi_rom = *romstart; return 1; } } @@ -816,6 +838,11 @@ static inline int __init activate_vmi(void) para_fill(pv_irq_ops.safe_halt, Halt); +#ifdef CONFIG_PAX_KERNEXEC + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel; + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel; +#endif + /* * Alternative instruction rewriting doesn't happen soon enough * to convert VMI_IRET to a call instead of a jump; so we have @@ -833,16 +860,16 @@ static inline int __init activate_vmi(void) void __init vmi_init(void) { - if (!vmi_rom) + if (!vmi_rom.rom_signature) probe_vmi_rom(); else - check_vmi_rom(vmi_rom); + check_vmi_rom(&vmi_rom); /* In case probing for or validating the ROM failed, basil */ - if (!vmi_rom) + if (!vmi_rom.rom_signature) return; - reserve_top_address(-vmi_rom->virtual_top); + 
reserve_top_address(-vmi_rom.virtual_top); #ifdef CONFIG_X86_IO_APIC /* This is virtual hardware; timer routing is wired correctly */ @@ -854,7 +881,7 @@ void __init vmi_activate(void) { unsigned long flags; - if (!vmi_rom) + if (!vmi_rom.rom_signature) return; local_irq_save(flags); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d0bb522..fdc8dce 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -26,6 +26,13 @@ #include #include #include +#include + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) +#else +#define __KERNEL_TEXT_OFFSET 0 +#endif #undef i386 /* in case the preprocessor is a 32bit one */ @@ -34,13 +41,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) -jiffies = jiffies_64; #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) -jiffies_64 = jiffies; #endif +jiffies = jiffies_64; + #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) /* * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA @@ -69,31 +76,46 @@ jiffies_64 = jiffies; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ - data PT_LOAD FLAGS(7); /* RWE */ +#ifdef CONFIG_X86_32 + module PT_LOAD FLAGS(5); /* R_E */ +#endif +#ifdef CONFIG_XEN + rodata PT_LOAD FLAGS(5); /* R_E */ +#else + rodata PT_LOAD FLAGS(4); /* R__ */ +#endif + data PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_X86_64 user PT_LOAD FLAGS(5); /* R_E */ +#endif + init.begin PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif + text.init PT_LOAD FLAGS(5); /* R_E */ + text.exit PT_LOAD FLAGS(5); /* R_E */ init PT_LOAD FLAGS(7); /* RWE */ -#endif note PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { #ifdef CONFIG_X86_32 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; - phys_startup_32 = startup_32 - LOAD_OFFSET; + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; #else - . = __START_KERNEL; - phys_startup_64 = startup_64 - LOAD_OFFSET; + . = __START_KERNEL; #endif /* Text and read-only data */ - .text : AT(ADDR(.text) - LOAD_OFFSET) { - _text = .; + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { /* bootstrapping code */ +#ifdef CONFIG_X86_32 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; +#else + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; +#endif + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; + _text = .; HEAD_TEXT #ifdef CONFIG_X86_32 . = ALIGN(PAGE_SIZE); @@ -108,13 +130,52 @@ SECTIONS IRQENTRY_TEXT *(.fixup) *(.gnu.warning) - /* End of text section */ - _etext = .; } :text = 0x9090 - NOTES :text :note + . += __KERNEL_TEXT_OFFSET; + +#ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) { + *(.vmi.rom) + } :module + + . = ALIGN(PAGE_SIZE); + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { + +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES) + MODULES_EXEC_VADDR = .; + BYTE(0) + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); + . = ALIGN(HPAGE_SIZE); + MODULES_EXEC_END = . - 1; +#endif + + } :module +#endif + + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { + /* End of text section */ + _etext = . - __KERNEL_TEXT_OFFSET; + } + +#ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { + *(.idt) + . 
= ALIGN(PAGE_SIZE); + *(.empty_zero_page) + *(.swapper_pg_fixmap) + *(.swapper_pg_pmd) + *(.swapper_pg_dir) + *(.trampoline_pg_dir) + } :rodata +#endif + + . = ALIGN(PAGE_SIZE); + NOTES :rodata :note - EXCEPTION_TABLE(16) :text = 0x9090 + EXCEPTION_TABLE(16) :rodata X64_ALIGN_DEBUG_RODATA_BEGIN RO_DATA(PAGE_SIZE) @@ -122,16 +183,20 @@ SECTIONS /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(PAGE_SIZE); +#endif + /* Start of data section */ _sdata = .; /* init_task */ INIT_TASK_DATA(THREAD_SIZE) -#ifdef CONFIG_X86_32 - /* 32 bit has nosave before _edata */ NOSAVE_DATA -#endif PAGE_ALIGNED_DATA(PAGE_SIZE) @@ -194,12 +259,6 @@ SECTIONS } vgetcpu_mode = VVIRT(.vgetcpu_mode); - . = ALIGN(L1_CACHE_BYTES); - .jiffies : AT(VLOAD(.jiffies)) { - *(.jiffies) - } - jiffies = VVIRT(.jiffies); - .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) } @@ -215,12 +274,19 @@ SECTIONS #endif /* CONFIG_X86_64 */ /* Init code and data - will be freed after init */ - . = ALIGN(PAGE_SIZE); .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { + BYTE(0) + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(PAGE_SIZE); +#endif + __init_begin = .; /* paired with __init_end */ - } + } :init.begin -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) +#ifdef CONFIG_SMP /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should @@ -229,12 +295,27 @@ SECTIONS PERCPU_VADDR(0, :percpu) #endif - INIT_TEXT_SECTION(PAGE_SIZE) -#ifdef CONFIG_X86_64 - :init -#endif + . = ALIGN(PAGE_SIZE); + init_begin = .; + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { + VMLINUX_SYMBOL(_sinittext) = .; + INIT_TEXT + VMLINUX_SYMBOL(_einittext) = .; + . = ALIGN(PAGE_SIZE); + } :text.init - INIT_DATA_SECTION(16) + /* + * .exit.text is discard at runtime, not link time, to deal with + * references from .altinstructions and .eh_frame + */ + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + EXIT_TEXT + . = ALIGN(16); + } :text.exit + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); + + . = ALIGN(PAGE_SIZE); + INIT_DATA_SECTION(16) :init .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; @@ -260,19 +341,11 @@ SECTIONS *(.altinstr_replacement) } - /* - * .exit.text is discard at runtime, not link time, to deal with - * references from .altinstructions and .eh_frame - */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { - EXIT_TEXT - } - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) +#ifndef CONFIG_SMP PERCPU(PAGE_SIZE) #endif @@ -291,16 +364,10 @@ SECTIONS .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) - . = ALIGN(PAGE_SIZE); __smp_locks_end = .; + . = ALIGN(PAGE_SIZE); } -#ifdef CONFIG_X86_64 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { - NOSAVE_DATA - } -#endif - /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { @@ -316,6 +383,7 @@ SECTIONS __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.brk_reservation) /* areas brk users have reserved */ + . = ALIGN(HPAGE_SIZE); __brk_limit = .; } @@ -342,13 +410,12 @@ SECTIONS * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load -INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ -. 
= ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 1c0c6ab..937756d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock, write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); /* copy vsyscall data */ + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name); vsyscall_gtod_data.clock.vread = clock->vread; vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; vsyscall_gtod_data.clock.mask = clock->mask; @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) We do this here because otherwise user space would do it on its own in a likely inferior way (no access to jiffies). If you don't like it pass NULL. */ - if (tcache && tcache->blob[0] == (j = __jiffies)) { + if (tcache && tcache->blob[0] == (j = jiffies)) { p = tcache->blob[1]; } else if (__vgetcpu_mode == VGETCPU_RDTSCP) { /* Load per CPU data from RDTSCP */ diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 1b950d1..ca83b2a 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(copy_user_generic_string); EXPORT_SYMBOL(copy_user_generic_unrolled); EXPORT_SYMBOL(__copy_user_nocache); -EXPORT_SYMBOL(_copy_from_user); -EXPORT_SYMBOL(_copy_to_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 37e68fc..121259e 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf, fx_sw_user->xstate_size > fx_sw_user->extended_size) return -1; - err = __get_user(magic2, (__u32 *) (((void *)fpstate) + + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) + fx_sw_user->extended_size - FP_XSTATE_MAGIC2_SIZE)); /* @@ -196,7 +196,7 @@ fx_only: * the other extended state. */ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); - return fxrstor_checking((__force struct i387_fxsave_struct *)buf); + return fxrstor_checking((struct i387_fxsave_struct __user *)buf); } /* @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf) if (use_xsave()) err = restore_user_xstate(buf); else - err = fxrstor_checking((__force struct i387_fxsave_struct *) + err = fxrstor_checking((struct i387_fxsave_struct __user *) buf); if (unlikely(err)) { /* diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 582c8fc..24806a4 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -88,11 +88,11 @@ #define Src2CL (1<<29) #define Src2ImmByte (2<<29) #define Src2One (3<<29) -#define Src2Imm16 (4<<29) -#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be +#define Src2Imm16 (4U<<29) +#define Src2Mem16 (5U<<29) /* Used for Ep encoding. First argument has to be in memory and second argument is located immediately after the first one in memory. 
*/ -#define Src2Mask (7<<29) +#define Src2Mask (7U<<29) enum { Group1_80, Group1_81, Group1_82, Group1_83, @@ -446,6 +446,7 @@ static u32 group2_table[] = { #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \ do { \ + unsigned long _tmp; \ __asm__ __volatile__ ( \ _PRE_EFLAGS("0", "4", "2") \ _op _suffix " %"_x"3,%1; " \ @@ -459,8 +460,6 @@ static u32 group2_table[] = { /* Raw emulation: instruction has two explicit operands. */ #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ do { \ - unsigned long _tmp; \ - \ switch ((_dst).bytes) { \ case 2: \ ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \ @@ -476,7 +475,6 @@ static u32 group2_table[] = { #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ do { \ - unsigned long _tmp; \ switch ((_dst).bytes) { \ case 1: \ ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1eb7a4a..66513e00 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -52,7 +52,7 @@ #define APIC_BUS_CYCLE_NS 1 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ -#define apic_debug(fmt, arg...) +#define apic_debug(fmt, arg...) do {} while (0) #define APIC_LVT_NUM 6 /* 14 is the version for Xeon and Pentium 8.4.8*/ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9ca7032..36aa477 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2825,7 +2825,11 @@ static void reload_tss(struct kvm_vcpu *vcpu) int cpu = raw_smp_processor_id(); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + + pax_open_kernel(); sd->tss_desc->type = 9; /* available 32/64-bit TSS */ + pax_close_kernel(); + load_TR_desc(); } @@ -3370,7 +3374,7 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) update_cr0_intercept(svm); } -static struct kvm_x86_ops svm_x86_ops = { +static const struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 39ac456..37d565b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -654,7 +654,11 @@ static void reload_tss(void) native_store_gdt(&gdt); descs = (void *)gdt.address; + + pax_open_kernel(); descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ + pax_close_kernel(); + load_TR_desc(); } @@ -1554,8 +1558,11 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_flexpriority()) flexpriority_enabled = 0; - if (!cpu_has_vmx_tpr_shadow()) - kvm_x86_ops->update_cr8_intercept = NULL; + if (!cpu_has_vmx_tpr_shadow()) { + pax_open_kernel(); + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; + pax_close_kernel(); + } if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); @@ -2537,7 +2544,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); @@ -3913,6 +3920,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) "jmp .Lkvm_vmx_return \n\t" ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" ".Lkvm_vmx_return: " + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + "ljmp %[cs],$.Lkvm_vmx_return2\n\t" + ".Lkvm_vmx_return2: " +#endif + /* Save guest registers, 
load host registers, keep flags */ "xchg %0, (%%"R"sp) \n\t" "mov %%"R"ax, %c[rax](%0) \n\t" @@ -3959,8 +3972,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + ,[cs]"i"(__KERNEL_CS) +#endif + : "cc", "memory" - , R"bx", R"di", R"si" + , R"ax", R"bx", R"di", R"si" #ifdef CONFIG_X86_64 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" #endif @@ -3974,7 +3992,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vmx->rmode.irq.pending) fixup_rmode_irq(vmx); - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS)); vmx->launched = 1; vmx_complete_interrupts(vmx); @@ -4195,7 +4213,7 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { } -static struct kvm_x86_ops vmx_x86_ops = { +static const struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index eee5cdd..415d3fd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -86,7 +86,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu); static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); -struct kvm_x86_ops *kvm_x86_ops; +const struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); int ignore_msrs = 0; @@ -112,38 +112,38 @@ static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); struct kvm_stats_debugfs_item debugfs_entries[] = { - { "pf_fixed", VCPU_STAT(pf_fixed) }, - { "pf_guest", VCPU_STAT(pf_guest) }, - { "tlb_flush", VCPU_STAT(tlb_flush) }, - { "invlpg", VCPU_STAT(invlpg) }, - { "exits", VCPU_STAT(exits) }, - { "io_exits", VCPU_STAT(io_exits) }, - { "mmio_exits", VCPU_STAT(mmio_exits) }, - { "signal_exits", VCPU_STAT(signal_exits) }, - { "irq_window", VCPU_STAT(irq_window_exits) }, - { "nmi_window", VCPU_STAT(nmi_window_exits) }, - { "halt_exits", VCPU_STAT(halt_exits) }, - { "halt_wakeup", VCPU_STAT(halt_wakeup) }, - { "hypercalls", VCPU_STAT(hypercalls) }, - { "request_irq", VCPU_STAT(request_irq_exits) }, - { "irq_exits", VCPU_STAT(irq_exits) }, - { "host_state_reload", VCPU_STAT(host_state_reload) }, - { "efer_reload", VCPU_STAT(efer_reload) }, - { "fpu_reload", VCPU_STAT(fpu_reload) }, - { "insn_emulation", VCPU_STAT(insn_emulation) }, - { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, - { "irq_injections", VCPU_STAT(irq_injections) }, - { "nmi_injections", VCPU_STAT(nmi_injections) }, - { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, - { "mmu_pte_write", VM_STAT(mmu_pte_write) }, - { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, - { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, - { "mmu_flooded", VM_STAT(mmu_flooded) }, - { "mmu_recycled", VM_STAT(mmu_recycled) }, - { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, - { "mmu_unsync", VM_STAT(mmu_unsync) }, - { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, - { "largepages", VM_STAT(lpages) }, + { "pf_fixed", VCPU_STAT(pf_fixed), NULL }, + { "pf_guest", VCPU_STAT(pf_guest), NULL }, + { "tlb_flush", VCPU_STAT(tlb_flush), NULL }, + { "invlpg", VCPU_STAT(invlpg), NULL }, + { "exits", VCPU_STAT(exits), NULL }, + { "io_exits", VCPU_STAT(io_exits), NULL }, + { "mmio_exits", 
VCPU_STAT(mmio_exits), NULL }, + { "signal_exits", VCPU_STAT(signal_exits), NULL }, + { "irq_window", VCPU_STAT(irq_window_exits), NULL }, + { "nmi_window", VCPU_STAT(nmi_window_exits), NULL }, + { "halt_exits", VCPU_STAT(halt_exits), NULL }, + { "halt_wakeup", VCPU_STAT(halt_wakeup), NULL }, + { "hypercalls", VCPU_STAT(hypercalls), NULL }, + { "request_irq", VCPU_STAT(request_irq_exits), NULL }, + { "irq_exits", VCPU_STAT(irq_exits), NULL }, + { "host_state_reload", VCPU_STAT(host_state_reload), NULL }, + { "efer_reload", VCPU_STAT(efer_reload), NULL }, + { "fpu_reload", VCPU_STAT(fpu_reload), NULL }, + { "insn_emulation", VCPU_STAT(insn_emulation), NULL }, + { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail), NULL }, + { "irq_injections", VCPU_STAT(irq_injections), NULL }, + { "nmi_injections", VCPU_STAT(nmi_injections), NULL }, + { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped), NULL }, + { "mmu_pte_write", VM_STAT(mmu_pte_write), NULL }, + { "mmu_pte_updated", VM_STAT(mmu_pte_updated), NULL }, + { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped), NULL }, + { "mmu_flooded", VM_STAT(mmu_flooded), NULL }, + { "mmu_recycled", VM_STAT(mmu_recycled), NULL }, + { "mmu_cache_miss", VM_STAT(mmu_cache_miss), NULL }, + { "mmu_unsync", VM_STAT(mmu_unsync), NULL }, + { "remote_tlb_flush", VM_STAT(remote_tlb_flush), NULL }, + { "largepages", VM_STAT(lpages), NULL }, { NULL } }; @@ -1672,6 +1672,8 @@ long kvm_arch_dev_ioctl(struct file *filp, if (n < msr_list.nmsrs) goto out; r = -EFAULT; + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) + goto out; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; @@ -2103,7 +2105,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { - if (irq->irq < 0 || irq->irq >= 256) + if (irq->irq >= 256) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; @@ -4076,10 +4078,10 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); -int kvm_arch_init(void *opaque) +int kvm_arch_init(const void *opaque) { int r; - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 71e080d..9a90110 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -86,13 +86,23 @@ ENTRY(atomic64_\func\()_return_cx8) movl %edx, %ecx \ins\()l %esi, %ebx \insc\()l %edi, %ecx + +#ifdef CONFIG_PAX_REFCOUNT + into +2: + _ASM_EXTABLE(2b, 3f) +#endif + LOCK_PREFIX cmpxchg8b (%ebp) jne 1b - -10: movl %ebx, %eax movl %ecx, %edx + +#ifdef CONFIG_PAX_REFCOUNT +3: +#endif + RESTORE edi RESTORE esi RESTORE ebx @@ -116,13 +126,24 @@ ENTRY(atomic64_\func\()_return_cx8) movl %edx, %ecx \ins\()l $1, %ebx \insc\()l $0, %ecx + +#ifdef CONFIG_PAX_REFCOUNT + into +2: + _ASM_EXTABLE(2b, 3f) +#endif + LOCK_PREFIX cmpxchg8b (%esi) jne 1b -10: movl %ebx, %eax movl %ecx, %edx + +#ifdef CONFIG_PAX_REFCOUNT +3: +#endif + RESTORE ebx ret CFI_ENDPROC @@ -176,6 +197,13 @@ ENTRY(atomic64_add_unless_cx8) movl %edx, %ecx addl %esi, %ebx adcl %edi, %ecx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 1234b) +#endif + LOCK_PREFIX cmpxchg8b (%ebp) jne 1b @@ -208,6 +236,13 @@ ENTRY(atomic64_inc_not_zero_cx8) movl %edx, %ecx addl $1, %ebx adcl $0, %ecx + +#ifdef 
CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 1234b) +#endif + LOCK_PREFIX cmpxchg8b (%esi) jne 1b diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index adbccd0..d9f12e3 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -28,7 +28,8 @@ #include #include #include - +#include + /* * computes a partial checksum, e.g. for TCP/UDP fragments */ @@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, #define ARGBASE 16 #define FP 12 - -ENTRY(csum_partial_copy_generic) + +ENTRY(csum_partial_copy_generic_to_user) CFI_STARTPROC + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 + jmp csum_partial_copy_generic + +ENTRY(csum_partial_copy_generic_from_user) + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + +ENTRY(csum_partial_copy_generic) subl $4,%esp CFI_ADJUST_CFA_OFFSET 4 pushl %edi @@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic) jmp 4f SRC(1: movw (%esi), %bx ) addl $2, %esi -DST( movw %bx, (%edi) ) +DST( movw %bx, %es:(%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax @@ -343,30 +357,30 @@ DST( movw %bx, (%edi) ) SRC(1: movl (%esi), %ebx ) SRC( movl 4(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +DST( movl %ebx, %es:(%edi) ) adcl %edx, %eax -DST( movl %edx, 4(%edi) ) +DST( movl %edx, %es:4(%edi) ) SRC( movl 8(%esi), %ebx ) SRC( movl 12(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 8(%edi) ) +DST( movl %ebx, %es:8(%edi) ) adcl %edx, %eax -DST( movl %edx, 12(%edi) ) +DST( movl %edx, %es:12(%edi) ) SRC( movl 16(%esi), %ebx ) SRC( movl 20(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 16(%edi) ) +DST( movl %ebx, %es:16(%edi) ) adcl %edx, %eax -DST( movl %edx, 20(%edi) ) +DST( movl %edx, %es:20(%edi) ) SRC( movl 24(%esi), %ebx ) SRC( movl 28(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 24(%edi) ) +DST( movl %ebx, %es:24(%edi) ) adcl %edx, %eax -DST( movl %edx, 28(%edi) ) +DST( movl %edx, %es:28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi @@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) ) shrl $2, %edx # This clears CF SRC(3: movl (%esi), %ebx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +DST( movl %ebx, %es:(%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx @@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) ) jb 5f SRC( movw (%esi), %cx ) leal 2(%esi), %esi -DST( movw %cx, (%edi) ) +DST( movw %cx, %es:(%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx SRC(5: movb (%esi), %cl ) -DST( movb %cl, (%edi) ) +DST( movb %cl, %es:(%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: @@ -408,7 +422,7 @@ DST( movb %cl, (%edi) ) 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) # zero the complete destination - computing the rest # is too much work @@ -421,11 +435,19 @@ DST( movb %cl, (%edi) ) 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT,(%ebx) + movl $-EFAULT,%ss:(%ebx) jmp 5000b .previous + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 popl %ebx CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE ebx @@ -439,26 +461,41 @@ DST( movb %cl, (%edi) ) CFI_ADJUST_CFA_OFFSET -4 ret CFI_ENDPROC -ENDPROC(csum_partial_copy_generic) +ENDPROC(csum_partial_copy_generic_to_user) #else /* Version for PentiumII/PPro */ #define ROUND1(x) \ + nop; nop; nop; \ SRC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + DST(movl %ebx, %es:x(%edi)) ; #define ROUND(x) \ + nop; nop; nop; \ SRC(movl 
x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + DST(movl %ebx, %es:x(%edi)) ; #define ARGBASE 12 - -ENTRY(csum_partial_copy_generic) + +ENTRY(csum_partial_copy_generic_to_user) CFI_STARTPROC + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 + jmp csum_partial_copy_generic + +ENTRY(csum_partial_copy_generic_from_user) + pushl $(__USER_DS) + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + +ENTRY(csum_partial_copy_generic) pushl %ebx CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET ebx, 0 @@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic) subl %ebx, %edi lea -1(%esi),%edx andl $-32,%edx - lea 3f(%ebx,%ebx), %ebx + lea 3f(%ebx,%ebx,2), %ebx testl %esi, %esi jmp *%ebx 1: addl $64,%esi @@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic) jb 5f SRC( movw (%esi), %dx ) leal 2(%esi), %esi -DST( movw %dx, (%edi) ) +DST( movw %dx, %es:(%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: SRC( movb (%esi), %dl ) -DST( movb %dl, (%edi) ) +DST( movb %dl, %es:(%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) # zero the complete destination (computing the rest is too much work) movl ARGBASE+8(%esp),%edi # dst movl ARGBASE+12(%esp),%ecx # len @@ -523,10 +560,18 @@ DST( movb %dl, (%edi) ) rep; stosb jmp 7b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) jmp 7b .previous + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %ds + CFI_ADJUST_CFA_OFFSET -4 + pushl %ss + CFI_ADJUST_CFA_OFFSET 4 + popl %es + CFI_ADJUST_CFA_OFFSET -4 popl %esi CFI_ADJUST_CFA_OFFSET -4 CFI_RESTORE esi @@ -538,7 +583,7 @@ DST( movb %dl, (%edi) ) CFI_RESTORE ebx ret CFI_ENDPROC -ENDPROC(csum_partial_copy_generic) +ENDPROC(csum_partial_copy_generic_to_user) #undef ROUND #undef ROUND1 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index ebeafcc..609dc8a 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -43,7 +43,7 @@ ENDPROC(clear_page) #include - .section .altinstr_replacement,"ax" + .section .altinstr_replacement,"a" 1: .byte 0xeb /* jmp */ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ 2: diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index 727a5d4..fd61ff4 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -104,7 +104,7 @@ ENDPROC(copy_page) #include - .section .altinstr_replacement,"ax" + .section .altinstr_replacement,"a" 1: .byte 0xeb /* jmp */ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */ 2: diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 71100c9..31abdb0 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -15,13 +15,14 @@ #include #include #include +#include .macro ALTERNATIVE_JUMP feature,orig,alt 0: .byte 0xe9 /* 32bit jump */ .long \orig-1f /* by default jump to orig */ 1: - .section .altinstr_replacement,"ax" + .section .altinstr_replacement,"a" 2: .byte 0xe9 /* near jump with 32bit immediate */ .long \alt-1b /* offset */ /* or alternatively to alt */ .previous @@ -64,37 +65,13 @@ #endif .endm -/* Standard copy_to_user with segment limit checking */ -ENTRY(_copy_to_user) - CFI_STARTPROC - GET_THREAD_INFO(%rax) - movq %rdi,%rcx - addq %rdx,%rcx - jc bad_to_user - cmpq TI_addr_limit(%rax),%rcx - jae bad_to_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string - CFI_ENDPROC 
-ENDPROC(_copy_to_user) - -/* Standard copy_from_user with segment limit checking */ -ENTRY(_copy_from_user) - CFI_STARTPROC - GET_THREAD_INFO(%rax) - movq %rsi,%rcx - addq %rdx,%rcx - jc bad_from_user - cmpq TI_addr_limit(%rax),%rcx - jae bad_from_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string - CFI_ENDPROC -ENDPROC(_copy_from_user) - .section .fixup,"ax" /* must zero dest */ ENTRY(bad_from_user) bad_from_user: CFI_STARTPROC + testl %edx,%edx + js bad_to_user movl %edx,%ecx xorl %eax,%eax rep diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S index cb0c112..39e53b9 100644 --- a/arch/x86/lib/copy_user_nocache_64.S +++ b/arch/x86/lib/copy_user_nocache_64.S @@ -14,6 +14,7 @@ #include #include #include +#include .macro ALIGN_DESTINATION #ifdef FIX_ALIGNMENT @@ -50,6 +51,15 @@ */ ENTRY(__copy_user_nocache) CFI_STARTPROC + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%rcx + cmp %rcx,%rsi + jae 1f + add %rcx,%rsi +1: +#endif + cmpl $8,%edx jb 20f /* less then 8 bytes, go to byte copy loop */ ALIGN_DESTINATION diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 459b58a..bb5720e 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -52,6 +52,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst, len -= 2; } } + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; isum = csum_partial_copy_generic((__force const void *)src, dst, len, isum, errp, NULL); if (unlikely(*errp)) @@ -105,6 +107,8 @@ csum_partial_copy_to_user(const void *src, void __user *dst, } *errp = 0; + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp); } diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 51f1504..2f490c0 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -33,14 +33,38 @@ #include #include #include +#include +#include .text ENTRY(__get_user_1) CFI_STARTPROC + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + 1: movzb (%_ASM_AX),%edx + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + pop %ds +#endif + xor %eax,%eax ret CFI_ENDPROC @@ -49,11 +73,33 @@ ENDPROC(__get_user_1) ENTRY(__get_user_2) CFI_STARTPROC add $1,%_ASM_AX + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else jc bad_get_user GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + 2: movzwl -1(%_ASM_AX),%edx + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + pop %ds +#endif + xor %eax,%eax ret CFI_ENDPROC @@ -62,11 +108,33 @@ ENDPROC(__get_user_2) ENTRY(__get_user_4) CFI_STARTPROC add $3,%_ASM_AX + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else jc bad_get_user GET_THREAD_INFO(%_ASM_DX) cmp 
TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + 3: mov -3(%_ASM_AX),%edx + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + pop %ds +#endif + xor %eax,%eax ret CFI_ENDPROC @@ -80,6 +148,15 @@ ENTRY(__get_user_8) GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + 4: movq -7(%_ASM_AX),%_ASM_DX xor %eax,%eax ret @@ -89,6 +166,12 @@ ENDPROC(__get_user_8) bad_get_user: CFI_STARTPROC + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + pop %ds +#endif + xor %edx,%edx mov $(-EFAULT),%_ASM_AX ret diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 9f33b98..e3a194d 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -21,6 +21,7 @@ #include #include #include +#include #define get_next(t, insn) \ ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) @@ -40,8 +41,8 @@ void insn_init(struct insn *insn, const void *kaddr, int x86_64) { memset(insn, 0, sizeof(*insn)); - insn->kaddr = kaddr; - insn->next_byte = kaddr; + insn->kaddr = ktla_ktva(kaddr); + insn->next_byte = ktla_ktva(kaddr); insn->x86_64 = x86_64 ? 1 : 0; insn->opnd_bytes = 4; if (x86_64) diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c index c9f2d9b..e7fd2c0 100644 --- a/arch/x86/lib/mmx_32.c +++ b/arch/x86/lib/mmx_32.c @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) { void *p; int i; + unsigned long cr0; if (unlikely(in_interrupt())) return __memcpy(to, from, len); @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" /* This set is 28 bytes */ - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" /* This set is 28 bytes */ + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from)); + : "=&r" (cr0) : "r" (from) : "ax"); for ( ; i > 5; i--) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 
40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; @@ -158,6 +187,7 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from) * but that is for later. -AV */ __asm__ __volatile__( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from)); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); for (i = 0; i < (4096-320)/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movntq %%mm0, (%1)\n" - " movq 8(%0), %%mm1\n" - " movntq %%mm1, 8(%1)\n" - " movq 16(%0), %%mm2\n" - " movntq %%mm2, 16(%1)\n" - " movq 24(%0), %%mm3\n" - " movntq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm4\n" - " movntq %%mm4, 32(%1)\n" - " movq 40(%0), %%mm5\n" - " movntq %%mm5, 40(%1)\n" - " movq 48(%0), %%mm6\n" - " movntq %%mm6, 48(%1)\n" - " movq 56(%0), %%mm7\n" - " movntq %%mm7, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movntq %%mm0, (%2)\n" + " movq 8(%1), %%mm1\n" + " movntq %%mm1, 8(%2)\n" + " movq 16(%1), %%mm2\n" + " movntq %%mm2, 16(%2)\n" + " movq 24(%1), %%mm3\n" + " movntq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm4\n" + " movntq %%mm4, 32(%2)\n" + " movq 40(%1), %%mm5\n" + " movntq %%mm5, 40(%2)\n" + " movq 48(%1), %%mm6\n" + " movntq %%mm6, 48(%2)\n" + " movq 56(%1), %%mm7\n" + " movntq %%mm7, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; @@ -280,47 +338,76 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + 
+#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from)); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 36b0d15..4c35b79 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -15,7 +15,8 @@ #include #include #include - +#include +#include /* * __put_user_X @@ -29,59 +30,162 @@ * as they get called from within inline assembly. 
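[Note: every mmx_32.c fixup path above follows one KERNEXEC pattern: the exception fixup rewrites an instruction in what is now read-only kernel text (it overwrites the faulting prefetch with a short jmp), so it clears CR0.WP around the self-patching movw and then restores it. Roughly the same thing in C, as a sketch only; read_cr0()/write_cr0() are the usual kernel accessors, and clearing X86_CR0_WP (bit 16) is what the 0xFFFEFFFF mask in the asm does.

	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* allow stores to read-only kernel text */
	/* ... patch the faulting prefetch into a short jmp, as the fixup does ... */
	write_cr0(cr0);			/* re-enable write protection */
]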
*/ -#define ENTER CFI_STARTPROC ; \ - GET_THREAD_INFO(%_ASM_BX) +#define ENTER CFI_STARTPROC #define EXIT ret ; \ CFI_ENDPROC +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define _DEST %_ASM_CX,%_ASM_BX +#else +#define _DEST %_ASM_CX +#endif + .text ENTRY(__put_user_1) ENTER + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else + GET_THREAD_INFO(%_ASM_BX) cmp TI_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user -1: movb %al,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +1: movb %al,(_DEST) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + popl %ds +#endif + xor %eax,%eax EXIT ENDPROC(__put_user_1) ENTRY(__put_user_2) ENTER + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else + GET_THREAD_INFO(%_ASM_BX) mov TI_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user -2: movw %ax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +2: movw %ax,(_DEST) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + popl %ds +#endif + xor %eax,%eax EXIT ENDPROC(__put_user_2) ENTRY(__put_user_4) ENTER + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else + GET_THREAD_INFO(%_ASM_BX) mov TI_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user -3: movl %eax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +3: movl %eax,(_DEST) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + popl %ds +#endif + xor %eax,%eax EXIT ENDPROC(__put_user_4) ENTRY(__put_user_8) ENTER + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl $(__USER_DS) + popl %ds +#else + GET_THREAD_INFO(%_ASM_BX) mov TI_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user -4: mov %_ASM_AX,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +4: mov %_ASM_AX,(_DEST) #ifdef CONFIG_X86_32 -5: movl %edx,4(%_ASM_CX) +5: movl %edx,4(_DEST) #endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + popl %ds +#endif + xor %eax,%eax EXIT ENDPROC(__put_user_8) bad_put_user: CFI_STARTPROC + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + pushl %ss + popl %ds +#endif + movl $-EFAULT,%eax EXIT END(bad_put_user) diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index e218d5d..5f0615c 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon * Copy a null terminated string from userspace. 
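[Note: on 32-bit UDEREF the __get_user_*/__put_user_* stubs above drop the addr_limit comparison entirely and rely on segmentation instead: %ds is temporarily pointed at __USER_DS, so the dereference itself is bounded by the user data segment and an access beyond its limit raises a protection fault rather than reaching kernel memory. The recurring sequence, shown here for __put_user_1 with _DEST expanded to %ecx:

	pushl $(__USER_DS)
	popl  %ds		# switch the implicit data segment to userland
1:	movb  %al,(%ecx)	# the store is now limited by the __USER_DS descriptor
	pushl %ss
	popl  %ds		# restore the kernel data segment
]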
*/ -#define __do_strncpy_from_user(dst, src, count, res) \ -do { \ - int __d0, __d1, __d2; \ - might_fault(); \ - __asm__ __volatile__( \ - " testl %1,%1\n" \ - " jz 2f\n" \ - "0: lodsb\n" \ - " stosb\n" \ - " testb %%al,%%al\n" \ - " jz 1f\n" \ - " decl %1\n" \ - " jnz 0b\n" \ - "1: subl %1,%0\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %5,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(0b,3b) \ - : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ - "=&D" (__d2) \ - : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ - : "memory"); \ -} while (0) +static long __do_strncpy_from_user(char *dst, const char __user *src, long count) +{ + int __d0, __d1, __d2; + long res = -EFAULT; + + might_fault(); + __asm__ __volatile__( + " movw %w10,%%ds\n" + " testl %1,%1\n" + " jz 2f\n" + "0: lodsb\n" + " stosb\n" + " testb %%al,%%al\n" + " jz 1f\n" + " decl %1\n" + " jnz 0b\n" + "1: subl %1,%0\n" + "2:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "3: movl %5,%0\n" + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(0b,3b) + : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), + "=&D" (__d2) + : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst), + "r"(__USER_DS) + : "memory"); + return res; +} /** * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. @@ -85,9 +92,7 @@ do { \ long __strncpy_from_user(char *dst, const char __user *src, long count) { - long res; - __do_strncpy_from_user(dst, src, count, res); - return res; + return __do_strncpy_from_user(dst, src, count); } EXPORT_SYMBOL(__strncpy_from_user); @@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; if (access_ok(VERIFY_READ, src, 1)) - __do_strncpy_from_user(dst, src, count, res); + res = __do_strncpy_from_user(dst, src, count); return res; } EXPORT_SYMBOL(strncpy_from_user); @@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user); * Zero Userspace */ -#define __do_clear_user(addr,size) \ -do { \ - int __d0; \ - might_fault(); \ - __asm__ __volatile__( \ - "0: rep; stosl\n" \ - " movl %2,%0\n" \ - "1: rep; stosb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: lea 0(%2,%0,4),%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(0b,3b) \ - _ASM_EXTABLE(1b,2b) \ - : "=&c"(size), "=&D" (__d0) \ - : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ -} while (0) +static unsigned long __do_clear_user(void __user *addr, unsigned long size) +{ + int __d0; + + might_fault(); + __asm__ __volatile__( + " movw %w6,%%es\n" + "0: rep; stosl\n" + " movl %2,%0\n" + "1: rep; stosb\n" + "2:\n" + " pushl %%ss\n" + " popl %%es\n" + ".section .fixup,\"ax\"\n" + "3: lea 0(%2,%0,4),%0\n" + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(0b,3b) + _ASM_EXTABLE(1b,2b) + : "=&c"(size), "=&D" (__d0) + : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0), + "r"(__USER_DS)); + return size; +} /** * clear_user: - Zero a block of memory in user space. 
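[Note: two things change in the usercopy_32.c helpers above: the statement macros become real functions that return the residual count (or -EFAULT), and each one switches %ds or %es to __USER_DS for the duration of the string operation, restoring it from %ss afterwards. Callers therefore have to consume the return value now, e.g. (as in clear_user() below):

	if (access_ok(VERIFY_WRITE, to, n))
		n = __do_clear_user(to, n);	/* returns the number of bytes left unzeroed */
	return n;
]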
@@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned long n) { might_fault(); if (access_ok(VERIFY_WRITE, to, n)) - __do_clear_user(to, n); + n = __do_clear_user(to, n); return n; } EXPORT_SYMBOL(clear_user); @@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user); unsigned long __clear_user(void __user *to, unsigned long n) { - __do_clear_user(to, n); - return n; + return __do_clear_user(to, n); } EXPORT_SYMBOL(__clear_user); @@ -200,14 +210,17 @@ long strnlen_user(const char __user *s, long n) might_fault(); __asm__ __volatile__( + " movw %w8,%%es\n" " testl %0, %0\n" " jz 3f\n" - " andl %0,%%ecx\n" + " movl %0,%%ecx\n" "0: repne; scasb\n" " setne %%al\n" " subl %%ecx,%0\n" " addl %0,%%eax\n" "1:\n" + " pushl %%ss\n" + " popl %%es\n" ".section .fixup,\"ax\"\n" "2: xorl %%eax,%%eax\n" " jmp 1b\n" @@ -219,7 +232,7 @@ long strnlen_user(const char __user *s, long n) " .long 0b,2b\n" ".previous" :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) - :"0" (n), "1" (s), "2" (0), "3" (mask) + :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS) :"cc"); return res & mask; } @@ -227,10 +240,11 @@ EXPORT_SYMBOL(strnlen_user); #ifdef CONFIG_X86_INTEL_USERCOPY static unsigned long -__copy_user_intel(void __user *to, const void *from, unsigned long size) +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) { int d0, d1; __asm__ __volatile__( + " movw %w6, %%es\n" " .align 2,0x90\n" "1: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -239,36 +253,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) " .align 2,0x90\n" "3: movl 0(%4), %%eax\n" "4: movl 4(%4), %%edx\n" - "5: movl %%eax, 0(%3)\n" - "6: movl %%edx, 4(%3)\n" + "5: movl %%eax, %%es:0(%3)\n" + "6: movl %%edx, %%es:4(%3)\n" "7: movl 8(%4), %%eax\n" "8: movl 12(%4),%%edx\n" - "9: movl %%eax, 8(%3)\n" - "10: movl %%edx, 12(%3)\n" + "9: movl %%eax, %%es:8(%3)\n" + "10: movl %%edx, %%es:12(%3)\n" "11: movl 16(%4), %%eax\n" "12: movl 20(%4), %%edx\n" - "13: movl %%eax, 16(%3)\n" - "14: movl %%edx, 20(%3)\n" + "13: movl %%eax, %%es:16(%3)\n" + "14: movl %%edx, %%es:20(%3)\n" "15: movl 24(%4), %%eax\n" "16: movl 28(%4), %%edx\n" - "17: movl %%eax, 24(%3)\n" - "18: movl %%edx, 28(%3)\n" + "17: movl %%eax, %%es:24(%3)\n" + "18: movl %%edx, %%es:28(%3)\n" "19: movl 32(%4), %%eax\n" "20: movl 36(%4), %%edx\n" - "21: movl %%eax, 32(%3)\n" - "22: movl %%edx, 36(%3)\n" + "21: movl %%eax, %%es:32(%3)\n" + "22: movl %%edx, %%es:36(%3)\n" "23: movl 40(%4), %%eax\n" "24: movl 44(%4), %%edx\n" - "25: movl %%eax, 40(%3)\n" - "26: movl %%edx, 44(%3)\n" + "25: movl %%eax, %%es:40(%3)\n" + "26: movl %%edx, %%es:44(%3)\n" "27: movl 48(%4), %%eax\n" "28: movl 52(%4), %%edx\n" - "29: movl %%eax, 48(%3)\n" - "30: movl %%edx, 52(%3)\n" + "29: movl %%eax, %%es:48(%3)\n" + "30: movl %%edx, %%es:52(%3)\n" "31: movl 56(%4), %%eax\n" "32: movl 60(%4), %%edx\n" - "33: movl %%eax, 56(%3)\n" - "34: movl %%edx, 60(%3)\n" + "33: movl %%eax, %%es:56(%3)\n" + "34: movl %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -282,6 +296,8 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) "36: movl %%eax, %0\n" "37: rep; movsb\n" "100:\n" + " pushl %%ss\n" + " popl %%es\n" ".section .fixup,\"ax\"\n" "101: lea 0(%%eax,%0,4),%0\n" " jmp 100b\n" @@ -328,7 +344,117 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) " .long 99b,101b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) + : 
"eax", "edx", "memory"); + return size; +} + +static unsigned long +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) +{ + int d0, d1; + __asm__ __volatile__( + " movw %w6, %%ds\n" + " .align 2,0x90\n" + "1: movl 32(%4), %%eax\n" + " cmpl $67, %0\n" + " jbe 3f\n" + "2: movl 64(%4), %%eax\n" + " .align 2,0x90\n" + "3: movl 0(%4), %%eax\n" + "4: movl 4(%4), %%edx\n" + "5: movl %%eax, %%es:0(%3)\n" + "6: movl %%edx, %%es:4(%3)\n" + "7: movl 8(%4), %%eax\n" + "8: movl 12(%4),%%edx\n" + "9: movl %%eax, %%es:8(%3)\n" + "10: movl %%edx, %%es:12(%3)\n" + "11: movl 16(%4), %%eax\n" + "12: movl 20(%4), %%edx\n" + "13: movl %%eax, %%es:16(%3)\n" + "14: movl %%edx, %%es:20(%3)\n" + "15: movl 24(%4), %%eax\n" + "16: movl 28(%4), %%edx\n" + "17: movl %%eax, %%es:24(%3)\n" + "18: movl %%edx, %%es:28(%3)\n" + "19: movl 32(%4), %%eax\n" + "20: movl 36(%4), %%edx\n" + "21: movl %%eax, %%es:32(%3)\n" + "22: movl %%edx, %%es:36(%3)\n" + "23: movl 40(%4), %%eax\n" + "24: movl 44(%4), %%edx\n" + "25: movl %%eax, %%es:40(%3)\n" + "26: movl %%edx, %%es:44(%3)\n" + "27: movl 48(%4), %%eax\n" + "28: movl 52(%4), %%edx\n" + "29: movl %%eax, %%es:48(%3)\n" + "30: movl %%edx, %%es:52(%3)\n" + "31: movl 56(%4), %%eax\n" + "32: movl 60(%4), %%edx\n" + "33: movl %%eax, %%es:56(%3)\n" + "34: movl %%edx, %%es:60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" + " cmpl $63, %0\n" + " ja 1b\n" + "35: movl %0, %%eax\n" + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" + "99: rep; movsl\n" + "36: movl %%eax, %0\n" + "37: rep; movsb\n" + "100:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 1b,100b\n" + " .long 2b,100b\n" + " .long 3b,100b\n" + " .long 4b,100b\n" + " .long 5b,100b\n" + " .long 6b,100b\n" + " .long 7b,100b\n" + " .long 8b,100b\n" + " .long 9b,100b\n" + " .long 10b,100b\n" + " .long 11b,100b\n" + " .long 12b,100b\n" + " .long 13b,100b\n" + " .long 14b,100b\n" + " .long 15b,100b\n" + " .long 16b,100b\n" + " .long 17b,100b\n" + " .long 18b,100b\n" + " .long 19b,100b\n" + " .long 20b,100b\n" + " .long 21b,100b\n" + " .long 22b,100b\n" + " .long 23b,100b\n" + " .long 24b,100b\n" + " .long 25b,100b\n" + " .long 26b,100b\n" + " .long 27b,100b\n" + " .long 28b,100b\n" + " .long 29b,100b\n" + " .long 30b,100b\n" + " .long 31b,100b\n" + " .long 32b,100b\n" + " .long 33b,100b\n" + " .long 34b,100b\n" + " .long 35b,100b\n" + " .long 36b,100b\n" + " .long 37b,100b\n" + " .long 99b,101b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) { int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" - " movl %%eax, 0(%3)\n" - " movl %%edx, 4(%3)\n" + " movl %%eax, %%es:0(%3)\n" + " movl %%edx, %%es:4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" - " movl %%eax, 8(%3)\n" - " movl %%edx, 12(%3)\n" + " movl %%eax, %%es:8(%3)\n" + " movl %%edx, %%es:12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" - " movl %%eax, 16(%3)\n" - " movl %%edx, 20(%3)\n" + " movl %%eax, %%es:16(%3)\n" + " movl 
%%edx, %%es:20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" - " movl %%eax, 24(%3)\n" - " movl %%edx, 28(%3)\n" + " movl %%eax, %%es:24(%3)\n" + " movl %%edx, %%es:28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" - " movl %%eax, 32(%3)\n" - " movl %%edx, 36(%3)\n" + " movl %%eax, %%es:32(%3)\n" + " movl %%edx, %%es:36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" - " movl %%eax, 40(%3)\n" - " movl %%edx, 44(%3)\n" + " movl %%eax, %%es:40(%3)\n" + " movl %%edx, %%es:44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" - " movl %%eax, 48(%3)\n" - " movl %%edx, 52(%3)\n" + " movl %%eax, %%es:48(%3)\n" + " movl %%edx, %%es:52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" - " movl %%eax, 56(%3)\n" - " movl %%edx, 60(%3)\n" + " movl %%eax, %%es:56(%3)\n" + " movl %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: pushl %0\n" @@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -447,36 +577,36 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" + " movnti %%eax, %%es:0(%3)\n" + " movnti %%edx, %%es:4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" + " movnti %%eax, %%es:8(%3)\n" + " movnti %%edx, %%es:12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" + " movnti %%eax, %%es:16(%3)\n" + " movnti %%edx, %%es:20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" + " movnti %%eax, %%es:24(%3)\n" + " movnti %%edx, %%es:28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" + " movnti %%eax, %%es:32(%3)\n" + " movnti %%edx, %%es:36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti %%edx, 44(%3)\n" + " movnti %%eax, %%es:40(%3)\n" + " movnti %%edx, %%es:44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" + " movnti %%eax, %%es:48(%3)\n" + " movnti %%edx, %%es:52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" + " movnti %%eax, %%es:56(%3)\n" + " movnti %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: pushl %0\n" @@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, " .long 
7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_nocache(void *to, int d0, d1; __asm__ __volatile__( + " movw %w6, %%ds\n" " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" " cmpl $67, %0\n" @@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_nocache(void *to, " .align 2,0x90\n" "2: movl 0(%4), %%eax\n" "21: movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" + " movnti %%eax, %%es:0(%3)\n" + " movnti %%edx, %%es:4(%3)\n" "3: movl 8(%4), %%eax\n" "31: movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" + " movnti %%eax, %%es:8(%3)\n" + " movnti %%edx, %%es:12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" + " movnti %%eax, %%es:16(%3)\n" + " movnti %%edx, %%es:20(%3)\n" "10: movl 24(%4), %%eax\n" "51: movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" + " movnti %%eax, %%es:24(%3)\n" + " movnti %%edx, %%es:28(%3)\n" "11: movl 32(%4), %%eax\n" "61: movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" + " movnti %%eax, %%es:32(%3)\n" + " movnti %%edx, %%es:36(%3)\n" "12: movl 40(%4), %%eax\n" "71: movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti %%edx, 44(%3)\n" + " movnti %%eax, %%es:40(%3)\n" + " movnti %%edx, %%es:44(%3)\n" "13: movl 48(%4), %%eax\n" "81: movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" + " movnti %%eax, %%es:48(%3)\n" + " movnti %%edx, %%es:52(%3)\n" "14: movl 56(%4), %%eax\n" "91: movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" + " movnti %%eax, %%es:56(%3)\n" + " movnti %%edx, %%es:60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_nocache(void *to, " movl %%eax,%0\n" "7: rep; movsb\n" "8:\n" + " pushl %%ss\n" + " popl %%ds\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" "16: jmp 8b\n" @@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_nocache(void *to, " .long 7b,16b\n" ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) : "eax", "edx", "memory"); return size; } @@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_nocache(void *to, */ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size); -unsigned long __copy_user_intel(void __user *to, const void *from, +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, + unsigned long size); +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size); unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. 
*/ -#define __copy_user(to, from, size) \ -do { \ - int __d0, __d1, __d2; \ - __asm__ __volatile__( \ - " cmp $7,%0\n" \ - " jbe 1f\n" \ - " movl %1,%0\n" \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ - "4: rep; movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ - "0: rep; movsl\n" \ - " movl %3,%0\n" \ - "1: rep; movsb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "5: addl %3,%0\n" \ - " jmp 2b\n" \ - "3: lea 0(%3,%0,4),%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 4b,5b\n" \ - " .long 0b,3b\n" \ - " .long 1b,2b\n" \ - ".previous" \ - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ - : "3"(size), "0"(size), "1"(to), "2"(from) \ - : "memory"); \ -} while (0) - -#define __copy_user_zeroing(to, from, size) \ -do { \ - int __d0, __d1, __d2; \ - __asm__ __volatile__( \ - " cmp $7,%0\n" \ - " jbe 1f\n" \ - " movl %1,%0\n" \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ - "4: rep; movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ - "0: rep; movsl\n" \ - " movl %3,%0\n" \ - "1: rep; movsb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "5: addl %3,%0\n" \ - " jmp 6f\n" \ - "3: lea 0(%3,%0,4),%0\n" \ - "6: pushl %0\n" \ - " pushl %%eax\n" \ - " xorl %%eax,%%eax\n" \ - " rep; stosb\n" \ - " popl %%eax\n" \ - " popl %0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 4b,5b\n" \ - " .long 0b,3b\n" \ - " .long 1b,6b\n" \ - ".previous" \ - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ - : "3"(size), "0"(size), "1"(to), "2"(from) \ - : "memory"); \ -} while (0) +static unsigned long +__generic_copy_to_user(void __user *to, const void *from, unsigned long size) +{ + int __d0, __d1, __d2; + + __asm__ __volatile__( + " movw %w8,%%es\n" + " cmp $7,%0\n" + " jbe 1f\n" + " movl %1,%0\n" + " negl %0\n" + " andl $7,%0\n" + " subl %0,%3\n" + "4: rep; movsb\n" + " movl %3,%0\n" + " shrl $2,%0\n" + " andl $3,%3\n" + " .align 2,0x90\n" + "0: rep; movsl\n" + " movl %3,%0\n" + "1: rep; movsb\n" + "2:\n" + " pushl %%ss\n" + " popl %%es\n" + ".section .fixup,\"ax\"\n" + "5: addl %3,%0\n" + " jmp 2b\n" + "3: lea 0(%3,%0,4),%0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 4b,5b\n" + " .long 0b,3b\n" + " .long 1b,2b\n" + ".previous" + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) + : "memory"); + return size; +} + +static unsigned long +__generic_copy_from_user(void *to, const void __user *from, unsigned long size) +{ + int __d0, __d1, __d2; + + __asm__ __volatile__( + " movw %w8,%%ds\n" + " cmp $7,%0\n" + " jbe 1f\n" + " movl %1,%0\n" + " negl %0\n" + " andl $7,%0\n" + " subl %0,%3\n" + "4: rep; movsb\n" + " movl %3,%0\n" + " shrl $2,%0\n" + " andl $3,%3\n" + " .align 2,0x90\n" + "0: rep; movsl\n" + " movl %3,%0\n" + "1: rep; movsb\n" + "2:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "5: addl %3,%0\n" + " jmp 2b\n" + "3: lea 0(%3,%0,4),%0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 4b,5b\n" + " .long 0b,3b\n" + " .long 1b,2b\n" + ".previous" + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) + : "memory"); + return size; +} + +static unsigned long +__copy_user_zeroing(void *to, const void __user *from, unsigned long size) +{ + int 
__d0, __d1, __d2; + + __asm__ __volatile__( + " movw %w8,%%ds\n" + " cmp $7,%0\n" + " jbe 1f\n" + " movl %1,%0\n" + " negl %0\n" + " andl $7,%0\n" + " subl %0,%3\n" + "4: rep; movsb\n" + " movl %3,%0\n" + " shrl $2,%0\n" + " andl $3,%3\n" + " .align 2,0x90\n" + "0: rep; movsl\n" + " movl %3,%0\n" + "1: rep; movsb\n" + "2:\n" + " pushl %%ss\n" + " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "5: addl %3,%0\n" + " jmp 6f\n" + "3: lea 0(%3,%0,4),%0\n" + "6: pushl %0\n" + " pushl %%eax\n" + " xorl %%eax,%%eax\n" + " rep; stosb\n" + " popl %%eax\n" + " popl %0\n" + " jmp 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 4b,5b\n" + " .long 0b,3b\n" + " .long 1b,6b\n" + ".previous" + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) + : "memory"); + return size; +} unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n) @@ -775,9 +966,9 @@ survive: } #endif if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + n = __generic_copy_to_user(to, from, n); else - n = __copy_user_intel(to, from, n); + n = __generic_copy_to_user_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_to_user_ll); @@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n) { if (movsl_is_ok(to, from, n)) - __copy_user_zeroing(to, from, n); + n = __copy_user_zeroing(to, from, n); else n = __copy_user_zeroing_intel(to, from, n); return n; @@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, unsigned long n) { if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + n = __generic_copy_from_user(to, from, n); else - n = __copy_user_intel((void __user *)to, - (const void *)from, n); + n = __generic_copy_from_user_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nozero); @@ -812,9 +1002,9 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, if (n > 64 && cpu_has_xmm2) n = __copy_user_zeroing_intel_nocache(to, from, n); else - __copy_user_zeroing(to, from, n); + n = __copy_user_zeroing(to, from, n); #else - __copy_user_zeroing(to, from, n); + n = __copy_user_zeroing(to, from, n); #endif return n; } @@ -827,65 +1017,53 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr if (n > 64 && cpu_has_xmm2) n = __copy_user_intel_nocache(to, from, n); else - __copy_user(to, from, n); + n = __generic_copy_from_user(to, from, n); #else - __copy_user(to, from, n); + n = __generic_copy_from_user(to, from, n); #endif return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); -/** - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -unsigned long -copy_to_user(void __user *to, const void *from, unsigned long n) +void copy_from_user_overflow(void) { - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; + WARN(1, "Buffer overflow detected!\n"); } -EXPORT_SYMBOL(copy_to_user); +EXPORT_SYMBOL(copy_from_user_overflow); -/** - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. 
- * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -unsigned long -_copy_from_user(void *to, const void __user *from, unsigned long n) +void copy_to_user_overflow(void) { - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; + WARN(1, "Buffer overflow detected!\n"); } -EXPORT_SYMBOL(_copy_from_user); +EXPORT_SYMBOL(copy_to_user_overflow); -void copy_from_user_overflow(void) +#ifdef CONFIG_PAX_MEMORY_UDEREF +void __set_fs(mm_segment_t x, int cpu) { - WARN(1, "Buffer overflow detected!\n"); + unsigned long limit = x.seg; + struct desc_struct d; + + current_thread_info()->addr_limit = x; + if (unlikely(paravirt_enabled())) + return; + + if (likely(limit)) + limit = (limit - 1UL) >> PAGE_SHIFT; + pack_descriptor(&d, 0UL, limit, 0xF3, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S); } -EXPORT_SYMBOL(copy_from_user_overflow); + +void set_fs(mm_segment_t x) +{ + __set_fs(x, get_cpu()); + put_cpu(); +} +EXPORT_SYMBOL(copy_from_user); +#else +void set_fs(mm_segment_t x) +{ + current_thread_info()->addr_limit = x; +} +#endif + +EXPORT_SYMBOL(set_fs); diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index b7c2849..7d0bb03 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -42,6 +42,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long count) { long res; + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; __do_strncpy_from_user(dst, src, count, res); return res; } @@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size) { long __d0; might_fault(); + if ((unsigned long)addr < PAX_USER_SHADOW_BASE) + addr += PAX_USER_SHADOW_BASE; /* no memory constraint because it doesn't change any memory gcc knows about */ asm volatile( @@ -151,10 +155,14 @@ EXPORT_SYMBOL(strlen_user); unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) { - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { + if ((unsigned long)to < PAX_USER_SHADOW_BASE) + to += PAX_USER_SHADOW_BASE; + if ((unsigned long)from < PAX_USER_SHADOW_BASE) + from += PAX_USER_SHADOW_BASE; return copy_user_generic((__force void *)to, (__force void *)from, len); - } - return len; + } + return len; } EXPORT_SYMBOL(copy_in_user); diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index d0474ad..360f761 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -1,14 +1,71 @@ #include #include +#include #include +#include +/* + * The exception table needs to be sorted so that the binary + * search that we use to find entries in it works properly. + * This is used both for the kernel exception table and for + * the exception tables of modules that get loaded. 
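[Note: the UDEREF variant of set_fs() above does more than store addr_limit: it repacks the GDT_ENTRY_DEFAULT_USER_DS descriptor so that the user data segment's limit tracks the current addr_limit (skipped when paravirt is active). With 4 KiB granularity the arithmetic works out exactly; assuming the usual 3 GB split with addr_limit = 0xC0000000:

	limit        = (0xC0000000 - 1) >> PAGE_SHIFT = 0xBFFFF pages
	segment size = (0xBFFFF + 1) * 4096           = 0xC0000000 bytes

so __USER_DS ends exactly at the user/kernel boundary, and set_fs(KERNEL_DS) widens it again for in-kernel uaccess to kernel buffers.]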
+ */ +static int cmp_ex(const void *a, const void *b) +{ + const struct exception_table_entry *x = a, *y = b; + + /* avoid overflow */ + if (x->insn > y->insn) + return 1; + if (x->insn < y->insn) + return -1; + return 0; +} + +static void swap_ex(void *a, void *b, int size) +{ + struct exception_table_entry t, *x = a, *y = b; + + t = *x; + + pax_open_kernel(); + *x = *y; + *y = t; + pax_close_kernel(); +} + +void sort_extable(struct exception_table_entry *start, + struct exception_table_entry *finish) +{ + sort(start, finish - start, sizeof(struct exception_table_entry), + cmp_ex, swap_ex); +} + +#ifdef CONFIG_MODULES +/* + * If the exception table is sorted, any referring to the module init + * will be at the beginning or the end. + */ +void trim_init_extable(struct module *m) +{ + /*trim the beginning*/ + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { + m->extable++; + m->num_exentries--; + } + /*trim the end*/ + while (m->num_exentries && + within_module_init(m->extable[m->num_exentries-1].insn, m)) + m->num_exentries--; +} +#endif /* CONFIG_MODULES */ int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; #ifdef CONFIG_PNPBIOS - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; extern u32 pnp_bios_is_utter_crap; pnp_bios_is_utter_crap = 1; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 4c4508e..8752b7e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -11,10 +11,19 @@ #include /* __kprobes, ... */ #include /* kmmio_handler, ... */ #include /* perf_sw_event */ +#include +#include #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ #include /* kmemcheck_*(), ... 
*/ +#include +#include + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#include +#include "../kernel/dumpstack.h" +#endif /* * Page fault error code bits: @@ -52,7 +61,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs) int ret = 0; /* kprobe_running() needs smp_processor_id() */ - if (kprobes_built_in() && !user_mode_vm(regs)) { + if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; @@ -173,6 +182,30 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, force_sig_info(si_signo, &info, tsk); } +#ifdef CONFIG_PAX_EMUTRAMP +static int pax_handle_fetch_fault(struct pt_regs *regs); +#endif + +#ifdef CONFIG_PAX_PAGEEXEC +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return NULL; + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return NULL; + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return NULL; + return pmd; +} +#endif + DEFINE_SPINLOCK(pgd_lock); LIST_HEAD(pgd_list); @@ -225,11 +258,24 @@ void vmalloc_sync_all(void) address += PMD_SIZE) { unsigned long flags; + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif spin_lock_irqsave(&pgd_lock, flags); + +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + pgd_t *pgd = get_cpu_pgd(cpu); +#else list_for_each_entry(page, &pgd_list, lru) { - if (!vmalloc_sync_one(page_address(page), address)) + pgd_t *pgd = page_address(page); +#endif + + if (!vmalloc_sync_one(pgd, address)) break; } spin_unlock_irqrestore(&pgd_lock, flags); @@ -259,6 +305,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) * an interrupt in the middle of a task switch.. */ pgd_paddr = read_cr3(); + +#ifdef CONFIG_PAX_PER_CPU_PGD + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK)); +#endif + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); if (!pmd_k) return -1; @@ -333,15 +384,27 @@ void vmalloc_sync_all(void) const pgd_t *pgd_ref = pgd_offset_k(address); unsigned long flags; + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif if (pgd_none(*pgd_ref)) continue; spin_lock_irqsave(&pgd_lock, flags); + +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + pgd_t *pgd = pgd_offset_cpu(cpu, address); +#else list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; pgd = (pgd_t *)page_address(page) + pgd_index(address); +#endif + if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); else @@ -374,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) * happen within a race in page table update. 
In the later * case just flush: */ + +#ifdef CONFIG_PAX_PER_CPU_PGD + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK)); + pgd = pgd_offset_cpu(smp_processor_id(), address); +#else pgd = pgd_offset(current->active_mm, address); +#endif + pgd_ref = pgd_offset_k(address); if (pgd_none(*pgd_ref)) return -1; @@ -536,7 +606,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) static int is_errata100(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) return 1; #endif return 0; @@ -563,7 +633,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) } static const char nx_warning[] = KERN_CRIT -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n"; static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, @@ -572,15 +642,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, if (!oops_may_print()) return; - if (error_code & PF_INSTR) { + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) { unsigned int level; pte_t *pte = lookup_address(address, &level); if (pte && pte_present(*pte) && !pte_exec(*pte)) - printk(nx_warning, current_uid()); + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current)); } +#ifdef CONFIG_PAX_KERNEXEC + if (init_mm.start_code <= address && address < init_mm.end_code) { + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); + else + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + current->comm, task_pid_nr(current), current_uid(), current_euid()); + } +#endif + printk(KERN_ALERT "BUG: unable to handle kernel "); if (address < PAGE_SIZE) printk(KERN_CONT "NULL pointer dereference"); @@ -705,6 +786,68 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, unsigned long address, int si_code) { struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + +#ifdef CONFIG_X86_64 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) { + if (regs->ip == (unsigned long)vgettimeofday) { + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday); + return; + } else if (regs->ip == (unsigned long)vtime) { + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time); + return; + } else if (regs->ip == (unsigned long)vgetcpu) { + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu); + return; + } + } +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm && (error_code & PF_USER)) { + unsigned long ip = regs->ip; + + if (v8086_mode(regs)) + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff); + + /* + * It's possible to have interrupts off here: + */ + local_irq_enable(); + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) { + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } +#endif + 
+#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) { + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } +#endif + + } +#endif /* User mode accesses just cause a SIGSEGV */ if (error_code & PF_USER) { @@ -851,6 +994,106 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) return 1; } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code) +{ + pte_t *pte; + pmd_t *pmd; + spinlock_t *ptl; + unsigned char pte_mask; + + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) || + !(mm->pax_flags & MF_PAX_PAGEEXEC)) + return 0; + + /* PaX: it's our fault, let's handle it if we can */ + + /* PaX: take a look at read faults before acquiring any locks */ + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) { + /* instruction fetch attempt from a protected page in user mode */ + up_read(&mm->mmap_sem); + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return 1; + } +#endif + + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } + + pmd = pax_get_pmd(mm, address); + if (unlikely(!pmd)) + return 0; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { + pte_unmap_unlock(pte, ptl); + return 0; + } + + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { + /* write attempt to a protected page in user mode */ + pte_unmap_unlock(pte, ptl); + return 0; + } + +#ifdef CONFIG_SMP + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) +#else + if (likely(address > get_limit(regs->cs))) +#endif + { + set_pte(pte, pte_mkread(*pte)); + __flush_tlb_one(address); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return 1; + } + + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); + + /* + * PaX: fill DTLB with user rights and retry + */ + __asm__ __volatile__ ( +#ifdef CONFIG_PAX_MEMORY_UDEREF + "movw %w4,%%es\n" +#endif + "orb %2,(%1)\n" +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) +/* + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* + * page fault when examined during a TLB load attempt. this is true not only + * for PTEs holding a non-present entry but also present entries that will + * raise a page fault (such as those set up by PaX, or the copy-on-write + * mechanism). in effect it means that we do *not* need to flush the TLBs + * for our target pages since their PTEs are simply not in the TLBs at all. + + * the best thing in omitting it is that we gain around 15-20% speed in the + * fast path of the page fault handler and can get rid of tracing since we + * can no longer flush unintended entries. 
+ */ + "invlpg (%0)\n" +#endif + "testb $0,%%es:(%0)\n" + "xorb %3,(%1)\n" +#ifdef CONFIG_PAX_MEMORY_UDEREF + "pushl %%ss\n" + "popl %%es\n" +#endif + : + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS) + : "memory", "cc"); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return 1; +} +#endif + /* * Handle a spurious fault caused by a stale TLB entry. * @@ -917,6 +1160,9 @@ int show_unhandled_signals = 1; static inline int access_error(unsigned long error_code, int write, struct vm_area_struct *vma) { + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) + return 1; + if (write) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) @@ -950,17 +1196,31 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) { struct vm_area_struct *vma; struct task_struct *tsk; - unsigned long address; struct mm_struct *mm; int write; int fault; + /* Get the faulting address: */ + unsigned long address = read_cr2(); + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) { + if (!search_exception_tables(regs->ip)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } + if (address < PAX_USER_SHADOW_BASE) { + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip); + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR); + } else + address -= PAX_USER_SHADOW_BASE; + } +#endif + tsk = current; mm = tsk->mm; - /* Get the faulting address: */ - address = read_cr2(); - /* * Detect and handle instructions that would cause a page fault for * both a tracked kernel page and a userspace page. @@ -1020,7 +1280,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) * User-mode registers count as a user access even for any * potential system fault or CPU buglet: */ - if (user_mode_vm(regs)) { + if (user_mode(regs)) { local_irq_enable(); error_code |= PF_USER; } else { @@ -1074,6 +1334,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) might_sleep(); } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (pax_handle_pageexec_fault(regs, mm, address, error_code)) + return; +#endif + vma = find_vma(mm, address); if (unlikely(!vma)) { bad_area(regs, error_code, address); @@ -1085,18 +1350,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) bad_area(regs, error_code, address); return; } - if (error_code & PF_USER) { - /* - * Accessing the stack below %sp is always a bug. - * The large cushion allows instructions like enter - * and pusha to work. ("enter $65535, $31" pushes - * 32 pointers and then decrements %sp by 65535.) - */ - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { - bad_area(regs, error_code, address); - return; - } + /* + * Accessing the stack below %sp is always a bug. + * The large cushion allows instructions like enter + * and pusha to work. ("enter $65535, $31" pushes + * 32 pointers and then decrements %sp by 65535.) 
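[Note: the inline asm above is the heart of the 32-bit PAGEEXEC emulation on CPUs without hardware NX. The orb temporarily sets _PAGE_USER (plus the accessed/dirty bits encoded in pte_mask) in the PTE, the testb touches the faulting address so the data TLB caches the translation with user rights, and the xorb takes those bits back out; since the instruction TLB never loads the entry, data reads and writes succeed from then on while instruction fetches from the same page keep faulting, which is exactly the non-executable semantics being emulated. Under UDEREF the touch has to go through %es loaded with __USER_DS, since the kernel's own data segment is not meant to reach userland. In outline:

	orb   pte_mask,(pte)		# temporarily grant user rights in the PTE
	testb $0,%es:(address)		# touch the page: DTLB now caches a user entry
	xorb  pte_mask,(pte)		# take the rights back; the ITLB never saw them
]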
+ */ + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { + bad_area(regs, error_code, address); + return; } + +#ifdef CONFIG_PAX_SEGMEXEC + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { + bad_area(regs, error_code, address); + return; + } +#endif + if (unlikely(expand_stack(vma, address))) { bad_area(regs, error_code, address); return; @@ -1140,3 +1411,199 @@ good_area: up_read(&mm->mmap_sem); } + +#ifdef CONFIG_PAX_EMUTRAMP +static int pax_handle_fetch_fault_32(struct pt_regs *regs) +{ + int err; + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned char mov1, mov2; + unsigned short jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 11) >> 32) + break; +#endif + + err = get_user(mov1, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); + + if (err) + break; + + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { + regs->cx = addr1; + regs->ax = addr2; + regs->ip = addr2; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned char mov, jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 9) >> 32) + break; +#endif + + err = get_user(mov, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + + if (err) + break; + + if (mov == 0xB9 && jmp == 0xE9) { + regs->cx = addr1; + regs->ip = (unsigned int)(regs->ip + addr2 + 10); + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} + +#ifdef CONFIG_X86_64 +static int pax_handle_fetch_fault_64(struct pt_regs *regs) +{ + int err; + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned short mov1, mov2, jmp1; + unsigned char jmp2; + unsigned int addr1; + unsigned long addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); + + if (err) + break; + + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + regs->ip = addr1; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned short mov1, mov2, jmp1; + unsigned char jmp2; + unsigned long addr1, addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); + + if (err) + break; + + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + regs->ip = addr1; + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} +#endif + +/* + * PaX: decide 
what to do with offenders (regs->ip = fault address) + * + * returns 1 when task should be killed + * 2 when gcc trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + if (v8086_mode(regs)) + return 1; + + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; + +#ifdef CONFIG_X86_32 + return pax_handle_fetch_fault_32(regs); +#else + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) + return pax_handle_fetch_fault_32(regs); + else + return pax_handle_fetch_fault_64(regs); +#endif +} +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +void pax_report_insns(void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (__force unsigned char __user *)pc+i)) + printk(KERN_CONT "?? "); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); + for (i = -1; i < 80 / (long)sizeof(long); i++) { + unsigned long c; + if (get_user(c, (__force unsigned long __user *)sp+i)) +#ifdef CONFIG_X86_32 + printk(KERN_CONT "???????? "); +#else + printk(KERN_CONT "???????????????? "); +#endif + else + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); + } + printk("\n"); +} +#endif + +/** + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +long notrace probe_kernel_write(void *dst, const void *src, size_t size) +{ + long ret; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + pagefault_disable(); + pax_open_kernel(); + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); + pax_close_kernel(); + pagefault_enable(); + set_fs(old_fs); + + return ret ? -EFAULT : 0; +} diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 738e659..ca82c82 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, + if (unlikely(!__access_ok(write ? 
VERIFY_WRITE : VERIFY_READ, (void __user *)start, len))) return 0; diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 63a6ba6..79abd7a 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); + + pax_open_kernel(); set_pte(kmap_pte-idx, mk_pte(page, prot)); + pax_close_kernel(); return (void *)vaddr; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 069ce7c..d318f90 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long start_addr; + unsigned long start_addr, pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; if (len > mm->cached_hole_size) { - start_addr = mm->free_area_cache; + start_addr = mm->free_area_cache; } else { - start_addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + start_addr = mm->mmap_base; + mm->cached_hole_size = 0; } full_search: @@ -280,26 +287,27 @@ full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ - if (TASK_SIZE - len < addr) { + if (pax_task_size - len < addr) { /* * Start a new search - just in case we missed * some holes. */ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { - mm->free_area_cache = addr + len; - return addr; - } + if (check_heap_stack_gap(vma, addr, len)) + break; if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = ALIGN(vma->vm_end, huge_page_size(h)); } + + mm->free_area_cache = addr + len; + return addr; } static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, { struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev_vma; - unsigned long base = mm->mmap_base, addr = addr0; + struct vm_area_struct *vma; + unsigned long base = mm->mmap_base, addr; unsigned long largest_hole = mm->cached_hole_size; - int first_time = 1; /* don't allow allocations above current base */ if (mm->free_area_cache > base) @@ -321,7 +328,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, largest_hole = 0; mm->free_area_cache = base; } -try_again: + /* make sure it can fit in the remaining address space */ if (mm->free_area_cache < len) goto fail; @@ -329,33 +336,27 @@ try_again: /* either no address requested or cant fit in requested address hole */ addr = (mm->free_area_cache - len) & huge_page_mask(h); do { + vma = find_vma(mm, addr); /* * Lookup failure means no vma is above this address, * i.e. 
return with success: - */ - if (!(vma = find_vma_prev(mm, addr, &prev_vma))) - return addr; - - /* * new region fits between prev_vma->vm_end and * vma->vm_start, use it: */ - if (addr + len <= vma->vm_start && - (!prev_vma || (addr >= prev_vma->vm_end))) { + if (check_heap_stack_gap(vma, addr, len)) { /* remember the address as a hint for next time */ - mm->cached_hole_size = largest_hole; - return (mm->free_area_cache = addr); - } else { - /* pull free_area_cache down to the first hole */ - if (mm->free_area_cache == vma->vm_end) { - mm->free_area_cache = vma->vm_start; - mm->cached_hole_size = largest_hole; - } + mm->cached_hole_size = largest_hole; + return (mm->free_area_cache = addr); + } + /* pull free_area_cache down to the first hole */ + if (mm->free_area_cache == vma->vm_end) { + mm->free_area_cache = vma->vm_start; + mm->cached_hole_size = largest_hole; } /* remember the largest hole we saw so far */ if (addr + largest_hole < vma->vm_start) - largest_hole = vma->vm_start - addr; + largest_hole = vma->vm_start - addr; /* try just below the current vma->vm_start */ addr = (vma->vm_start - len) & huge_page_mask(h); @@ -363,22 +364,26 @@ try_again: fail: /* - * if hint left us with no space for the requested - * mapping then try again: - */ - if (first_time) { - mm->free_area_cache = base; - largest_hole = 0; - first_time = 0; - goto try_again; - } - /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ - mm->free_area_cache = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); @@ -386,6 +391,7 @@ fail: /* * Restore the topdown base: */ + mm->mmap_base = base; mm->free_area_cache = base; mm->cached_hole_size = ~0UL; @@ -399,10 +405,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + unsigned long pax_task_size = TASK_SIZE; if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > TASK_SIZE) + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + if (len > pax_task_size) return -ENOMEM; if (flags & MAP_FIXED) { @@ -414,8 +429,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index b278535..9654c83 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -70,11 +70,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, * cause a hotspot and fill up ZONE_DMA. The page tables * need roughly 0.5KB per GB. 
*/ -#ifdef CONFIG_X86_32 - start = 0x7000; -#else - start = 0x8000; -#endif + start = 0x100000; e820_table_start = find_e820_area(start, max_pfn_mapped<> PAGE_SHIFT)) + return 1; +#endif + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; @@ -380,6 +382,88 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) void free_initmem(void) { + +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_X86_32 + /* PaX: limit KERNEL_CS to actual size */ + unsigned long addr, limit; + struct desc_struct d; + int cpu; + + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext; + limit = (limit - 1UL) >> PAGE_SHIFT; + + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); + for (cpu = 0; cpu < NR_CPUS; cpu++) { + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); + } + + /* PaX: make KERNEL_CS read-only */ + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text)); + if (!paravirt_enabled()) + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT); +/* + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + } +*/ +#ifdef CONFIG_X86_PAE + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT); +/* + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); + } +*/ +#endif + +#ifdef CONFIG_MODULES + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); +#endif + +#else + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + unsigned long addr, end; + + /* PaX: make kernel code/rodata read-only, rest non-executable */ + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + continue; + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + else + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); + } + + addr = (unsigned long)__va(__pa(__START_KERNEL_map)); + end = addr + KERNEL_IMAGE_SIZE; + for (; addr < end; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + continue; + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + else + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); + } +#endif + + flush_tlb_all(); +#endif + free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bca7909..313e4cb 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void) } /* - * Creates a middle page table and puts a pointer to it in the - * given global directory 
entry. This only returns the gd entry - * in non-PAE compilation mode, since the middle layer is folded. - */ -static pmd_t * __init one_md_table_init(pgd_t *pgd) -{ - pud_t *pud; - pmd_t *pmd_table; - -#ifdef CONFIG_X86_PAE - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { - if (after_bootmem) - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); - else - pmd_table = (pmd_t *)alloc_low_page(); - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - pud = pud_offset(pgd, 0); - BUG_ON(pmd_table != pmd_offset(pud, 0)); - - return pmd_table; - } -#endif - pud = pud_offset(pgd, 0); - pmd_table = pmd_offset(pud, 0); - - return pmd_table; -} - -/* * Create a page table and place a pointer to it in a middle page * directory entry: */ @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); +#else set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); +#endif BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } +static pmd_t * __init one_md_table_init(pgd_t *pgd) +{ + pud_t *pud; + pmd_t *pmd_table; + + pud = pud_offset(pgd, 0); + pmd_table = pmd_offset(pud, 0); + + return pmd_table; +} + pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { - pmd = one_md_table_init(pgd); - pmd = pmd + pmd_index(vaddr); + pud = pud_offset(pgd, vaddr); + pmd = pmd_offset(pud, vaddr); + +#ifdef CONFIG_X86_PAE + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); +#endif + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) } } -static inline int is_kernel_text(unsigned long addr) +static inline int is_kernel_text(unsigned long start, unsigned long end) { - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end) - return 1; - return 0; + if ((start > ktla_ktva((unsigned long)_etext) || + end <= ktla_ktva((unsigned long)_stext)) && + (start > ktla_ktva((unsigned long)_einittext) || + end <= ktla_ktva((unsigned long)_sinittext)) && + +#ifdef CONFIG_ACPI_SLEEP + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && +#endif + + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) + return 0; + return 1; } /* @@ -244,9 +244,10 @@ kernel_physical_mapping_init(unsigned long start, unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; - int pgd_idx, pmd_idx, pte_ofs; + unsigned int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; @@ -279,8 +280,13 @@ repeat: pfn = start_pfn; pgd_idx = pgd_index((pfn<> PAGE_SHIFT); +#endif if (pfn >= end_pfn) continue; @@ -292,14 +298,13 @@ repeat: #endif 
for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { - unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial @@ -309,11 +314,7 @@ repeat: __pgprot(PTE_IDENT_ATTR | _PAGE_PSE); - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + - PAGE_OFFSET + PAGE_SIZE-1; - - if (is_kernel_text(addr) || - is_kernel_text(addr2)) + if (is_kernel_text(address, address + PMD_SIZE)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; @@ -330,7 +331,7 @@ repeat: pte_ofs = pte_index((pfn<> 10, - (unsigned long)&_etext, (unsigned long)&_edata, - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, + (unsigned long)&_sdata, (unsigned long)&_edata, + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, - (unsigned long)&_text, (unsigned long)&_etext, + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* @@ -1013,6 +1018,7 @@ void set_kernel_text_rw(void) if (!kernel_set_to_readonly) return; + start = ktla_ktva(start); pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); @@ -1027,6 +1033,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; + start = ktla_ktva(start); pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); @@ -1038,6 +1045,7 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; + start = ktla_ktva(start); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ee41bba..94a8b29 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -50,7 +50,6 @@ #include #include #include -#include static unsigned long dma_reserve __initdata; @@ -74,7 +73,7 @@ early_param("gbpages", parse_direct_gbpages_on); * around without checking the pgd every time. */ -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); int force_personality32; @@ -165,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) pmd = fill_pmd(pud, vaddr); pte = fill_pte(pmd, vaddr); + pax_open_kernel(); set_pte(pte, new_pte); + pax_close_kernel(); /* * It's enough to flush this one mapping. 
@@ -224,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, pgd = pgd_offset_k((unsigned long)__va(phys)); if (pgd_none(*pgd)) { pud = (pud_t *) spp_getpage(); - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | - _PAGE_USER)); + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); } pud = pud_offset(pgd, (unsigned long)__va(phys)); if (pud_none(*pud)) { pmd = (pmd_t *) spp_getpage(); - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | - _PAGE_USER)); + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); } pmd = pmd_offset(pud, phys); BUG_ON(!pmd_none(*pmd)); @@ -680,6 +679,12 @@ void __init mem_init(void) pci_iommu_alloc(); +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +#endif + /* clear_bss() already clear the empty_zero_page */ reservedpages = 0; @@ -886,8 +891,8 @@ int kern_addr_valid(unsigned long addr) static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), - .vm_page_prot = PAGE_READONLY_EXEC, - .vm_flags = VM_READ | VM_EXEC + .vm_page_prot = PAGE_READONLY, + .vm_flags = VM_READ }; struct vm_area_struct *get_gate_vma(struct task_struct *tsk) @@ -921,7 +926,7 @@ int in_gate_area_no_task(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) return "[vdso]"; if (vma == &gate_vma) return "[vsyscall]"; diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 84e236c..69bd3f6 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) debug_kmap_atomic(type); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + + pax_open_kernel(); set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); + pax_close_kernel(); + arch_flush_lazy_mmu_mode(); return (void *)vaddr; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 12e4d2d..66f0373 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -100,13 +100,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, /* * Don't allow anybody to remap normal RAM that we're using.. 
*/ - for (pfn = phys_addr >> PAGE_SHIFT; - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); - pfn++) { - + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) { int is_ram = page_is_ram(pfn); - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn)))) return NULL; WARN_ON_ONCE(is_ram); } @@ -346,7 +343,7 @@ static int __init early_ioremap_debug_setup(char *str) early_param("early_ioremap_debug", early_ioremap_debug_setup); static __initdata int after_paging_init; -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE); static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) { @@ -378,8 +375,7 @@ void __init early_ioremap_init(void) slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); - memset(bm_pte, 0, sizeof(bm_pte)); - pmd_populate_kernel(&init_mm, pmd, bm_pte); + pmd_populate_user(&init_mm, pmd, bm_pte); /* * The boot-ioremap range spans multiple pmds, for which diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index b3b531a..b65b190 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, * memory (e.g. tracked pages)? For now, we need this to avoid * invoking kmemcheck for PnP BIOS calls. */ - if (regs->flags & X86_VM_MASK) + if (v8086_mode(regs)) return false; - if (regs->cs != __KERNEL_CS) + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) return false; pte = kmemcheck_pte_lookup(address); diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 1dab519..60a7e5f 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void) * Leave an at least ~128 MB hole with possible stack randomization. 
*/ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) -#define MAX_GAP (TASK_SIZE/6*5) +#define MAX_GAP (pax_task_size/6*5) /* * True on X86_32 or when emulating IA32 on X86_64 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void) return rnd << PAGE_SHIFT; } -static unsigned long mmap_base(void) +static unsigned long mmap_base(struct mm_struct *mm) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd()); } /* * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ -static unsigned long mmap_legacy_base(void) +static unsigned long mmap_legacy_base(struct mm_struct *mm) { - if (mmap_is_ia32()) + if (mmap_is_ia32()) { + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + return SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + return TASK_UNMAPPED_BASE; - else + } else return TASK_UNMAPPED_BASE + mmap_rnd(); } @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void) void arch_pick_mmap_layout(struct mm_struct *mm) { if (mmap_is_legacy()) { - mm->mmap_base = mmap_legacy_base(); + mm->mmap_base = mmap_legacy_base(mm); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { - mm->mmap_base = mmap_base(); + mm->mmap_base = mmap_base(mm); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 809baaa..e3892a3 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, } #endif -extern unsigned long find_max_low_pfn(void); extern unsigned long highend_pfn, highstart_pfn; #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index e1d1069..2251ff3 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -36,7 +36,7 @@ enum { static int pte_testbit(pte_t pte) { - return pte_flags(pte) & _PAGE_UNUSED1; + return pte_flags(pte) & _PAGE_CPA_TEST; } struct split_state { diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 532e793..8cddbe9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. */ if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_NX; + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; /* * The kernel text needs to be executable for obvious reasons * Does not cover __inittext since that is gone later on. 
On * 64bit we do not enforce !NX on the low mapping */ - if (within(address, (unsigned long)_text, (unsigned long)_etext)) - pgprot_val(forbidden) |= _PAGE_NX; + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; +#ifdef CONFIG_DEBUG_RODATA /* * The .rodata section needs to be read-only. Using the pfn * catches all aliases. @@ -278,6 +279,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; +#endif #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) /* @@ -316,6 +318,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, } #endif +#ifdef CONFIG_PAX_KERNEXEC + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) { + pgprot_val(forbidden) |= _PAGE_RW; + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + } +#endif + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); return prot; @@ -368,23 +377,37 @@ EXPORT_SYMBOL_GPL(lookup_address); static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) { /* change init_mm */ + pax_open_kernel(); set_pte_atomic(kpte, pte); + #ifdef CONFIG_X86_32 if (!SHARED_KERNEL_PMD) { + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + pgd_t *pgd = get_cpu_pgd(cpu); +#else list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; + pgd_t *pgd = (pgd_t *)page_address(page); +#endif + pud_t *pud; pmd_t *pmd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); + pgd += pgd_index(address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); set_pte_atomic((pte_t *)pmd, pte); } } #endif + pax_close_kernel(); } static int diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 64121a1..bfb36ea 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end) if (!entry) { printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", - current->comm, current->pid, start, end); + current->comm, task_pid_nr(current), start, end); return -EINVAL; } @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) while (cursor < to) { if (!devmem_is_allowed(pfn)) { printk(KERN_INFO - "Program %s tried to access /dev/mem between %Lx->%Lx.\n", - current->comm, from, to); + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n", + current->comm, from, to, cursor); return 0; } cursor += PAGE_SIZE; @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " "for %Lx-%Lx\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(flags), base, (unsigned long long)(base + size)); return -EINVAL; @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, if (want_flags != flags) { printk(KERN_WARNING "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(want_flags), (unsigned long long)paddr, (unsigned long long)(paddr + size), @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, free_memtype(paddr, paddr + size); 
printk(KERN_ERR "%s:%d map pfn expected mapping type %s" " for %Lx-%Lx, got %s\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(want_flags), (unsigned long long)paddr, (unsigned long long)(paddr + size), diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5c4ee42..9ff06a5 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -84,8 +84,58 @@ static inline void pgd_list_del(pgd_t *pgd) list_del(&page->lru); } -#define UNSHARED_PTRS_PER_PGD \ - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; + +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) +{ + while (count--) + *dst++ = __pgd((pgd_val(*src++) | _PAGE_NX) & ~_PAGE_USER); +} +#endif + +#ifdef CONFIG_PAX_PER_CPU_PGD +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count) +{ + while (count--) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask); +#else + *dst++ = *src++; +#endif + +} +#endif + +#ifdef CONFIG_PAX_PER_CPU_PGD +static inline void pgd_ctor(pgd_t *pgd) {} +static inline void pgd_dtor(pgd_t *pgd) {} +#ifdef CONFIG_X86_64 +#define pxd_t pud_t +#define pyd_t pgd_t +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) +#define pxd_free(mm, pud) pud_free((mm), (pud)) +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) +#define pyd_offset(mm ,address) pgd_offset((mm), (address)) +#define PYD_SIZE PGDIR_SIZE +#else +#define pxd_t pmd_t +#define pyd_t pud_t +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) +#define pxd_free(mm, pud) pmd_free((mm), (pud)) +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) +#define pyd_offset(mm ,address) pud_offset((mm), (address)) +#define PYD_SIZE PUD_SIZE +#endif +#else +#define pxd_t pmd_t +#define pyd_t pud_t +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) +#define pxd_free(mm, pmd) pmd_free((mm), (pmd)) +#define pyd_populate(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) +#define pyd_offset(mm ,address) pud_offset((mm), (address)) +#define PYD_SIZE PUD_SIZE static void pgd_ctor(pgd_t *pgd) { @@ -120,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd) pgd_list_del(pgd); spin_unlock_irqrestore(&pgd_lock, flags); } +#endif /* * List of all pgd's needed for non-PAE so it can invalidate entries @@ -132,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd) * -- wli */ -#ifdef CONFIG_X86_PAE +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) /* * In PAE mode, we need to do a cr3 reload (=tlb flush) when * updating the top-level pagetable entries to guarantee the @@ -144,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd) * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate * and initialize the kernel pmds here. */ -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) { @@ -163,36 +214,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) if (mm == current->active_mm) write_cr3(read_cr3()); } +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) +#define PREALLOCATED_PXDS USER_PGD_PTRS #else /* !CONFIG_X86_PAE */ /* No need to prepopulate any pagetable entries in non-PAE modes. 
*/ -#define PREALLOCATED_PMDS 0 +#define PREALLOCATED_PXDS 0 #endif /* CONFIG_X86_PAE */ -static void free_pmds(pmd_t *pmds[]) +static void free_pxds(pxd_t *pxds[]) { int i; - for(i = 0; i < PREALLOCATED_PMDS; i++) - if (pmds[i]) - free_page((unsigned long)pmds[i]); + for(i = 0; i < PREALLOCATED_PXDS; i++) + if (pxds[i]) + free_page((unsigned long)pxds[i]); } -static int preallocate_pmds(pmd_t *pmds[]) +static int preallocate_pxds(pxd_t *pxds[]) { int i; bool failed = false; - for(i = 0; i < PREALLOCATED_PMDS; i++) { - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); - if (pmd == NULL) + for(i = 0; i < PREALLOCATED_PXDS; i++) { + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP); + if (pxd == NULL) failed = true; - pmds[i] = pmd; + pxds[i] = pxd; } if (failed) { - free_pmds(pmds); + free_pxds(pxds); return -ENOMEM; } @@ -205,51 +258,56 @@ static int preallocate_pmds(pmd_t *pmds[]) * preallocate which never got a corresponding vma will need to be * freed manually. */ -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) { int i; - for(i = 0; i < PREALLOCATED_PMDS; i++) { + for(i = 0; i < PREALLOCATED_PXDS; i++) { pgd_t pgd = pgdp[i]; if (pgd_val(pgd) != 0) { - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); - pgdp[i] = native_make_pgd(0); + set_pgd(pgdp + i, native_make_pgd(0)); - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); - pmd_free(mm, pmd); + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); + pxd_free(mm, pxd); } } } -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) { - pud_t *pud; + pyd_t *pyd; unsigned long addr; int i; - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ return; - pud = pud_offset(pgd, 0); +#ifdef CONFIG_X86_64 + pyd = pyd_offset(mm, 0L); +#else + pyd = pyd_offset(pgd, 0L); +#endif - for (addr = i = 0; i < PREALLOCATED_PMDS; - i++, pud++, addr += PUD_SIZE) { - pmd_t *pmd = pmds[i]; + for (addr = i = 0; i < PREALLOCATED_PXDS; + i++, pyd++, addr += PYD_SIZE) { + pxd_t *pxd = pxds[i]; if (i >= KERNEL_PGD_BOUNDARY) - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), - sizeof(pmd_t) * PTRS_PER_PMD); + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), + sizeof(pxd_t) * PTRS_PER_PMD); - pud_populate(mm, pud, pmd); + pyd_populate(mm, pyd, pxd); } } pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pmd_t *pmds[PREALLOCATED_PMDS]; + pxd_t *pxds[PREALLOCATED_PXDS]; + unsigned long flags; pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); @@ -259,11 +317,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) mm->pgd = pgd; - if (preallocate_pmds(pmds) != 0) + if (preallocate_pxds(pxds) != 0) goto out_free_pgd; if (paravirt_pgd_alloc(mm) != 0) - goto out_free_pmds; + goto out_free_pxds; /* * Make sure that pre-populating the pmds is atomic with @@ -273,14 +331,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) spin_lock_irqsave(&pgd_lock, flags); pgd_ctor(pgd); - pgd_prepopulate_pmd(mm, pgd, pmds); + pgd_prepopulate_pxd(mm, pgd, pxds); spin_unlock_irqrestore(&pgd_lock, flags); return pgd; -out_free_pmds: - free_pmds(pmds); +out_free_pxds: + free_pxds(pxds); out_free_pgd: free_page((unsigned long)pgd); out: @@ -289,7 +347,7 @@ out: void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pgd_mop_up_pmds(mm, pgd); + pgd_mop_up_pxds(mm, pgd); pgd_dtor(pgd); paravirt_pgd_free(mm, pgd); 
free_page((unsigned long)pgd); diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index cac7184..09a39fa 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) return; } pte = pte_offset_kernel(pmd, vaddr); + + pax_open_kernel(); if (pte_val(pteval)) set_pte_at(&init_mm, vaddr, pte, pteval); else pte_clear(&init_mm, vaddr, pte); + pax_close_kernel(); /* * It's enough to flush this one mapping. diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c index a3250aa..fbe9129 100644 --- a/arch/x86/mm/setup_nx.c +++ b/arch/x86/mm/setup_nx.c @@ -5,8 +5,10 @@ #include #include +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static int disable_nx __cpuinitdata; +#ifndef CONFIG_PAX_PAGEEXEC /* * noexec = on|off * @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str) return 0; } early_param("noexec", noexec_setup); +#endif + +#endif void __cpuinit x86_configure_nx(void) { +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) if (cpu_has_nx && !disable_nx) __supported_pte_mask |= _PAGE_NX; else +#endif __supported_pte_mask &= ~_PAGE_NX; } diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 426f3a1..b42f6da 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -13,7 +13,7 @@ #include DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) - = { &init_mm, 0, }; + = { &init_mm, 0 }; /* * Smarter SMP flushing macros. @@ -62,7 +62,11 @@ void leave_mm(int cpu) BUG(); cpumask_clear_cpu(cpu, mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); + +#ifndef CONFIG_PAX_PER_CPU_PGD load_cr3(swapper_pg_dir); +#endif + } EXPORT_SYMBOL_GPL(leave_mm); diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index 3855096..d03e0ca 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -58,7 +58,7 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head) struct frame_head bufhead[2]; /* Also check accessibility of one struct frame_head beyond */ - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead))) return NULL; if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) return NULL; @@ -78,7 +78,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth) { struct frame_head *head = (struct frame_head *)frame_pointer(regs); - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { unsigned long stack = kernel_stack_pointer(regs); if (depth) dump_trace(NULL, regs, (unsigned long *)stack, 0, diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 182558d..d5b66f7 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -50,7 +50,7 @@ static inline void setup_num_counters(void) #endif } -static int inline addr_increment(void) +static inline int addr_increment(void) { #ifdef CONFIG_SMP return smp_num_siblings == 2 ? 
2 : 1; diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 215a27a..fb15590 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -32,8 +32,8 @@ int noioapicreroute = 1; int pcibios_last_bus = -1; unsigned long pirq_table_addr; struct pci_bus *pci_root_bus; -struct pci_raw_ops *raw_pci_ops; -struct pci_raw_ops *raw_pci_ext_ops; +const struct pci_raw_ops *raw_pci_ops; +const struct pci_raw_ops *raw_pci_ext_ops; int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val) @@ -365,7 +365,7 @@ static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"), }, }, - {} + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL} }; void __init dmi_check_pciprobe(void) diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index bd33620..f841b78 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus, #undef PCI_CONF1_ADDRESS -struct pci_raw_ops pci_direct_conf1 = { +const struct pci_raw_ops pci_direct_conf1 = { .read = pci_conf1_read, .write = pci_conf1_write, }; @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus, #undef PCI_CONF2_ADDRESS -struct pci_raw_ops pci_direct_conf2 = { +const struct pci_raw_ops pci_direct_conf2 = { .read = pci_conf2_read, .write = pci_conf2_write, }; @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = { * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ -static int __init pci_sanity_check(struct pci_raw_ops *o) +static int __init pci_sanity_check(const struct pci_raw_ops *o) { u32 x = 0; int year, devfn; diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 6dd8955..29b8732 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -364,7 +364,7 @@ static const struct dmi_system_id __devinitconst msi_k8t_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"), }, }, - {} + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; /* @@ -435,7 +435,7 @@ static const struct dmi_system_id __devinitconst toshiba_ohci1394_dmi_table[] = DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9810a0f..b021186 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -542,7 +542,7 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route static struct pci_device_id __initdata pirq_440gx[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) }, - { }, + { PCI_DEVICE(0, 0) } }; /* 440GX has a proprietary PIRQ router -- don't use it */ @@ -1113,7 +1113,7 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; void __init pcibios_irq_init(void) diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index a3d9c54..5372e86 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c @@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus, return 0; } -static struct pci_raw_ops pci_mmcfg = { +static const struct pci_raw_ops pci_mmcfg = { .read = pci_mmcfg_read, .write = 
pci_mmcfg_write, }; diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index e783841..915a493 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c @@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus, return 0; } -static struct pci_raw_ops pci_mmcfg = { +static const struct pci_raw_ops pci_mmcfg = { .read = pci_mmcfg_read, .write = pci_mmcfg_write, }; diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 5c9e245..d743b10 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -108,7 +108,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus, #undef PCI_CONF1_MQ_ADDRESS -static struct pci_raw_ops pci_direct_conf1_mq = { +static const struct pci_raw_ops pci_direct_conf1_mq = { .read = pci_conf1_mq_read, .write = pci_conf1_mq_write }; diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c index b348154..883dd22 100644 --- a/arch/x86/pci/olpc.c +++ b/arch/x86/pci/olpc.c @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus, return 0; } -static struct pci_raw_ops pci_olpc_conf = { +static const struct pci_raw_ops pci_olpc_conf = { .read = pci_olpc_read, .write = pci_olpc_write, }; diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 2492d16..78a4d7e 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -57,50 +57,93 @@ union bios32 { static struct { unsigned long address; unsigned short segment; -} bios32_indirect = { 0, __KERNEL_CS }; +} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; /* * Returns the entry point for the given service, NULL on error */ -static unsigned long bios32_service(unsigned long service) +static unsigned long __devinit bios32_service(unsigned long service) { unsigned char return_code; /* %al */ unsigned long address; /* %ebx */ unsigned long length; /* %ecx */ unsigned long entry; /* %edx */ unsigned long flags; + struct desc_struct d, *gdt; local_irq_save(flags); - __asm__("lcall *(%%edi); cld" + + gdt = get_cpu_gdt_table(smp_processor_id()); + + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); + + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" : "=a" (return_code), "=b" (address), "=c" (length), "=d" (entry) : "0" (service), "1" (0), - "D" (&bios32_indirect)); + "D" (&bios32_indirect), + "r"(__PCIBIOS_DS) + : "memory"); + + pax_open_kernel(); + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; + pax_close_kernel(); + local_irq_restore(flags); switch (return_code) { - case 0: - return address + entry; - case 0x80: /* Not present */ - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); - return 0; - default: /* Shouldn't happen */ - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", - service, return_code); + case 0: { + int cpu; + unsigned char flags; + + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { + printk(KERN_WARNING "bios32_service: not valid\n"); return 0; + } + address = address + PAGE_OFFSET; + length += 16UL; /* some BIOSs underreport this... 
*/ + flags = 4; + if (length >= 64*1024*1024) { + length >>= PAGE_SHIFT; + flags |= 8; + } + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + gdt = get_cpu_gdt_table(cpu); + pack_descriptor(&d, address, length, 0x9b, flags); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); + pack_descriptor(&d, address, length, 0x93, flags); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); + } + return entry; + } + case 0x80: /* Not present */ + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); + return 0; + default: /* Shouldn't happen */ + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", + service, return_code); + return 0; } } static struct { unsigned long address; unsigned short segment; -} pci_indirect = { 0, __KERNEL_CS }; +} pci_indirect __read_only = { 0, __PCIBIOS_CS }; -static int pci_bios_present; +static int pci_bios_present __read_only; static int __devinit check_pcibios(void) { @@ -109,11 +152,13 @@ static int __devinit check_pcibios(void) unsigned long flags, pcibios_entry; if ((pcibios_entry = bios32_service(PCI_SERVICE))) { - pci_indirect.address = pcibios_entry + PAGE_OFFSET; + pci_indirect.address = pcibios_entry; local_irq_save(flags); - __asm__( - "lcall *(%%edi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%edi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -122,7 +167,8 @@ static int __devinit check_pcibios(void) "=b" (ebx), "=c" (ecx) : "1" (PCIBIOS_PCI_BIOS_PRESENT), - "D" (&pci_indirect) + "D" (&pci_indirect), + "r" (__PCIBIOS_DS) : "memory"); local_irq_restore(flags); @@ -166,7 +212,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, switch (len) { case 1: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -175,7 +224,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, : "1" (PCIBIOS_READ_CONFIG_BYTE), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); /* * Zero-extend the result beyond 8 bits, do not trust the * BIOS having done it: @@ -183,7 +233,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, *value &= 0xff; break; case 2: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -192,7 +245,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, : "1" (PCIBIOS_READ_CONFIG_WORD), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); /* * Zero-extend the result beyond 16 bits, do not trust the * BIOS having done it: @@ -200,7 +254,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, *value &= 0xffff; break; case 4: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -209,7 +266,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus, : "1" (PCIBIOS_READ_CONFIG_DWORD), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; } @@ -232,7 +290,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, switch (len) { case 1: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" 
@@ -241,10 +302,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 2: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -253,10 +318,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 4: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -265,7 +334,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; } @@ -279,7 +349,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus, * Function table for BIOS32 access */ -static struct pci_raw_ops pci_bios_access = { +static const struct pci_raw_ops pci_bios_access = { .read = pci_bios_read, .write = pci_bios_write }; @@ -288,7 +358,7 @@ static struct pci_raw_ops pci_bios_access = { * Try to find PCI BIOS. */ -static struct pci_raw_ops * __devinit pci_find_bios(void) +static const struct pci_raw_ops * __devinit pci_find_bios(void) { union bios32 *check; unsigned char sum; @@ -369,10 +439,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) DBG("PCI: Fetching IRQ routing table... "); __asm__("push %%es\n\t" + "movw %w8, %%ds\n\t" "push %%ds\n\t" "pop %%es\n\t" - "lcall *(%%esi); cld\n\t" + "lcall *%%ss:(%%esi); cld\n\t" "pop %%es\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -383,7 +456,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void) "1" (0), "D" ((long) &opt), "S" (&pci_indirect), - "m" (opt) + "m" (opt), + "r" (__PCIBIOS_DS) : "memory"); DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); if (ret & 0xff00) @@ -407,7 +481,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) { int ret; - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w5, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -415,7 +492,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) : "0" (PCIBIOS_SET_PCI_HW_INT), "b" ((dev->bus->number << 8) | dev->devfn), "c" ((irq << 8) | (pin + 10)), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); return !(ret & 0xff00); } EXPORT_SYMBOL(pcibios_set_irq_routing); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 9c57cb1..e2731ff 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -130,7 +130,7 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(init_tss, cpu); + struct tss_struct *t = init_tss + cpu; set_tss_desc(cpu, t); /* * This just modifies memory; should not be @@ -140,7 +140,9 @@ static void fix_processor_context(void) */ #ifdef CONFIG_X86_64 + pax_open_kernel(); get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; + pax_close_kernel(); syscall_init(); /* This sets MSR_*STAR and related */ #endif diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 6b4ffed..f55f243 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO 
$@ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +VDSO_LDFLAGS = -fPIC -shared --no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) GCOV_PROFILE := n # diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index ee55754..0013b2e 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -22,24 +22,48 @@ #include #include #include +#include #include "vextern.h" #define gtod vdso_vsyscall_gtod_data +notrace noinline long __vdso_fallback_time(long *t) +{ + long secs; + asm volatile("syscall" + : "=a" (secs) + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory"); + return secs; +} + notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) { long ret; asm("syscall" : "=a" (ret) : - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory"); + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory"); return ret; } +notrace static inline cycle_t __vdso_vread_hpet(void) +{ + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); +} + +notrace static inline cycle_t __vdso_vread_tsc(void) +{ + cycle_t ret = (cycle_t)vget_cycles(); + + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last; +} + notrace static inline long vgetns(void) { long v; - cycles_t (*vread)(void); - vread = gtod->clock.vread; - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask; + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]) + v = __vdso_vread_tsc(); + else + v = __vdso_vread_hpet(); + v = (v - gtod->clock.cycle_last) & gtod->clock.mask; return (v * gtod->clock.mult) >> gtod->clock.shift; } @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts) notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) { - if (likely(gtod->sysctl_enabled)) + if (likely(gtod->sysctl_enabled && + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) || + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])))) switch (clock) { case CLOCK_REALTIME: if (likely(gtod->clock.vread)) @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) int clock_gettime(clockid_t, struct timespec *) __attribute__((weak, alias("__vdso_clock_gettime"))); -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) { long ret; - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) { + asm("syscall" : "=a" (ret) : + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory"); + return ret; +} + +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) +{ + if (likely(gtod->sysctl_enabled && + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) || + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])))) + { if (likely(tv != NULL)) { BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != offsetof(struct timespec, tv_nsec) || @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) } return 0; } - 
asm("syscall" : "=a" (ret) : - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); - return ret; + return __vdso_fallback_gettimeofday(tv, tz); } int gettimeofday(struct timeval *, struct timezone *) __attribute__((weak, alias("__vdso_gettimeofday"))); diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S index 4e5dd3b..00ba15e 100644 --- a/arch/x86/vdso/vdso.lds.S +++ b/arch/x86/vdso/vdso.lds.S @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK; #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x; #include "vextern.h" #undef VEXTERN + +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x; +VEXTERN(fallback_gettimeofday) +VEXTERN(fallback_time) +VEXTERN(getcpu) +#undef VEXTERN diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 02b442e..72f18fe 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -25,6 +25,7 @@ #include #include #include +#include enum { VDSO_DISABLED = 0, @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map) void enable_sep_cpu(void) { int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; if (!boot_cpu_has(X86_FEATURE_SEP)) { put_cpu(); @@ -249,7 +250,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); /* * Make sure the vDSO gets into every core dump. * Dumping its contents makes post-mortem fully interpretable later @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (compat) addr = VDSO_HIGH_BASE; else { - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } } - current->mm->context.vdso = (void *)addr; + current->mm->context.vdso = addr; if (compat_uses_vma || !compat) { /* @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) } current_thread_info()->sysenter_return = - VDSO32_SYMBOL(addr, SYSENTER_RETURN); + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN); up_fail: if (ret) - current->mm->context.vdso = NULL; + current->mm->context.vdso = 0; up_write(&mm->mmap_sem); @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init); const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) return "[vdso]"; + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; +#endif + return NULL; } @@ -422,7 +429,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk) struct mm_struct *mm = tsk->mm; /* Check to see if this task was created in compat vdso mode */ - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) + if (mm && mm->context.vdso == VDSO_HIGH_BASE) return &gate_vma; return NULL; } diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h index 1683ba2..48d07f3 100644 --- a/arch/x86/vdso/vextern.h +++ b/arch/x86/vdso/vextern.h @@ -11,6 +11,5 @@ put into vextern.h and be referenced as a pointer with vdso prefix. The main kernel later fills in the values. 
*/ -VEXTERN(jiffies) VEXTERN(vgetcpu_mode) VEXTERN(vsyscall_gtod_data) diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index ac74869..fe8ff73 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void) if (!vbase) goto oom; - if (memcmp(vbase, "\177ELF", 4)) { + if (memcmp(vbase, ELFMAG, SELFMAG)) { printk("VDSO: I'm broken; not ELF\n"); vdso_enabled = 0; } @@ -67,6 +67,7 @@ static int __init init_vdso_vars(void) *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x; #include "vextern.h" #undef VEXTERN + vunmap(vbase); return 0; oom: @@ -117,7 +118,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto up_fail; } - current->mm->context.vdso = (void *)addr; + current->mm->context.vdso = addr; ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| @@ -125,7 +126,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) VM_ALWAYSDUMP, vdso_pages); if (ret) { - current->mm->context.vdso = NULL; + current->mm->context.vdso = 0; goto up_fail; } @@ -133,10 +134,3 @@ up_fail: up_write(&mm->mmap_sem); return ret; } - -static __init int vdso_setup(char *s) -{ - vdso_enabled = simple_strtoul(s, NULL, 0); - return 0; -} -__setup("vdso=", vdso_setup); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 25d787c..54e84e1 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -74,8 +74,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; -void *xen_initial_gdt; - /* * Point at some empty memory to start with. We map the real shared_info * page as soon as fixmap is up and running. @@ -551,7 +549,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) preempt_disable(); - start = __get_cpu_var(idt_desc).address; + start = (unsigned long)__get_cpu_var(idt_desc).address; end = start + __get_cpu_var(idt_desc).size + 1; xen_mc_flush(); @@ -1099,7 +1097,17 @@ asmlinkage void __init xen_start_kernel(void) __userpte_alloc_gfp &= ~__GFP_HIGHMEM; /* Work out if we support NX */ - x86_configure_nx(); +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { + unsigned l, h; + + __supported_pte_mask |= _PAGE_NX; + rdmsr(MSR_EFER, l, h); + l |= EFER_NX; + wrmsr(MSR_EFER, l, h); + } +#endif xen_setup_features(); @@ -1130,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void) machine_ops = xen_machine_ops; - /* - * The only reliable way to retain the initial address of the - * percpu gdt_page is to remember it here, so we can go and - * mark it RW later, when the initial percpu area is freed. 
- */ - xen_initial_gdt = &per_cpu(gdt_page, 0); - xen_smp_init(); pgd = (pgd_t *)xen_start_info->pt_base; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 914f046..8965503 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1694,6 +1694,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, convert_pfn_mfn(init_level4_pgt); convert_pfn_mfn(level3_ident_pgt); convert_pfn_mfn(level3_kernel_pgt); + convert_pfn_mfn(level3_vmalloc_pgt); + convert_pfn_mfn(level3_vmemmap_pgt); l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); @@ -1712,7 +1714,10 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d2dfbf5..40abbbb 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -169,11 +169,6 @@ static void __init xen_smp_prepare_boot_cpu(void) { BUG_ON(smp_processor_id() != 0); native_smp_prepare_boot_cpu(); - - /* We've switched to the "real" per-cpu gdt, so make sure the - old memory can be recycled */ - make_lowmem_page_readwrite(xen_initial_gdt); - xen_setup_vcpu_info_placement(); } @@ -233,8 +228,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) gdt = get_cpu_gdt_table(cpu); ctxt->flags = VGCF_IN_KERNEL; - ctxt->user_regs.ds = __USER_DS; - ctxt->user_regs.es = __USER_DS; + ctxt->user_regs.ds = __KERNEL_DS; + ctxt->user_regs.es = __KERNEL_DS; ctxt->user_regs.ss = __KERNEL_DS; #ifdef CONFIG_X86_32 ctxt->user_regs.fs = __KERNEL_PERCPU; diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 1a5ff24..a187d40 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -19,6 +19,17 @@ ENTRY(startup_xen) #ifdef CONFIG_X86_32 mov %esi,xen_start_info mov $init_thread_union+THREAD_SIZE,%esp +#ifdef CONFIG_SMP + movl $cpu_gdt_table,%edi + movl $__per_cpu_load,%eax + movw %ax,__KERNEL_PERCPU + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_PERCPU + 4(%edi) + movb %ah,__KERNEL_PERCPU + 7(%edi) + movl $__per_cpu_end - 1,%eax + subl $__per_cpu_start,%eax + movw %ax,__KERNEL_PERCPU + 0(%edi) +#endif #else mov %rsi,xen_start_info mov $init_thread_union+THREAD_SIZE,%rsp diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index f9153a3..51eab3d 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -10,8 +10,6 @@ extern const char xen_hypervisor_callback[]; extern const char xen_failsafe_callback[]; -extern void *xen_initial_gdt; - struct trap_info; void xen_copy_trap_info(struct trap_info *traps); diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index 58916af..9cb880b 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll) } EXPORT_SYMBOL(blk_iopoll_complete); -static void blk_iopoll_softirq(struct softirq_action *h) +static void blk_iopoll_softirq(void) { struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); int rearm = 0, budget = blk_iopoll_budget; diff --git a/block/blk-map.c b/block/blk-map.c index 30a7e51..3254ef2 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -54,7 +54,7 @@ 
static int __blk_rq_map_user(struct request_queue *q, struct request *rq, * direct dma. else, set up kernel bounce buffers */ uaddr = (unsigned long) ubuf; - if (blk_rq_aligned(q, ubuf, len) && !map_data) + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data) bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask); else bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask); @@ -299,7 +299,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (!len || !kbuf) return -EINVAL; - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf); + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf); if (do_copy) bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); else diff --git a/block/blk-softirq.c b/block/blk-softirq.c index ee9c216..58d410a 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done); * Softirq action handler - move entries to local list and loop over them * while passing them to the queue registered handler. */ -static void blk_done_softirq(struct softirq_action *h) +static void blk_done_softirq(void) { struct list_head *cpu_list, local_list; diff --git a/crypto/lrw.c b/crypto/lrw.c index 358f80b..31b2333 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -60,7 +60,7 @@ static int setkey(struct crypto_tfm *parent, const u8 *key, struct priv *ctx = crypto_tfm_ctx(parent); struct crypto_cipher *child = ctx->child; int err, i; - be128 tmp = { 0 }; + be128 tmp = { 0, 0 }; int bsize = crypto_cipher_blocksize(child); crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 68919e2..e4cdf22 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -846,7 +846,7 @@ DECLARE_FILE_FUNCTIONS(alarm); } static struct battery_file { - struct file_operations ops; + const struct file_operations ops; mode_t mode; const char *name; } acpi_battery_file[] = { diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 51cada7..60e0d99 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -73,7 +73,7 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = { {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal, "Incorrect _ADR", 1}, - {""} + {"", "", 0, NULL, all_versions, NULL, 0} }; #if CONFIG_ACPI_BLACKLIST_YEAR diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 3fe29e9..b3484bf 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -77,7 +77,7 @@ struct dock_dependent_device { struct list_head list; struct list_head hotplug_list; acpi_handle handle; - struct acpi_dock_ops *ops; + const struct acpi_dock_ops *ops; void *context; }; @@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); * the dock driver after _DCK is executed. 
*/ int -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, void *context) { struct dock_dependent_device *dd; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 78418ce..a991f41 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) void __iomem *virt_addr; virt_addr = ioremap(phys_addr, width); + if (!virt_addr) + return AE_NO_MEMORY; if (!value) value = &dummy; @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) void __iomem *virt_addr; virt_addr = ioremap(phys_addr, width); + if (!virt_addr) + return AE_NO_MEMORY; switch (width) { case 8: diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c index 66f6729..2d6de0a 100644 --- a/drivers/acpi/power_meter.c +++ b/drivers/acpi/power_meter.c @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr, return res; temp /= 1000; - if (temp < 0) - return -EINVAL; mutex_lock(&resource->lock); resource->trip[attr->index - 7] = temp; diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 1ac678d..ebee2d7 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file, size_t count, loff_t * ppos) { struct list_head *node, *next; - char strbuf[5]; - char str[5] = ""; - unsigned int len = count; + char strbuf[5] = {0}; struct acpi_device *found_dev = NULL; - if (len > 4) - len = 4; - if (len < 0) - return -EFAULT; + if (count > 4) + count = 4; - if (copy_from_user(strbuf, buffer, len)) + if (copy_from_user(strbuf, buffer, count)) return -EFAULT; - strbuf[len] = '\0'; - sscanf(strbuf, "%s", str); + strbuf[count] = '\0'; mutex_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file, if (!dev->wakeup.flags.valid) continue; - if (!strncmp(dev->pnp.bus_id, str, 4)) { + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) { dev->wakeup.state.enabled = dev->wakeup.state.enabled ? 
0 : 1; found_dev = dev; diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 38ea0cc..58c1914 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -586,7 +586,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) return 0; #endif - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); + BUG_ON(pr->id >= nr_cpu_ids); /* * Buggy BIOS check diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e9a8026..553acaa 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -124,7 +124,7 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, (void *)1}, - {}, + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}, }; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 4882bc1..53f6c8a 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -318,7 +318,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state) } } -static struct platform_suspend_ops acpi_suspend_ops = { +static const struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, .prepare_late = acpi_pm_prepare, @@ -346,7 +346,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state) * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. */ -static struct platform_suspend_ops acpi_suspend_ops_old = { +static const struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, .prepare_late = acpi_pm_freeze, @@ -500,7 +500,7 @@ static void acpi_pm_thaw(void) acpi_enable_all_runtime_gpes(); } -static struct platform_hibernation_ops acpi_hibernation_ops = { +static const struct platform_hibernation_ops acpi_hibernation_ops = { .begin = acpi_hibernation_begin, .end = acpi_pm_end, .pre_snapshot = acpi_hibernation_pre_snapshot, @@ -550,7 +550,7 @@ static int acpi_hibernation_pre_snapshot_old(void) * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. 
*/ -static struct platform_hibernation_ops acpi_hibernation_ops_old = { +static const struct platform_hibernation_ops acpi_hibernation_ops_old = { .begin = acpi_hibernation_begin_old, .end = acpi_pm_end, .pre_snapshot = acpi_hibernation_pre_snapshot_old, diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 9865d46..ffc6a96 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -363,7 +363,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd) vd->brightness->levels[request_level]); } -static struct backlight_ops acpi_backlight_ops = { +static const struct backlight_ops acpi_backlight_ops = { .get_brightness = acpi_video_get_brightness, .update_status = acpi_video_set_brightness, }; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index d571e3f..7689d88 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -93,17 +93,17 @@ static struct scsi_host_template ahci_sht = { AHCI_SHT("ahci"), }; -static struct ata_port_operations ahci_vt8251_ops = { +static const struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, .hardreset = ahci_vt8251_hardreset, }; -static struct ata_port_operations ahci_p5wdh_ops = { +static const struct ata_port_operations ahci_p5wdh_ops = { .inherits = &ahci_ops, .hardreset = ahci_p5wdh_hardreset, }; -static struct ata_port_operations ahci_sb600_ops = { +static const struct ata_port_operations ahci_sb600_ops = { .inherits = &ahci_ops, .softreset = ahci_sb600_softreset, .pmp_softreset = ahci_sb600_softreset, @@ -374,7 +374,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, - { } /* terminate list */ + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 22a20e8..9180f01 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -309,7 +309,7 @@ extern struct device_attribute *ahci_sdev_attrs[]; .shost_attrs = ahci_shost_attrs, \ .sdev_attrs = ahci_sdev_attrs -extern struct ata_port_operations ahci_ops; +extern const struct ata_port_operations ahci_ops; void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv, diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 7107a69..6268939 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations generic_port_ops = { +static const struct ata_port_operations generic_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_mode = generic_set_mode, diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3971bc0..4169733 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -302,7 +302,7 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (CPT) */ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - { } /* terminate list */ + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; static struct pci_driver piix_pci_driver = { @@ -320,12 +320,12 @@ static struct scsi_host_template piix_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations piix_sata_ops = { +static const struct ata_port_operations piix_sata_ops = { .inherits = &ata_bmdma32_port_ops, .sff_irq_check = piix_irq_check, }; -static struct ata_port_operations piix_pata_ops = { +static const struct 
ata_port_operations piix_pata_ops = { .inherits = &piix_sata_ops, .cable_detect = ata_cable_40wire, .set_piomode = piix_set_piomode, @@ -333,18 +333,18 @@ static struct ata_port_operations piix_pata_ops = { .prereset = piix_pata_prereset, }; -static struct ata_port_operations piix_vmw_ops = { +static const struct ata_port_operations piix_vmw_ops = { .inherits = &piix_pata_ops, .bmdma_status = piix_vmw_bmdma_status, }; -static struct ata_port_operations ich_pata_ops = { +static const struct ata_port_operations ich_pata_ops = { .inherits = &piix_pata_ops, .cable_detect = ich_pata_cable_detect, .set_dmamode = ich_set_dmamode, }; -static struct ata_port_operations piix_sidpr_sata_ops = { +static const struct ata_port_operations piix_sidpr_sata_ops = { .inherits = &piix_sata_ops, .hardreset = sata_std_hardreset, .scr_read = piix_sidpr_scr_read, @@ -620,7 +620,7 @@ static const struct ich_laptop ich_laptop[] = { { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ /* end marker */ - { 0, } + { 0, 0, 0 } }; /** @@ -1112,7 +1112,7 @@ static int piix_broken_suspend(void) }, }, - { } /* terminate list */ + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } /* terminate list */ }; static const char *oemstrs[] = { "Tecra M3,", diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index b5aecd0..d80d173 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -141,7 +141,7 @@ struct device_attribute *ahci_sdev_attrs[] = { }; EXPORT_SYMBOL_GPL(ahci_sdev_attrs); -struct ata_port_operations ahci_ops = { +const struct ata_port_operations ahci_ops = { .inherits = &sata_pmp_port_ops, .qc_defer = ahci_pmp_qc_defer, diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 7b5eea7..e9eb041 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -224,12 +224,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data) ata_acpi_uevent(dev->link->ap, dev, event); } -static struct acpi_dock_ops ata_acpi_dev_dock_ops = { +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = { .handler = ata_acpi_dev_notify_dock, .uevent = ata_acpi_dev_uevent, }; -static struct acpi_dock_ops ata_acpi_ap_dock_ops = { +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { .handler = ata_acpi_ap_notify_dock, .uevent = ata_acpi_ap_uevent, }; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1f24267..fa2553e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -901,7 +901,7 @@ static const struct ata_xfer_ent { { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, - { -1, }, + { -1, 0, 0 } }; /** @@ -3073,7 +3073,7 @@ static const struct ata_timing ata_timing[] = { { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, - { 0xFF } + { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 } }; #define ENOUGH(v, unit) (((v)-1)/(unit)+1) @@ -4323,7 +4323,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, /* End Marker */ - { } + { NULL, NULL, 0 } }; static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) @@ -4884,7 +4884,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) struct ata_port *ap; unsigned int tag; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ap = qc->ap; 
qc->flags = 0; @@ -4900,7 +4900,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) struct ata_port *ap; struct ata_link *link; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); ap = qc->ap; link = qc->dev->link; @@ -5881,7 +5881,7 @@ static void ata_host_stop(struct device *gendev, void *res) * LOCKING: * None. */ -static void ata_finalize_port_ops(struct ata_port_operations *ops) +static void ata_finalize_port_ops(const struct ata_port_operations *ops) { static DEFINE_SPINLOCK(lock); const struct ata_port_operations *cur; @@ -5893,6 +5893,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) return; spin_lock(&lock); + pax_open_kernel(); for (cur = ops->inherits; cur; cur = cur->inherits) { void **inherit = (void **)cur; @@ -5906,8 +5907,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) if (IS_ERR(*pp)) *pp = NULL; - ops->inherits = NULL; + ((struct ata_port_operations *)ops)->inherits = NULL; + pax_close_kernel(); spin_unlock(&lock); } @@ -6004,7 +6006,7 @@ int ata_host_start(struct ata_host *host) */ /* KILLME - the only user left is ipr */ void ata_host_init(struct ata_host *host, struct device *dev, - unsigned long flags, struct ata_port_operations *ops) + unsigned long flags, const struct ata_port_operations *ops) { spin_lock_init(&host->lock); host->dev = dev; @@ -6654,7 +6656,7 @@ static void ata_dummy_error_handler(struct ata_port *ap) /* truly dummy */ } -struct ata_port_operations ata_dummy_port_ops = { +const struct ata_port_operations ata_dummy_port_ops = { .qc_prep = ata_noop_qc_prep, .qc_issue = ata_dummy_qc_issue, .error_handler = ata_dummy_error_handler, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index da5a55b..9855d41 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3684,7 +3684,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, */ void ata_std_error_handler(struct ata_port *ap) { - struct ata_port_operations *ops = ap->ops; + const struct ata_port_operations *ops = ap->ops; ata_reset_fn_t hardreset = ops->hardreset; /* ignore built-in hardreset if SCR access is not available */ diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 224faab..1890316 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -868,7 +868,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries) */ static int sata_pmp_eh_recover(struct ata_port *ap) { - struct ata_port_operations *ops = ap->ops; + const struct ata_port_operations *ops = ap->ops; int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; struct ata_link *pmp_link = &ap->link; struct ata_device *pmp_dev = pmp_link->device; diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index c8d4703..e9e7d35 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c @@ -216,7 +216,7 @@ static struct scsi_host_template pacpi_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pacpi_ops = { +static const struct ata_port_operations pacpi_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = pacpi_qc_issue, .cable_detect = pacpi_cable_detect, diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 794ec6e..77b1883 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -363,7 +363,7 @@ static struct scsi_host_template ali_sht = { * Port operations for PIO only ALi */ -static struct ata_port_operations 
ali_early_port_ops = { +static const struct ata_port_operations ali_early_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = ali_set_piomode, @@ -380,7 +380,7 @@ static const struct ata_port_operations ali_dma_base_ops = { * Port operations for DMA capable ALi without cable * detect */ -static struct ata_port_operations ali_20_port_ops = { +static const struct ata_port_operations ali_20_port_ops = { .inherits = &ali_dma_base_ops, .cable_detect = ata_cable_40wire, .mode_filter = ali_20_filter, @@ -391,7 +391,7 @@ static struct ata_port_operations ali_20_port_ops = { /* * Port operations for DMA capable ALi with cable detect */ -static struct ata_port_operations ali_c2_port_ops = { +static const struct ata_port_operations ali_c2_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, @@ -402,7 +402,7 @@ static struct ata_port_operations ali_c2_port_ops = { /* * Port operations for DMA capable ALi with cable detect */ -static struct ata_port_operations ali_c4_port_ops = { +static const struct ata_port_operations ali_c4_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, @@ -412,7 +412,7 @@ static struct ata_port_operations ali_c4_port_ops = { /* * Port operations for DMA capable ALi with cable detect and LBA48 */ -static struct ata_port_operations ali_c5_port_ops = { +static const struct ata_port_operations ali_c5_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .dev_config = ali_warn_atapi_dma, diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 620a07c..06d4e17 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = { .prereset = amd_pre_reset, }; -static struct ata_port_operations amd33_port_ops = { +static const struct ata_port_operations amd33_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = amd33_set_piomode, .set_dmamode = amd33_set_dmamode, }; -static struct ata_port_operations amd66_port_ops = { +static const struct ata_port_operations amd66_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = amd66_set_piomode, .set_dmamode = amd66_set_dmamode, }; -static struct ata_port_operations amd100_port_ops = { +static const struct ata_port_operations amd100_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = amd100_set_piomode, .set_dmamode = amd100_set_dmamode, }; -static struct ata_port_operations amd133_port_ops = { +static const struct ata_port_operations amd133_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = amd_cable_detect, .set_piomode = amd133_set_piomode, @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = { .host_stop = nv_host_stop, }; -static struct ata_port_operations nv100_port_ops = { +static const struct ata_port_operations nv100_port_ops = { .inherits = &nv_base_port_ops, .set_piomode = nv100_set_piomode, .set_dmamode = nv100_set_dmamode, }; -static struct ata_port_operations nv133_port_ops = { +static const struct ata_port_operations nv133_port_ops = { .inherits = &nv_base_port_ops, .set_piomode = nv133_set_piomode, .set_dmamode = nv133_set_dmamode, diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index ba43f0f..3949a4f 100644 --- a/drivers/ata/pata_artop.c +++ 
b/drivers/ata/pata_artop.c @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations artop6210_ops = { +static const struct ata_port_operations artop6210_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = artop6210_set_piomode, @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = { .qc_defer = artop6210_qc_defer, }; -static struct ata_port_operations artop6260_ops = { +static const struct ata_port_operations artop6260_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = artop6260_cable_detect, .set_piomode = artop6260_set_piomode, diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c index 66ce6a5..2e36304 100644 --- a/drivers/ata/pata_at32.c +++ b/drivers/ata/pata_at32.c @@ -173,7 +173,7 @@ static struct scsi_host_template at32_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations at32_port_ops = { +static const struct ata_port_operations at32_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = pata_at32_set_piomode, diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 0da0dcc..04d7d77 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c @@ -196,7 +196,7 @@ static struct scsi_host_template pata_at91_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pata_at91_port_ops = { +static const struct ata_port_operations pata_at91_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = pata_at91_data_xfer_noirq, diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 4375561..cfe4ef5 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -214,7 +214,7 @@ static struct scsi_host_template atiixp_sht = { .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations atiixp_port_ops = { +static const struct ata_port_operations atiixp_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index 9529593..5fcc356 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -275,7 +275,7 @@ static struct scsi_host_template atp867x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations atp867x_ops = { +static const struct ata_port_operations atp867x_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = atp867x_cable_detect, .set_piomode = atp867x_set_piomode, diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 9cae65d..7b29f13 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -1420,7 +1420,7 @@ static struct scsi_host_template bfin_sht = { .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations bfin_pata_ops = { +static const struct ata_port_operations bfin_pata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = bfin_set_piomode, diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c index e5f289f..bad2c44 100644 --- a/drivers/ata/pata_cmd640.c +++ b/drivers/ata/pata_cmd640.c @@ -165,7 +165,7 @@ static struct scsi_host_template cmd640_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations cmd640_port_ops = { +static const struct ata_port_operations cmd640_port_ops = { .inherits = &ata_sff_port_ops, /* In theory xfer_noirq is not needed once we kill the prefetcher */ .sff_data_xfer = ata_sff_data_xfer_noirq, diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 905ff76..976f0c0 
100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -268,18 +268,18 @@ static const struct ata_port_operations cmd64x_base_ops = { .set_dmamode = cmd64x_set_dmamode, }; -static struct ata_port_operations cmd64x_port_ops = { +static const struct ata_port_operations cmd64x_port_ops = { .inherits = &cmd64x_base_ops, .cable_detect = ata_cable_40wire, }; -static struct ata_port_operations cmd646r1_port_ops = { +static const struct ata_port_operations cmd646r1_port_ops = { .inherits = &cmd64x_base_ops, .bmdma_stop = cmd646r1_bmdma_stop, .cable_detect = ata_cable_40wire, }; -static struct ata_port_operations cmd648_port_ops = { +static const struct ata_port_operations cmd648_port_ops = { .inherits = &cmd64x_base_ops, .bmdma_stop = cmd648_bmdma_stop, .cable_detect = cmd648_cable_detect, diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 030952f..2984d7a 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -108,7 +108,7 @@ static struct scsi_host_template cs5520_sht = { .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations cs5520_port_ops = { +static const struct ata_port_operations cs5520_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index f792330..d4a893a 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = { .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations cs5530_port_ops = { +static const struct ata_port_operations cs5530_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index 03a9318..003dadf 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cs5535_port_ops = { +static const struct ata_port_operations cs5535_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = cs5535_cable_detect, .set_piomode = cs5535_set_piomode, diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 21ee23f..12f1bb4 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cs5536_port_ops = { +static const struct ata_port_operations cs5536_port_ops = { .inherits = &ata_bmdma32_port_ops, .cable_detect = cs5536_cable_detect, .set_piomode = cs5536_set_piomode, diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 6d915b0..5f95c77 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c @@ -115,7 +115,7 @@ static struct scsi_host_template cy82c693_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cy82c693_port_ops = { +static const struct ata_port_operations cy82c693_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = cy82c693_set_piomode, diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index a088347..505e510 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -238,7 +238,7 @@ static struct scsi_host_template efar_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations efar_ops = { +static const struct ata_port_operations efar_ops = { 
.inherits = &ata_bmdma_port_ops, .cable_detect = efar_cable_detect, .set_piomode = efar_set_piomode, diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 7688868..6d241ae 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -269,7 +269,7 @@ static struct scsi_host_template hpt36x_sht = { * Configuration for HPT366/68 */ -static struct ata_port_operations hpt366_port_ops = { +static const struct ata_port_operations hpt366_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = hpt36x_cable_detect, .mode_filter = hpt366_filter, diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 9ae4c08..f1bec3e 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -564,7 +564,7 @@ static struct scsi_host_template hpt37x_sht = { * Configuration for HPT370 */ -static struct ata_port_operations hpt370_port_ops = { +static const struct ata_port_operations hpt370_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt370_bmdma_stop, @@ -580,7 +580,7 @@ static struct ata_port_operations hpt370_port_ops = { * Configuration for HPT370A. Close to 370 but less filters */ -static struct ata_port_operations hpt370a_port_ops = { +static const struct ata_port_operations hpt370a_port_ops = { .inherits = &hpt370_port_ops, .mode_filter = hpt370a_filter, }; @@ -590,7 +590,7 @@ static struct ata_port_operations hpt370a_port_ops = { * and DMA mode setting functionality. */ -static struct ata_port_operations hpt372_port_ops = { +static const struct ata_port_operations hpt372_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt37x_bmdma_stop, @@ -606,7 +606,7 @@ static struct ata_port_operations hpt372_port_ops = { * but we have a different cable detection procedure for function 1. */ -static struct ata_port_operations hpt374_fn1_port_ops = { +static const struct ata_port_operations hpt374_fn1_port_ops = { .inherits = &hpt372_port_ops, .cable_detect = hpt374_fn1_cable_detect, .prereset = hpt37x_pre_reset, diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 32f3463..27654f8 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -331,7 +331,7 @@ static struct scsi_host_template hpt3x2n_sht = { * Configuration for HPT3x2n. 
*/ -static struct ata_port_operations hpt3x2n_port_ops = { +static const struct ata_port_operations hpt3x2n_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt3x2n_bmdma_stop, diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index b63d5e2..d8f14d6 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations hpt3x3_port_ops = { +static const struct ata_port_operations hpt3x3_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = hpt3x3_set_piomode, diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index 9f2889f..aeb00ba 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -320,7 +320,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) } } -static struct ata_port_operations pata_icside_port_ops = { +static const struct ata_port_operations pata_icside_port_ops = { .inherits = &ata_bmdma_port_ops, /* no need to build any PRD tables for DMA */ .qc_prep = ata_noop_qc_prep, diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c index 4bceb88..457dfb6 100644 --- a/drivers/ata/pata_isapnp.c +++ b/drivers/ata/pata_isapnp.c @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations isapnp_port_ops = { +static const struct ata_port_operations isapnp_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, }; -static struct ata_port_operations isapnp_noalt_port_ops = { +static const struct ata_port_operations isapnp_noalt_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, /* No altstatus so we don't want to use the lost interrupt poll */ diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index 4d142a2..9c78bee 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c @@ -233,7 +233,7 @@ static struct scsi_host_template it8213_sht = { }; -static struct ata_port_operations it8213_ops = { +static const struct ata_port_operations it8213_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = it8213_cable_detect, .set_piomode = it8213_set_piomode, diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index bf88f71..c690096 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -801,7 +801,7 @@ static struct scsi_host_template it821x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations it821x_smart_port_ops = { +static const struct ata_port_operations it821x_smart_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, @@ -815,7 +815,7 @@ static struct ata_port_operations it821x_smart_port_ops = { .port_start = it821x_port_start, }; -static struct ata_port_operations it821x_passthru_port_ops = { +static const struct ata_port_operations it821x_passthru_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, @@ -831,7 +831,7 @@ static struct ata_port_operations it821x_passthru_port_ops = { .port_start = it821x_port_start, }; -static struct ata_port_operations it821x_rdc_port_ops = { +static const struct ata_port_operations it821x_rdc_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index ba54b08..4b952b7 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ 
b/drivers/ata/pata_ixp4xx_cf.c @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations ixp4xx_port_ops = { +static const struct ata_port_operations ixp4xx_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ixp4xx_mmio_data_xfer, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index cb3babb..6770bd8 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations jmicron_ops = { +static const struct ata_port_operations jmicron_ops = { .inherits = &ata_bmdma_port_ops, .prereset = jmicron_pre_reset, }; diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 9df1ff7..b570efd 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -113,7 +113,7 @@ struct legacy_probe { struct legacy_controller { const char *name; - struct ata_port_operations *ops; + const struct ata_port_operations *ops; unsigned int pio_mask; unsigned int flags; unsigned int pflags; @@ -230,12 +230,12 @@ static const struct ata_port_operations legacy_base_port_ops = { * pio_mask as well. */ -static struct ata_port_operations simple_port_ops = { +static const struct ata_port_operations simple_port_ops = { .inherits = &legacy_base_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, }; -static struct ata_port_operations legacy_port_ops = { +static const struct ata_port_operations legacy_port_ops = { .inherits = &legacy_base_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .set_mode = legacy_set_mode, @@ -331,7 +331,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, return buflen; } -static struct ata_port_operations pdc20230_port_ops = { +static const struct ata_port_operations pdc20230_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = pdc20230_set_piomode, .sff_data_xfer = pdc_data_xfer_vlb, @@ -364,7 +364,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev) ioread8(ap->ioaddr.status_addr); } -static struct ata_port_operations ht6560a_port_ops = { +static const struct ata_port_operations ht6560a_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = ht6560a_set_piomode, }; @@ -407,7 +407,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) ioread8(ap->ioaddr.status_addr); } -static struct ata_port_operations ht6560b_port_ops = { +static const struct ata_port_operations ht6560b_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = ht6560b_set_piomode, }; @@ -506,7 +506,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap, } -static struct ata_port_operations opti82c611a_port_ops = { +static const struct ata_port_operations opti82c611a_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = opti82c611a_set_piomode, }; @@ -616,7 +616,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc) return ata_sff_qc_issue(qc); } -static struct ata_port_operations opti82c46x_port_ops = { +static const struct ata_port_operations opti82c46x_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = opti82c46x_set_piomode, .qc_issue = opti82c46x_qc_issue, @@ -778,20 +778,20 @@ static int qdi_port(struct platform_device *dev, return 0; } -static struct ata_port_operations qdi6500_port_ops = { +static const struct ata_port_operations qdi6500_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode 
= qdi6500_set_piomode, .qc_issue = qdi_qc_issue, .sff_data_xfer = vlb32_data_xfer, }; -static struct ata_port_operations qdi6580_port_ops = { +static const struct ata_port_operations qdi6580_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = qdi6580_set_piomode, .sff_data_xfer = vlb32_data_xfer, }; -static struct ata_port_operations qdi6580dp_port_ops = { +static const struct ata_port_operations qdi6580dp_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = qdi6580dp_set_piomode, .qc_issue = qdi_qc_issue, @@ -863,7 +863,7 @@ static int winbond_port(struct platform_device *dev, return 0; } -static struct ata_port_operations winbond_port_ops = { +static const struct ata_port_operations winbond_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = winbond_set_piomode, .sff_data_xfer = vlb32_data_xfer, @@ -986,7 +986,7 @@ static __init int legacy_init_one(struct legacy_probe *probe) int pio_modes = controller->pio_mask; unsigned long io = probe->port; u32 mask = (1 << probe->slot); - struct ata_port_operations *ops = controller->ops; + const struct ata_port_operations *ops = controller->ops; struct legacy_data *ld = &legacy_data[probe->slot]; struct ata_host *host = NULL; struct ata_port *ap; diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 75b49d0..9fa605b 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -918,9 +918,8 @@ static struct scsi_host_template pata_macio_sht = { .slave_configure = pata_macio_slave_config, }; -static struct ata_port_operations pata_macio_ops = { +static const struct ata_port_operations pata_macio_ops = { .inherits = &ata_bmdma_port_ops, - .freeze = pata_macio_freeze, .set_piomode = pata_macio_set_timings, .set_dmamode = pata_macio_set_timings, diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index dd38083..b73db04 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations marvell_ops = { +static const struct ata_port_operations marvell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = marvell_cable_detect, .prereset = marvell_pre_reset, diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index f087ab5..8074223 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations mpc52xx_ata_port_ops = { +static const struct ata_port_operations mpc52xx_ata_port_ops = { .inherits = &ata_sff_port_ops, .sff_dev_select = mpc52xx_ata_dev_select, .set_piomode = mpc52xx_ata_set_piomode, diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index b21f002..0a27e7f 100644 --- a/drivers/ata/pata_mpiix.c +++ b/drivers/ata/pata_mpiix.c @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations mpiix_port_ops = { +static const struct ata_port_operations mpiix_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = mpiix_qc_issue, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 3eb921c..c167317 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations netcell_ops = { +static const struct ata_port_operations 
netcell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_80wire, .read_id = netcell_read_id, diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index cc50bd0..123fe21 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations ninja32_port_ops = { +static const struct ata_port_operations ninja32_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_dev_select = ninja32_dev_select, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c index 2110863..6144b43 100644 --- a/drivers/ata/pata_ns87410.c +++ b/drivers/ata/pata_ns87410.c @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations ns87410_port_ops = { +static const struct ata_port_operations ns87410_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = ns87410_qc_issue, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index 605f198..f5fee2f 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap) } #endif /* 87560 SuperIO Support */ -static struct ata_port_operations ns87415_pata_ops = { +static const struct ata_port_operations ns87415_pata_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma = ns87415_check_atapi_dma, @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = { }; #if defined(CONFIG_SUPERIO) -static struct ata_port_operations ns87560_pata_ops = { +static const struct ata_port_operations ns87560_pata_ops = { .inherits = &ns87415_pata_ops, .sff_tf_read = ns87560_tf_read, .sff_check_status = ns87560_check_status, diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 06ddd91..1d28cb6 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -782,6 +782,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc) return 0; } +/* cannot be const */ static struct ata_port_operations octeon_cf_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = octeon_cf_check_atapi_dma, diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index b811c16..e23051e 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations oldpiix_pata_ops = { +static const struct ata_port_operations oldpiix_pata_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = oldpiix_qc_issue, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c index 00c5a02..b486a97 100644 --- a/drivers/ata/pata_opti.c +++ b/drivers/ata/pata_opti.c @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations opti_port_ops = { +static const struct ata_port_operations opti_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = opti_set_piomode, diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 0852cd0..62f6a68 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations optidma_port_ops = { +static const 
struct ata_port_operations optidma_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = optidma_set_pio_mode, @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = { .prereset = optidma_pre_reset, }; -static struct ata_port_operations optiplus_port_ops = { +static const struct ata_port_operations optiplus_port_ops = { .inherits = &optidma_port_ops, .set_piomode = optiplus_set_pio_mode, .set_dmamode = optiplus_set_dma_mode, diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c index 11fb4cc..1a14022 100644 --- a/drivers/ata/pata_palmld.c +++ b/drivers/ata/pata_palmld.c @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations palmld_port_ops = { +static const struct ata_port_operations palmld_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 118c28e..e025309 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -153,14 +153,14 @@ static struct scsi_host_template pcmcia_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pcmcia_port_ops = { +static const struct ata_port_operations pcmcia_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, .set_mode = pcmcia_set_mode, }; -static struct ata_port_operations pcmcia_8bit_port_ops = { +static const struct ata_port_operations pcmcia_8bit_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_data_xfer_8bit, .cable_detect = ata_cable_40wire, @@ -243,7 +243,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) unsigned long io_base, ctl_base; void __iomem *io_addr, *ctl_addr; int n_ports = 1; - struct ata_port_operations *ops = &pcmcia_port_ops; + const struct ata_port_operations *ops = &pcmcia_port_ops; /* Set up attributes in order to probe card and get resources */ pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index b183511..c3ee285 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pdc2027x_pata100_ops = { +static const struct ata_port_operations pdc2027x_pata100_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma = pdc2027x_check_atapi_dma, .cable_detect = pdc2027x_cable_detect, .prereset = pdc2027x_prereset, }; -static struct ata_port_operations pdc2027x_pata133_ops = { +static const struct ata_port_operations pdc2027x_pata133_ops = { .inherits = &pdc2027x_pata100_ops, .mode_filter = pdc2027x_mode_filter, .set_piomode = pdc2027x_set_piomode, diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index c39f213..13968a7 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pdc2024x_port_ops = { +static const struct ata_port_operations pdc2024x_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = { .sff_exec_command = pdc202xx_exec_command, }; -static struct ata_port_operations pdc2026x_port_ops = { +static const struct 
ata_port_operations pdc2026x_port_ops = { .inherits = &pdc2024x_port_ops, .check_atapi_dma = pdc2026x_check_atapi_dma, diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c index cb01bf9..3eb315d 100644 --- a/drivers/ata/pata_piccolo.c +++ b/drivers/ata/pata_piccolo.c @@ -67,7 +67,7 @@ static struct scsi_host_template tosh_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations tosh_port_ops = { +static const struct ata_port_operations tosh_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = tosh_set_piomode, diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 50400fa..8af063f 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pata_platform_port_ops = { +static const struct ata_port_operations pata_platform_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_unknown, diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c index 45879dc..165a9f9 100644 --- a/drivers/ata/pata_qdi.c +++ b/drivers/ata/pata_qdi.c @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations qdi6500_port_ops = { +static const struct ata_port_operations qdi6500_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = qdi_qc_issue, .sff_data_xfer = qdi_data_xfer, @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = { .set_piomode = qdi6500_set_piomode, }; -static struct ata_port_operations qdi6580_port_ops = { +static const struct ata_port_operations qdi6580_port_ops = { .inherits = &qdi6500_port_ops, .set_piomode = qdi6580_set_piomode, }; diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index 8574b31..53789fe 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations radisys_pata_ops = { +static const struct ata_port_operations radisys_pata_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = radisys_qc_issue, .cable_detect = ata_cable_unknown, diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index 0ffd631..7c6c78d 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -69,7 +69,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance) return IRQ_HANDLED; } -static struct ata_port_operations rb532_pata_port_ops = { +static const struct ata_port_operations rb532_pata_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer32, }; diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c index 5fbe9b1..1d0cdc0 100644 --- a/drivers/ata/pata_rdc.c +++ b/drivers/ata/pata_rdc.c @@ -273,7 +273,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) pci_write_config_byte(dev, 0x48, udma_enable); } -static struct ata_port_operations rdc_pata_ops = { +static const struct ata_port_operations rdc_pata_ops = { .inherits = &ata_bmdma32_port_ops, .cable_detect = rdc_pata_cable_detect, .set_piomode = rdc_set_piomode, diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c index 4a454a8..b95ac1a 100644 --- a/drivers/ata/pata_rz1000.c +++ b/drivers/ata/pata_rz1000.c @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = { 
ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations rz1000_port_ops = { +static const struct ata_port_operations rz1000_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_mode = rz1000_set_mode, diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index e2c1825..9b54b46 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = { .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations sc1200_port_ops = { +static const struct ata_port_operations sc1200_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .qc_issue = sc1200_qc_issue, diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index d9db3f8..30b8b63 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -927,7 +927,7 @@ static struct scsi_host_template scc_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations scc_pata_ops = { +static const struct ata_port_operations scc_pata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = scc_set_piomode, diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c index e97b32f..464f5a5 100644 --- a/drivers/ata/pata_sch.c +++ b/drivers/ata/pata_sch.c @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sch_pata_ops = { +static const struct ata_port_operations sch_pata_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = sch_set_piomode, diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 86dd714..ac4a72d 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -300,7 +300,7 @@ static struct scsi_host_template serverworks_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations serverworks_osb4_port_ops = { +static const struct ata_port_operations serverworks_osb4_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = serverworks_cable_detect, .mode_filter = serverworks_osb4_filter, @@ -308,7 +308,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = { .set_dmamode = serverworks_set_dmamode, }; -static struct ata_port_operations serverworks_csb_port_ops = { +static const struct ata_port_operations serverworks_csb_port_ops = { .inherits = &serverworks_osb4_port_ops, .mode_filter = serverworks_csb_filter, }; diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index d3190d7..68dc724 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -214,8 +214,7 @@ static struct scsi_host_template sil680_sht = { ATA_BMDMA_SHT(DRV_NAME), }; - -static struct ata_port_operations sil680_port_ops = { +static const struct ata_port_operations sil680_port_ops = { .inherits = &ata_bmdma32_port_ops, .sff_exec_command = sil680_sff_exec_command, .cable_detect = sil680_cable_detect, diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index 60cea13..048e50e 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sis_133_for_sata_ops = { +static const struct ata_port_operations sis_133_for_sata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = sis_133_set_piomode, .set_dmamode = sis_133_set_dmamode, .cable_detect = sis_133_cable_detect, }; -static struct ata_port_operations sis_base_ops = { +static const struct 
ata_port_operations sis_base_ops = { .inherits = &ata_bmdma_port_ops, .prereset = sis_pre_reset, }; -static struct ata_port_operations sis_133_ops = { +static const struct ata_port_operations sis_133_ops = { .inherits = &sis_base_ops, .set_piomode = sis_133_set_piomode, .set_dmamode = sis_133_set_dmamode, .cable_detect = sis_133_cable_detect, }; -static struct ata_port_operations sis_133_early_ops = { +static const struct ata_port_operations sis_133_early_ops = { .inherits = &sis_base_ops, .set_piomode = sis_100_set_piomode, .set_dmamode = sis_133_early_set_dmamode, .cable_detect = sis_66_cable_detect, }; -static struct ata_port_operations sis_100_ops = { +static const struct ata_port_operations sis_100_ops = { .inherits = &sis_base_ops, .set_piomode = sis_100_set_piomode, .set_dmamode = sis_100_set_dmamode, .cable_detect = sis_66_cable_detect, }; -static struct ata_port_operations sis_66_ops = { +static const struct ata_port_operations sis_66_ops = { .inherits = &sis_base_ops, .set_piomode = sis_old_set_piomode, .set_dmamode = sis_66_set_dmamode, .cable_detect = sis_66_cable_detect, }; -static struct ata_port_operations sis_old_ops = { +static const struct ata_port_operations sis_old_ops = { .inherits = &sis_base_ops, .set_piomode = sis_old_set_piomode, .set_dmamode = sis_old_set_dmamode, diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 98548f6..1939a5f 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sl82c105_port_ops = { +static const struct ata_port_operations sl82c105_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_defer = sl82c105_qc_defer, .bmdma_start = sl82c105_bmdma_start, diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 0d1f89e..581bdb1 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations triflex_port_ops = { +static const struct ata_port_operations triflex_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_start = triflex_bmdma_start, .bmdma_stop = triflex_bmdma_stop, diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index ac8d7d9..9d02ee8 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -441,7 +441,7 @@ static struct scsi_host_template via_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations via_port_ops = { +static const struct ata_port_operations via_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = via_cable_detect, .set_piomode = via_set_piomode, @@ -452,7 +452,7 @@ static struct ata_port_operations via_port_ops = { .mode_filter = via_mode_filter, }; -static struct ata_port_operations via_port_ops_noirq = { +static const struct ata_port_operations via_port_ops_noirq = { .inherits = &via_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, }; diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c index 6d8619b..ad511c4 100644 --- a/drivers/ata/pata_winbond.c +++ b/drivers/ata/pata_winbond.c @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations winbond_port_ops = { +static const struct ata_port_operations winbond_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = winbond_data_xfer, .cable_detect = ata_cable_40wire, diff --git a/drivers/ata/pdc_adma.c 
b/drivers/ata/pdc_adma.c index adbe042..c13c7b7 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -146,7 +146,7 @@ static struct scsi_host_template adma_ata_sht = { .dma_boundary = ADMA_DMA_BOUNDARY, }; -static struct ata_port_operations adma_ata_ops = { +static const struct ata_port_operations adma_ata_ops = { .inherits = &ata_sff_port_ops, .lost_interrupt = ATA_OP_NULL, diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 61c89b5..b92a38a 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1261,7 +1261,7 @@ static struct scsi_host_template sata_fsl_sht = { .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations sata_fsl_ops = { +static const struct ata_port_operations sata_fsl_ops = { .inherits = &sata_pmp_port_ops, .qc_defer = ata_std_qc_defer, diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index a36149e..645288b 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -705,7 +705,7 @@ static int inic_port_start(struct ata_port *ap) return 0; } -static struct ata_port_operations inic_port_ops = { +static const struct ata_port_operations inic_port_ops = { .inherits = &sata_port_ops, .check_atapi_dma = inic_check_atapi_dma, diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index d57f6eb..d14b001 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -663,7 +663,7 @@ static struct scsi_host_template mv6_sht = { .dma_boundary = MV_DMA_BOUNDARY, }; -static struct ata_port_operations mv5_ops = { +static const struct ata_port_operations mv5_ops = { .inherits = &ata_sff_port_ops, .lost_interrupt = ATA_OP_NULL, @@ -683,7 +683,7 @@ static struct ata_port_operations mv5_ops = { .port_stop = mv_port_stop, }; -static struct ata_port_operations mv6_ops = { +static const struct ata_port_operations mv6_ops = { .inherits = &ata_bmdma_port_ops, .lost_interrupt = ATA_OP_NULL, @@ -717,7 +717,7 @@ static struct ata_port_operations mv6_ops = { .port_stop = mv_port_stop, }; -static struct ata_port_operations mv_iie_ops = { +static const struct ata_port_operations mv_iie_ops = { .inherits = &mv6_ops, .dev_config = ATA_OP_NULL, .qc_prep = mv_qc_prep_iie, diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 2116113..7441a40 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -465,7 +465,7 @@ static struct scsi_host_template nv_swncq_sht = { * cases. Define nv_hardreset() which only kicks in for post-boot * probing and use it for all variants. 
*/ -static struct ata_port_operations nv_generic_ops = { +static const struct ata_port_operations nv_generic_ops = { .inherits = &ata_bmdma_port_ops, .lost_interrupt = ATA_OP_NULL, .scr_read = nv_scr_read, @@ -473,20 +473,20 @@ static struct ata_port_operations nv_generic_ops = { .hardreset = nv_hardreset, }; -static struct ata_port_operations nv_nf2_ops = { +static const struct ata_port_operations nv_nf2_ops = { .inherits = &nv_generic_ops, .freeze = nv_nf2_freeze, .thaw = nv_nf2_thaw, }; -static struct ata_port_operations nv_ck804_ops = { +static const struct ata_port_operations nv_ck804_ops = { .inherits = &nv_generic_ops, .freeze = nv_ck804_freeze, .thaw = nv_ck804_thaw, .host_stop = nv_ck804_host_stop, }; -static struct ata_port_operations nv_adma_ops = { +static const struct ata_port_operations nv_adma_ops = { .inherits = &nv_ck804_ops, .check_atapi_dma = nv_adma_check_atapi_dma, @@ -510,7 +510,7 @@ static struct ata_port_operations nv_adma_ops = { .host_stop = nv_adma_host_stop, }; -static struct ata_port_operations nv_swncq_ops = { +static const struct ata_port_operations nv_swncq_ops = { .inherits = &nv_generic_ops, .qc_defer = ata_std_qc_defer, diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index f03ad48..4b0fc9e 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -196,7 +196,7 @@ static const struct ata_port_operations pdc_common_ops = { .error_handler = pdc_error_handler, }; -static struct ata_port_operations pdc_sata_ops = { +static const struct ata_port_operations pdc_sata_ops = { .inherits = &pdc_common_ops, .cable_detect = pdc_sata_cable_detect, .freeze = pdc_sata_freeze, @@ -209,14 +209,14 @@ static struct ata_port_operations pdc_sata_ops = { /* First-generation chips need a more restrictive ->check_atapi_dma op, and ->freeze/thaw that ignore the hotplug controls. 
*/ -static struct ata_port_operations pdc_old_sata_ops = { +static const struct ata_port_operations pdc_old_sata_ops = { .inherits = &pdc_sata_ops, .freeze = pdc_freeze, .thaw = pdc_thaw, .check_atapi_dma = pdc_old_sata_check_atapi_dma, }; -static struct ata_port_operations pdc_pata_ops = { +static const struct ata_port_operations pdc_pata_ops = { .inherits = &pdc_common_ops, .cable_detect = pdc_pata_cable_detect, .freeze = pdc_freeze, diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index daeebf1..64fdbb8 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -131,7 +131,7 @@ static struct scsi_host_template qs_ata_sht = { .dma_boundary = QS_DMA_BOUNDARY, }; -static struct ata_port_operations qs_ata_ops = { +static const struct ata_port_operations qs_ata_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = qs_check_atapi_dma, diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 3a4f842..24382d6 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = { .sg_tablesize = ATA_MAX_PRD }; -static struct ata_port_operations sil_ops = { +static const struct ata_port_operations sil_ops = { .inherits = &ata_bmdma32_port_ops, .dev_config = sil_dev_config, .set_mode = sil_set_mode, diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index be7726d..cbaee83 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -389,7 +389,7 @@ static struct scsi_host_template sil24_sht = { .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations sil24_ops = { +static const struct ata_port_operations sil24_ops = { .inherits = &sata_pmp_port_ops, .qc_defer = sil24_qc_defer, diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 2bfe3ae..c80f777 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sis_ops = { +static const struct ata_port_operations sis_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = sis_scr_read, .scr_write = sis_scr_write, diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 7d9db4a..0954357 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = { }; -static struct ata_port_operations k2_sata_ops = { +static const struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index bedd518..feb4028 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -249,7 +249,7 @@ static struct scsi_host_template pdc_sata_sht = { }; /* TODO: inherit from base port_ops after converting to new EH */ -static struct ata_port_operations pdc_20621_ops = { +static const struct ata_port_operations pdc_20621_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = pdc_check_atapi_dma, diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index b8578c3..41e5c92 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -80,7 +80,7 @@ static struct scsi_host_template uli_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations uli_ops = { +static const struct ata_port_operations uli_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = uli_scr_read, .scr_write = uli_scr_write, diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c 
index c51b8d2..1a305a3 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations svia_base_ops = { +static const struct ata_port_operations svia_base_ops = { .inherits = &ata_bmdma_port_ops, .sff_tf_load = svia_tf_load, }; -static struct ata_port_operations vt6420_sata_ops = { +static const struct ata_port_operations vt6420_sata_ops = { .inherits = &svia_base_ops, .freeze = svia_noop_freeze, .prereset = vt6420_prereset, .bmdma_start = vt6420_bmdma_start, }; -static struct ata_port_operations vt6421_pata_ops = { +static const struct ata_port_operations vt6421_pata_ops = { .inherits = &svia_base_ops, .cable_detect = vt6421_pata_cable_detect, .set_piomode = vt6421_set_pio_mode, .set_dmamode = vt6421_set_dma_mode, }; -static struct ata_port_operations vt6421_sata_ops = { +static const struct ata_port_operations vt6421_sata_ops = { .inherits = &svia_base_ops, .scr_read = svia_scr_read, .scr_write = svia_scr_write, }; -static struct ata_port_operations vt8251_ops = { +static const struct ata_port_operations vt8251_ops = { .inherits = &svia_base_ops, .hardreset = sata_std_hardreset, .scr_read = vt8251_scr_read, diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index b777176..f39a85b 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -300,7 +300,7 @@ static struct scsi_host_template vsc_sata_sht = { }; -static struct ata_port_operations vsc_sata_ops = { +static const struct ata_port_operations vsc_sata_ops = { .inherits = &ata_bmdma_port_ops, /* The IRQ handling is not quite standard SFF behaviour so we cannot use the default lost interrupt handler */ diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index 6d44f07..46bd3fe 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -78,7 +78,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 9d18644..59f2c35 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) { PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the descriptor kfree (tx_descr); @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { dump_skb ("<<<", vc, skb); // VC layer stats - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); // end of our responsability atm_vcc->push (atm_vcc, skb); @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) { } else { PRINTK (KERN_INFO, "dropped over-size frame"); // should we count this? - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); } } else { @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { } if (check_area (skb->data, skb->len)) { - atomic_inc(&atm_vcc->stats->tx_err); + atomic_inc_unchecked(&atm_vcc->stats->tx_err); return -ENOMEM; // ? 
} diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index b910181..7e24995 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); if (dev_data) return 0; - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOLINK; } size = skb->len+sizeof(struct atmtcp_hdr); @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) if (!new_skb) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOBUFS; } hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); return 0; } @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); read_unlock(&vcc_sklist_lock); if (!out_vcc) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); goto done; } skb_pull(skb,sizeof(struct atmtcp_hdr)); @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) __net_timestamp(new_skb); skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); done: if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 90a5a7c..e4a0947 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc) DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { length = ATM_CELL_SIZE-1; /* no HEC */ @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc) size); } eff = length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc) "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", vcc->dev->number,vcc->vci,length,size << 2,descr); length = eff = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } skb = eff ? 
atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; @@ -771,7 +771,7 @@ rx_dequeued++; vcc->push(vcc,skb); pushed++; } - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } wake_up(&eni_dev->rx_wait); } @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev) PCI_DMA_TODEVICE); if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&eni_dev->tx_wait); dma_complete++; } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 6e600af..9c2485b 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) } } - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); fs_dprintk (FS_DEBUG_TXMEM, "i"); fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) #endif skb_put (skb, qe->p1 & 0xffff); ATM_SKB(skb)->vcc = atm_vcc; - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); atm_vcc->push (atm_vcc, skb); @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q) kfree (pe); } if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; case 0x1f: /* Reassembly abort: no buffers. */ /* Silently increment error counter. */ if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; default: /* Hmm. Haven't written the code to handle the others yet... -- REW */ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index da8f176..aa78cc6 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e) #endif /* check error condition */ if (*entry->status & STATUS_ERROR) - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); else - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } } @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp if (skb == NULL) { DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e) DPRINTK(2, "damaged PDU on %d.%d.%d\n", fore200e->atm_dev->number, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) goto retry_here; } - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); fore200e->tx_sat++; DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is 
%08x\n", diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 56c2e99..73cb44b 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { hprintk("HBUF_ERR! (cid 0x%x)\n", cid); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto return_host_buffers; } @@ -1803,7 +1803,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) RBRQ_LEN_ERR(he_dev->rbrq_head) ? "LEN_ERR" : "", vcc->vpi, vcc->vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto return_host_buffers; } @@ -1862,7 +1862,7 @@ he_service_rbrq(struct he_dev *he_dev, int group) vcc->push(vcc, skb); spin_lock(&he_dev->global_lock); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return_host_buffers: ++pdus_assembled; @@ -2207,7 +2207,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid) tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); - atomic_inc(&tpd->vcc->stats->tx_err); + atomic_inc_unchecked(&tpd->vcc->stats->tx_err); } pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); return; @@ -2619,7 +2619,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } @@ -2630,7 +2630,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } #endif @@ -2642,7 +2642,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2684,7 +2684,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2715,7 +2715,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb) __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 54720ba..a1cd73f 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) { { struct atm_vcc * vcc = ATM_SKB(skb)->vcc; // VC layer stats - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); __net_timestamp(skb); // end of our responsability vcc->push (vcc, skb); @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) { dev->tx_iovec = NULL; // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the skb hrz_kfree_skb (skb); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 98657a6..50d4a30 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc) else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } atomic_dec(&scq->used); @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) 
if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for aal0.\n", card->name); - atomic_add(i, &vcc->stats->rx_drop); + atomic_add_unchecked(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", card->name); - atomic_add(i - 1, &vcc->stats->rx_drop); + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); dev_kfree_skb(sb); break; } @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) "(CDC: %08x)\n", card->name, len, rpp->len, readl(SAR_REG_CDC)); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (stat & SAR_RSQE_CRC) { RXPRINTK("%s: AAL5 CRC error.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (skb_queue_len(&rpp->queue) > 1) { @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) RXPRINTK("%s: Can't alloc RX skb.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (!atm_charge(vcc, skb->truesize)) { @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return; } @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); if (skb->truesize > SAR_FB_SIZE_3) add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card) if (vcc->qos.aal != ATM_AAL0) { RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", card->name, vpi, vci); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto drop; } if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for AAL0.\n", card->name); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto drop; } @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); drop: skb_pull(queue, 64); @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) if (vc == NULL) { printk("%s: NULL connection in send().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (!test_bit(VCF_TX, &vc->flags)) { printk("%s: Trying to transmit on a non-tx VC.\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) break; default: printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("%s: No scatter-gather yet.\n", card->name); - 
atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam) err = queue_skb(card, vc, skb, oam); if (err) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return err; } @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags) skb = dev_alloc_skb(64); if (!skb) { printk("%s: Out of memory in send_oam().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOMEM; } atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index ee9ddeb..357368e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev) status = (u_short) (buf_desc_ptr->desc_mode); if (status & (RX_CER | RX_PTE | RX_OFL)) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("IA: bad packet, dropping it");) if (status & RX_CER) { IF_ERR(printk(" cause: packet CRC error\n");) @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev) len = dma_addr - buf_addr; if (len > iadev->rx_buf_sz) { printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out_free_desc; } @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *dev) ia_vcc = INPH_IA_VCC(vcc); if (ia_vcc == NULL) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); dev_kfree_skb_any(skb); atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *dev) if ((length > iadev->rx_buf_sz) || (length > (skb->len - sizeof(struct cpcs_trailer)))) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) dev_kfree_skb_any(skb); @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *dev) IF_RX(printk("rx_dle_intr: skb push");) vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); iadev->rx_pkt_cnt++; } INCR_DLE: @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) { struct k_sonet_stats *stats; stats = &PRIV(_ia_dev[board])->sonet_stats; - printk("section_bip: %d\n", atomic_read(&stats->section_bip)); - printk("line_bip : %d\n", atomic_read(&stats->line_bip)); - printk("path_bip : %d\n", atomic_read(&stats->path_bip)); - printk("line_febe : %d\n", atomic_read(&stats->line_febe)); - printk("path_febe : %d\n", atomic_read(&stats->path_febe)); - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); + printk("uncorr_hcs : %d\n", 
atomic_read_unchecked(&stats->uncorr_hcs)); + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); } ia_cmds.status = 0; break; @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { if ((desc == 0) || (desc > iadev->num_tx_desc)) { IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); if (vcc->pop) vcc->pop(vcc, skb); else @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { ATM_DESC(skb) = vcc->vci; skb_queue_tail(&iadev->tx_dma_q, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); iadev->tx_pkt_cnt++; /* Increment transaction counter */ writel(2, iadev->dma+IPHASE5575_TX_COUNTER); #if 0 /* add flow control logic */ - if (atomic_read(&vcc->stats->tx) % 20 == 0) { + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { if (iavcc->vc_desc_cnt > 10) { vcc->tx_quota = vcc->tx_quota * 3 / 4; printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index cbe15a8..9b6f726 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai, vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); lanai_endtx(lanai, lvcc); lanai_free_skb(lvcc->tx.atmvcc, skb); - atomic_inc(&lvcc->tx.atmvcc->stats->tx); + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); } /* Try to fill the buffer - don't call unless there is backlog */ @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr) ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; __net_timestamp(skb); lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); - atomic_inc(&lvcc->rx.atmvcc->stats->rx); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); out: lvcc->rx.buf.ptr = end; cardvcc_write(lvcc, endptr, vcc_rxreadptr); @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " "vcc %d\n", lanai->number, (unsigned int) s, vci); lanai->stats.service_rxnotaal5++; - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); return 0; } if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) int bytes; read_unlock(&vcc_sklist_lock); DPRINTK("got trashed rx pdu on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_trash++; bytes = (SERVICE_GET_END(s) * 16) - (((unsigned long) lvcc->rx.buf.ptr) - @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) } if (s & SERVICE_STREAM) { read_unlock(&vcc_sklist_lock); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_stream++; printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " "PDU on VCI %d!\n", lanai->number, vci); @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s) return 0; } DPRINTK("got rx crc error on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_rxcrc++; lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); diff --git 
a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index b7473a6..3736ea4 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1722,7 +1722,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1730,7 +1730,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1738,7 +1738,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1746,7 +1746,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1791,11 +1791,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) if (push_scqe(card, vc, scq, &scqe, skb) != 0) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EIO; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } @@ -2110,14 +2110,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) { printk("nicstar%d: Can't allocate buffers for aal0.\n", card->index); - atomic_add(i,&vcc->stats->rx_drop); + atomic_add_unchecked(i,&vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } @@ -2132,7 +2132,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } @@ -2181,7 +2181,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); NS_SKB(iovb)->iovcnt = 0; iovb->len = 0; @@ -2201,7 +2201,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) printk("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); @@ -2215,7 +2215,7 @@ static void dequeue_rx(ns_dev *card, 
ns_rsqe *rsqe) printk("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; @@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) printk(" - PDU size mismatch.\n"); else printk(".\n"); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; @@ -2255,7 +2255,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2267,7 +2267,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ @@ -2282,7 +2282,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2294,7 +2294,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, skb); @@ -2305,7 +2305,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2319,7 +2319,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, sb); @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) if (hb == NULL) { printk("nicstar%d: Out of huge buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; @@ -2392,7 +2392,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) } else dev_kfree_skb_any(hb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2426,7 +2426,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) #endif /* NS_USE_DESTRUCTORS */ __net_timestamp(hb); vcc->push(vcc, hb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 3613422..99624fe 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg) } atm_charge(vcc, skb->truesize); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); break; case PKT_STATUS: @@ -1023,7 +1023,7 @@ static uint32_t fpga_tx(struct solos_card *card) vcc = SKB_CB(oldskb)->vcc; if (vcc) { - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); solos_pop(vcc, oldskb); } else dev_kfree_skb_irq(oldskb); diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c index da4b91f..79150c9 100644 --- a/drivers/atm/suni.c +++ b/drivers/atm/suni.c @@ -50,8 
+50,8 @@ static DEFINE_SPINLOCK(sunis_lock); #define ADD_LIMITED(s,v) \ - atomic_add((v),&stats->s); \ - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); + atomic_add_unchecked((v),&stats->s); \ + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); static void suni_hz(unsigned long from_timer) diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c index c45ae05..ed6f0c4 100644 --- a/drivers/atm/uPD98402.c +++ b/drivers/atm/uPD98402.c @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze struct sonet_stats tmp; int error = 0; - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); if (zero && !error) { @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) #define ADD_LIMITED(s,v) \ - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } static void stat_event(struct atm_dev *dev) @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev) if (reason & uPD98402_INT_PFM) stat_event(dev); if (reason & uPD98402_INT_PCO) { (void) GET(PCOCR); /* clear interrupt cause */ - atomic_add(GET(HECCT), + atomic_add_unchecked(GET(HECCT), &PRIV(dev)->sonet_stats.uncorr_hcs); } if ((reason & uPD98402_INT_RFO) && @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev) PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | uPD98402_INT_LOS),PIMR); /* enable them */ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); return 0; } diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 702acce..c13b134 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); } if (!size) { dev_kfree_skb_irq(skb); - if (vcc) atomic_inc(&vcc->stats->rx_err); + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | skb_queue_head(&zatm_vcc->backlog,skb); break; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 43412c0..3db9a62 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c @@ -818,7 +818,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) return 
-EFAULT; - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) return -EFAULT; client = agp_find_client_by_pid(reserve.pid); diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index e38fe2be..64355e5 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -1056,7 +1056,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), - { } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index f8e7d89..5b17903 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -429,7 +429,7 @@ static int hpet_release(struct inode *inode, struct file *file) return 0; } -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int); static long hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -568,7 +568,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, } static int -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel) { struct hpet_timer __iomem *timer; struct hpet __iomem *hpet; @@ -611,11 +611,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) { struct hpet_info info; + memset(&info, 0, sizeof(info)); + if (devp->hd_ireqfreq) info.hi_ireqfreq = hpet_time_div(hpetp, devp->hd_ireqfreq); - else - info.hi_ireqfreq = 0; info.hi_flags = readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; info.hi_hpet = hpetp->hp_which; @@ -1015,7 +1015,7 @@ static struct acpi_driver hpet_acpi_driver = { }, }; -static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL }; static int __init hpet_init(void) { diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h index 54381eba..41e362b 100644 --- a/drivers/char/hvc_console.h +++ b/drivers/char/hvc_console.h @@ -82,6 +82,7 @@ extern int hvc_instantiate(uint32_t vtermno, int index, /* register a vterm for hvc tty operation (module_init or hotplug add) */ extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data, const struct hv_ops *ops, int outbuf_size); + /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ extern int hvc_remove(struct hvc_struct *hp); diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c index bedc6c1..2269494 100644 --- a/drivers/char/hvcs.c +++ b/drivers/char/hvcs.c @@ -270,7 +270,7 @@ struct hvcs_struct { unsigned int index; struct tty_struct *tty; - int open_count; + atomic_t open_count; /* * Used to tell the driver kernel_thread what operations need to take @@ -420,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut spin_lock_irqsave(&hvcsd->lock, flags); - if (hvcsd->open_count > 0) { + if (atomic_read(&hvcsd->open_count) > 0) { spin_unlock_irqrestore(&hvcsd->lock, flags); printk(KERN_INFO "HVCS: vterm state unchanged. 
" "The hvcs device node is still in use.\n"); @@ -1136,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) if ((retval = hvcs_partner_connect(hvcsd))) goto error_release; - hvcsd->open_count = 1; + atomic_set(&hvcsd->open_count, 1); hvcsd->tty = tty; tty->driver_data = hvcsd; @@ -1170,7 +1170,7 @@ fast_open: spin_lock_irqsave(&hvcsd->lock, flags); kref_get(&hvcsd->kref); - hvcsd->open_count++; + atomic_inc(&hvcsd->open_count); hvcsd->todo_mask |= HVCS_SCHED_READ; spin_unlock_irqrestore(&hvcsd->lock, flags); @@ -1214,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) hvcsd = tty->driver_data; spin_lock_irqsave(&hvcsd->lock, flags); - if (--hvcsd->open_count == 0) { + if (atomic_dec_and_test(&hvcsd->open_count)) { vio_disable_interrupts(hvcsd->vdev); @@ -1240,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) free_irq(irq, hvcsd); kref_put(&hvcsd->kref, destroy_hvcs_struct); return; - } else if (hvcsd->open_count < 0) { + } else if (atomic_read(&hvcsd->open_count) < 0) { printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" " is missmanaged.\n", - hvcsd->vdev->unit_address, hvcsd->open_count); + hvcsd->vdev->unit_address, atomic_read(&hvcsd->open_count)); } spin_unlock_irqrestore(&hvcsd->lock, flags); @@ -1259,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty) spin_lock_irqsave(&hvcsd->lock, flags); /* Preserve this so that we know how many kref refs to put */ - temp_open_count = hvcsd->open_count; + temp_open_count = atomic_read(&hvcsd->open_count); /* * Don't kref put inside the spinlock because the destruction @@ -1274,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty) hvcsd->tty->driver_data = NULL; hvcsd->tty = NULL; - hvcsd->open_count = 0; + atomic_set(&hvcsd->open_count, 0); /* This will drop any buffered data on the floor which is OK in a hangup * scenario. */ @@ -1345,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty, * the middle of a write operation? This is a crummy place to do this * but we want to keep it all in the spinlock. 
*/ - if (hvcsd->open_count <= 0) { + if (atomic_read(&hvcsd->open_count) <= 0) { spin_unlock_irqrestore(&hvcsd->lock, flags); return -ENODEV; } @@ -1419,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty) { struct hvcs_struct *hvcsd = tty->driver_data; - if (!hvcsd || hvcsd->open_count <= 0) + if (!hvcsd || atomic_read(&hvcsd->open_count) <= 0) return 0; return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 4f3f8c9..3708979 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -414,7 +414,7 @@ struct ipmi_smi { struct proc_dir_entry *proc_dir; char proc_dir_name[10]; - atomic_t stats[IPMI_NUM_STATS]; + atomic_unchecked_t stats[IPMI_NUM_STATS]; /* * run_to_completion duplicate of smb_info, smi_info @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_inc_stat(intf, stat) \ - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) static int is_lan_addr(struct ipmi_addr *addr) { @@ -2817,7 +2817,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, INIT_LIST_HEAD(&intf->cmd_rcvrs); init_waitqueue_head(&intf->waitq); for (i = 0; i < IPMI_NUM_STATS; i++) - atomic_set(&intf->stats[i], 0); + atomic_set_unchecked(&intf->stats[i], 0); intf->proc_dir = NULL; diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index b1a3014..50af3c6 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -286,7 +286,7 @@ struct smi_info { unsigned char slave_addr; /* Counters and things for the proc filesystem. 
*/ - atomic_t stats[SI_NUM_STATS]; + atomic_unchecked_t stats[SI_NUM_STATS]; struct task_struct *thread; @@ -294,9 +294,9 @@ struct smi_info { }; #define smi_inc_stat(smi, stat) \ - atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) #define smi_get_stat(smi, stat) \ - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) #define SI_MAX_PARMS 4 @@ -3151,7 +3151,7 @@ static int try_smi_init(struct smi_info *new_smi) atomic_set(&new_smi->req_events, 0); new_smi->run_to_completion = 0; for (i = 0; i < SI_NUM_STATS; i++) - atomic_set(&new_smi->stats[i], 0); + atomic_set_unchecked(&new_smi->stats[i], 0); new_smi->interrupt_disabled = 1; atomic_set(&new_smi->stop_operation, 0); diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c index 54109dc..bb46541 100644 --- a/drivers/char/keyboard.c +++ b/drivers/char/keyboard.c @@ -640,6 +640,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) kbd->kbdmode == VC_MEDIUMRAW) && value != KVAL(K_SAK)) return; /* SAK is allowed even in raw mode */ + +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + { + void *func = fn_handler[value]; + if (func == fn_show_state || func == fn_show_ptregs || + func == fn_show_mem) + return; + } +#endif + fn_handler[value](vc); } @@ -1392,7 +1402,7 @@ static const struct input_device_id kbd_ids[] = { .evbit = { BIT_MASK(EV_SND) }, }, - { }, /* Terminating entry */ + { 0 }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, kbd_ids); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 1f528fa..ab9048b 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,10 @@ # include #endif +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) +extern struct file_operations grsec_fops; +#endif + static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { @@ -120,6 +125,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, while (count > 0) { unsigned long remaining; + char *temp; sz = size_inside_page(p, count); @@ -135,7 +141,23 @@ static ssize_t read_mem(struct file *file, char __user *buf, if (!ptr) return -EFAULT; - remaining = copy_to_user(buf, ptr, sz); +#ifdef CONFIG_PAX_USERCOPY + temp = kmalloc(sz, GFP_KERNEL); + if (!temp) { + unxlate_dev_mem_ptr(p, ptr); + return -ENOMEM; + } + memcpy(temp, ptr, sz); +#else + temp = ptr; +#endif + + remaining = copy_to_user(buf, temp, sz); + +#ifdef CONFIG_PAX_USERCOPY + kfree(temp); +#endif + unxlate_dev_mem_ptr(p, ptr); if (remaining) return -EFAULT; @@ -161,6 +183,11 @@ static ssize_t write_mem(struct file *file, const char __user *buf, if (!valid_phys_addr_range(p, count)) return -EFAULT; +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_mem_write(); + return -EPERM; +#endif + written = 0; #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED @@ -316,6 +343,11 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) &vma->vm_page_prot)) return -EINVAL; +#ifdef CONFIG_GRKERNSEC_KMEM + if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma)) + return -EPERM; +#endif + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); @@ -398,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; - ssize_t low_count, read, sz; + ssize_t low_count, read, sz, err = 
0; char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ - int err = 0; read = 0; if (p < (unsigned long) high_memory) { @@ -422,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, } #endif while (low_count > 0) { + char *temp; + sz = size_inside_page(p, low_count); /* @@ -431,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf, */ kbuf = xlate_dev_kmem_ptr((char *)p); - if (copy_to_user(buf, kbuf, sz)) +#ifdef CONFIG_PAX_USERCOPY + temp = kmalloc(sz, GFP_KERNEL); + if (!temp) + return -ENOMEM; + memcpy(temp, kbuf, sz); +#else + temp = kbuf; +#endif + + err = copy_to_user(buf, temp, sz); + +#ifdef CONFIG_PAX_USERCOPY + kfree(temp); +#endif + + if (err) return -EFAULT; buf += sz; p += sz; @@ -530,6 +578,11 @@ static ssize_t write_kmem(struct file *file, const char __user *buf, char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ int err = 0; +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_kmem_write(); + return -EPERM; +#endif + if (p < (unsigned long) high_memory) { unsigned long to_write = min_t(unsigned long, count, (unsigned long)high_memory - p); @@ -731,6 +784,16 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) static int open_port(struct inode * inode, struct file * filp) { +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_open_port(); + return -EPERM; +#endif + + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +static int open_mem(struct inode * inode, struct file * filp) +{ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; } @@ -738,7 +801,6 @@ static int open_port(struct inode * inode, struct file * filp) #define full_lseek null_lseek #define write_zero write_null #define read_full read_zero -#define open_mem open_port #define open_kmem open_mem #define open_oldmem open_mem @@ -855,6 +917,9 @@ static const struct memdev { #ifdef CONFIG_CRASH_DUMP [12] = { "oldmem", 0, &oldmem_fops, NULL }, #endif +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL }, +#endif }; static int memory_open(struct inode *inode, struct file *filp) diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index bdae832..5d17cfa 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c @@ -2105,6 +2105,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) { *ops = tty_ldisc_N_TTY; ops->owner = NULL; - ops->refcount = ops->flags = 0; + atomic_set(&ops->refcount, 0); + ops->flags = 0; } EXPORT_SYMBOL_GPL(n_tty_inherit_ops); diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 66d2917..796ad19 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -245,7 +245,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf, spin_unlock_irq(&rtc_lock); - if (copy_to_user(buf, contents, tmp - contents)) + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) return -EFAULT; *ppos = i; @@ -434,7 +434,10 @@ static const struct file_operations nvram_fops = { static struct miscdevice nvram_dev = { NVRAM_MINOR, "nvram", - &nvram_fops + &nvram_fops, + {NULL, NULL}, + NULL, + NULL }; static int __init nvram_init(void) diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c index 1a2c2c3..8c15d4e 100644 --- a/drivers/char/pcmcia/ipwireless/tty.c +++ b/drivers/char/pcmcia/ipwireless/tty.c @@ -51,7 +51,7 @@ struct ipw_tty { int tty_type; struct ipw_network *network; struct tty_struct *linux_tty; - int open_count; + atomic_t open_count; unsigned int control_lines; struct mutex 
ipw_tty_mutex; int tx_bytes_queued; @@ -127,10 +127,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) mutex_unlock(&tty->ipw_tty_mutex); return -ENODEV; } - if (tty->open_count == 0) + if (atomic_read(&tty->open_count) == 0) tty->tx_bytes_queued = 0; - tty->open_count++; + atomic_inc(&tty->open_count); tty->linux_tty = linux_tty; linux_tty->driver_data = tty; @@ -146,9 +146,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp) static void do_ipw_close(struct ipw_tty *tty) { - tty->open_count--; - - if (tty->open_count == 0) { + if (atomic_dec_return(&tty->open_count) == 0) { struct tty_struct *linux_tty = tty->linux_tty; if (linux_tty != NULL) { @@ -169,7 +167,7 @@ static void ipw_hangup(struct tty_struct *linux_tty) return; mutex_lock(&tty->ipw_tty_mutex); - if (tty->open_count == 0) { + if (atomic_read(&tty->open_count) == 0) { mutex_unlock(&tty->ipw_tty_mutex); return; } @@ -198,7 +196,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data, return; } - if (!tty->open_count) { + if (!atomic_read(&tty->open_count)) { mutex_unlock(&tty->ipw_tty_mutex); return; } @@ -240,7 +238,7 @@ static int ipw_write(struct tty_struct *linux_tty, return -ENODEV; mutex_lock(&tty->ipw_tty_mutex); - if (!tty->open_count) { + if (!atomic_read(&tty->open_count)) { mutex_unlock(&tty->ipw_tty_mutex); return -EINVAL; } @@ -280,7 +278,7 @@ static int ipw_write_room(struct tty_struct *linux_tty) if (!tty) return -ENODEV; - if (!tty->open_count) + if (!atomic_read(&tty->open_count)) return -EINVAL; room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; @@ -322,7 +320,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty) if (!tty) return 0; - if (!tty->open_count) + if (!atomic_read(&tty->open_count)) return 0; return tty->tx_bytes_queued; @@ -403,7 +401,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file) if (!tty) return -ENODEV; - if (!tty->open_count) + if (!atomic_read(&tty->open_count)) return -EINVAL; return get_control_lines(tty); @@ -419,7 +417,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file, if (!tty) return -ENODEV; - if (!tty->open_count) + if (!atomic_read(&tty->open_count)) return -EINVAL; return set_control_lines(tty, set, clear); @@ -433,7 +431,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file, if (!tty) return -ENODEV; - if (!tty->open_count) + if (!atomic_read(&tty->open_count)) return -EINVAL; /* FIXME: Exactly how is the tty object locked here .. 
*/ @@ -582,7 +580,7 @@ void ipwireless_tty_free(struct ipw_tty *tty) against a parallel ioctl etc */ mutex_lock(&ttyj->ipw_tty_mutex); } - while (ttyj->open_count) + while (atomic_read(&ttyj->open_count)) do_ipw_close(ttyj); ipwireless_disassociate_network_ttys(network, ttyj->channel_idx); diff --git a/drivers/char/pty.c b/drivers/char/pty.c index d83a431..37c0dff 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -677,7 +677,18 @@ static int ptmx_open(struct inode *inode, struct file *filp) return ret; } -static struct file_operations ptmx_fops; +static const struct file_operations ptmx_fops = { + .llseek = no_llseek, + .read = tty_read, + .write = tty_write, + .poll = tty_poll, + .unlocked_ioctl = tty_ioctl, + .compat_ioctl = tty_compat_ioctl, + .open = ptmx_open, + .release = tty_release, + .fasync = tty_fasync, +}; + static void __init unix98_pty_init(void) { @@ -731,9 +742,6 @@ static void __init unix98_pty_init(void) register_sysctl_table(pty_root_table); /* Now create the /dev/ptmx special device */ - tty_default_fops(&ptmx_fops); - ptmx_fops.open = ptmx_open; - cdev_init(&ptmx_cdev, &ptmx_fops); if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0) diff --git a/drivers/char/random.c b/drivers/char/random.c index 8d85587..a534af7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -254,8 +254,13 @@ /* * Configuration information */ +#ifdef CONFIG_GRKERNSEC_RANDNET +#define INPUT_POOL_WORDS 512 +#define OUTPUT_POOL_WORDS 128 +#else #define INPUT_POOL_WORDS 128 #define OUTPUT_POOL_WORDS 32 +#endif #define SEC_XFER_SIZE 512 #define EXTRACT_SIZE 10 @@ -293,10 +298,17 @@ static struct poolinfo { int poolwords; int tap1, tap2, tap3, tap4, tap5; } poolinfo_table[] = { +#ifdef CONFIG_GRKERNSEC_RANDNET + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */ + { 512, 411, 308, 208, 104, 1 }, + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ + { 128, 103, 76, 51, 25, 1 }, +#else /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ { 128, 103, 76, 51, 25, 1 }, /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ { 32, 26, 20, 14, 7, 1 }, +#endif #if 0 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ { 2048, 1638, 1231, 819, 411, 1 }, @@ -902,7 +914,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, extract_buf(r, tmp); i = min_t(int, nbytes, EXTRACT_SIZE); - if (copy_to_user(buf, tmp, i)) { + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { ret = -EFAULT; break; } @@ -1205,7 +1217,7 @@ EXPORT_SYMBOL(generate_random_uuid); #include static int min_read_thresh = 8, min_write_thresh; -static int max_read_thresh = INPUT_POOL_WORDS * 32; +static int max_read_thresh = OUTPUT_POOL_WORDS * 32; static int max_write_thresh = INPUT_POOL_WORDS * 32; static char sysctl_bootid[16]; diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 73f66d0..5d90cbc 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -491,7 +491,7 @@ static struct sonypi_device { spinlock_t fifo_lock; wait_queue_head_t fifo_proc_list; struct fasync_struct *fifo_async; - int open_count; + atomic_t open_count; int model; struct input_dev *input_jog_dev; struct input_dev *input_key_dev; @@ -898,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on) static int sonypi_misc_release(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); - sonypi_device.open_count--; + atomic_dec(&sonypi_device.open_count); 
mutex_unlock(&sonypi_device.lock); return 0; } @@ -907,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); /* Flush input queue on first open */ - if (!sonypi_device.open_count) + if (!atomic_read(&sonypi_device.open_count)) kfifo_reset(&sonypi_device.fifo); - sonypi_device.open_count++; + atomic_inc(&sonypi_device.open_count); mutex_unlock(&sonypi_device.lock); return 0; diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c index 0636520..b29fde6 100644 --- a/drivers/char/tpm/tpm_bios.c +++ b/drivers/char/tpm/tpm_bios.c @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos) event = addr; if ((event->event_type == 0 && event->event_size == 0) || - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) + (event->event_size >= limit - addr - sizeof(struct tcpa_event))) return NULL; return addr; @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v, return NULL; if ((event->event_type == 0 && event->event_size == 0) || - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) + (event->event_size >= limit - v - sizeof(struct tcpa_event))) return NULL; (*pos)++; @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) int i; for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) - seq_putc(m, data[i]); + if (!seq_putc(m, data[i])) + return -EFAULT; return 0; } @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log *log) log->bios_event_log_end = log->bios_event_log + len; virt = acpi_os_map_memory(start, len); + if (!virt) { + kfree(log->bios_event_log); + log->bios_event_log = NULL; + return -EFAULT; + } memcpy(log->bios_event_log, virt, len); diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 507441a..53d5b60 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -136,20 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ DEFINE_MUTEX(tty_mutex); EXPORT_SYMBOL(tty_mutex); -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *); -static unsigned int tty_poll(struct file *, poll_table *); static int tty_open(struct inode *, struct file *); long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -#ifdef CONFIG_COMPAT -static long tty_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); -#else -#define tty_compat_ioctl NULL -#endif -static int tty_fasync(int fd, struct file *filp, int on); static void release_tty(struct tty_struct *tty, int idx); static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); @@ -871,7 +861,7 @@ EXPORT_SYMBOL(start_tty); * read calls may be outstanding in parallel. */ -static ssize_t tty_read(struct file *file, char __user *buf, size_t count, +ssize_t tty_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int i; @@ -899,6 +889,8 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, return i; } +EXPORT_SYMBOL(tty_read); + void tty_write_unlock(struct tty_struct *tty) { mutex_unlock(&tty->atomic_write_lock); @@ -1048,7 +1040,7 @@ void tty_write_message(struct tty_struct *tty, char *msg) * write method will not be invoked in parallel for each device. 
*/ -static ssize_t tty_write(struct file *file, const char __user *buf, +ssize_t tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct tty_struct *tty; @@ -1075,6 +1067,8 @@ static ssize_t tty_write(struct file *file, const char __user *buf, return ret; } +EXPORT_SYMBOL(tty_write); + ssize_t redirected_tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -1897,6 +1891,8 @@ got_driver: +EXPORT_SYMBOL(tty_release); + /** * tty_poll - check tty status * @filp: file being polled @@ -1909,7 +1905,7 @@ got_driver: * may be re-entered freely by other callers. */ -static unsigned int tty_poll(struct file *filp, poll_table *wait) +unsigned int tty_poll(struct file *filp, poll_table *wait) { struct tty_struct *tty; struct tty_ldisc *ld; @@ -1926,7 +1922,9 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait) return ret; } -static int tty_fasync(int fd, struct file *filp, int on) +EXPORT_SYMBOL(tty_poll); + +int tty_fasync(int fd, struct file *filp, int on) { struct tty_struct *tty; unsigned long flags; @@ -1970,6 +1968,8 @@ out: return retval; } +EXPORT_SYMBOL(tty_fasync); + /** * tiocsti - fake input character * @tty: tty to fake input into @@ -2602,8 +2602,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return retval; } +EXPORT_SYMBOL(tty_ioctl); + #ifdef CONFIG_COMPAT -static long tty_compat_ioctl(struct file *file, unsigned int cmd, +long tty_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file->f_dentry->d_inode; @@ -2627,6 +2629,9 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd, return retval; } + +EXPORT_SYMBOL(tty_compat_ioctl); + #endif /* @@ -3070,11 +3075,6 @@ struct tty_struct *get_current_tty(void) } EXPORT_SYMBOL_GPL(get_current_tty); -void tty_default_fops(struct file_operations *fops) -{ - *fops = tty_fops; -} - /* * Initialize the console device. This is called *early*, so * we can't necessarily depend on lots of kernel help here. 
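A recurring hardening pattern in the pty/tty_io hunks above is worth spelling out: ptmx_fops stops being a writable structure that tty_default_fops() filled in and then patched at run time, and becomes a fully initialized const table, which is why tty_read, tty_write, tty_poll, tty_ioctl, tty_compat_ioctl, tty_release and tty_fasync are un-static'd and exported. The sketch below is a minimal userspace analogue of that trade-off, not code from the patch; every name in it is invented for illustration.

#include <stdio.h>

struct ops {
	void (*open)(void);
	void (*close)(void);
};

static void generic_open(void)  { puts("open"); }
static void generic_close(void) { puts("close"); }
static void special_open(void)  { puts("special open"); }

/* old style: copy a template, then patch one member at run time;
 * the object must stay writable, so it cannot live in .rodata */
static struct ops patched_ops;

/* new style: every member known at compile time, so the table can be
 * const and mapped read-only, which is the hardening goal above */
static const struct ops const_ops = {
	.open  = special_open,
	.close = generic_close,
};

int main(void)
{
	patched_ops = (struct ops){ .open = generic_open, .close = generic_close };
	patched_ops.open = special_open;	/* runtime patching, as old ptmx_fops did */

	patched_ops.open();
	const_ops.open();
	const_ops.close();
	return 0;
}

Once the table is const, an attacker with an arbitrary kernel write can no longer redirect its function pointers, at the cost of exporting the handlers that used to be private to tty_io.c.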
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index 236628f..235d0f9 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -76,7 +76,7 @@ static void put_ldisc(struct tty_ldisc *ld) if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { struct tty_ldisc_ops *ldo = ld->ops; - ldo->refcount--; + atomic_dec(&ldo->refcount); module_put(ldo->owner); spin_unlock_irqrestore(&tty_ldisc_lock, flags); @@ -111,7 +111,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc) spin_lock_irqsave(&tty_ldisc_lock, flags); tty_ldiscs[disc] = new_ldisc; new_ldisc->num = disc; - new_ldisc->refcount = 0; + atomic_set(&new_ldisc->refcount, 0); spin_unlock_irqrestore(&tty_ldisc_lock, flags); return ret; @@ -139,7 +139,7 @@ int tty_unregister_ldisc(int disc) return -EINVAL; spin_lock_irqsave(&tty_ldisc_lock, flags); - if (tty_ldiscs[disc]->refcount) + if (atomic_read(&tty_ldiscs[disc]->refcount)) ret = -EBUSY; else tty_ldiscs[disc] = NULL; @@ -160,7 +160,7 @@ static struct tty_ldisc_ops *get_ldops(int disc) if (ldops) { ret = ERR_PTR(-EAGAIN); if (try_module_get(ldops->owner)) { - ldops->refcount++; + atomic_inc(&ldops->refcount); ret = ldops; } } @@ -173,7 +173,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops) unsigned long flags; spin_lock_irqsave(&tty_ldisc_lock, flags); - ldops->refcount--; + atomic_dec(&ldops->refcount); module_put(ldops->owner); spin_unlock_irqrestore(&tty_ldisc_lock, flags); } diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 85cf230..156d86c 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; - if (!capable(CAP_SYS_TTY_CONFIG)) - perm = 0; - switch (cmd) { case KDGKBENT: key_map = key_maps[s]; @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str val = (i ? K_HOLE : K_NOSUCHMAP); return put_user(val, &user_kbe->kb_value); case KDSKBENT: + if (!capable(CAP_SYS_TTY_CONFIG)) + perm = 0; + if (!perm) return -EPERM; + if (!i && v == K_NOSUCHMAP) { /* deallocate map */ key_map = key_maps[s]; @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) int i, j, k; int ret; - if (!capable(CAP_SYS_TTY_CONFIG)) - perm = 0; - kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); if (!kbs) { ret = -ENOMEM; @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) kfree(kbs); return ((p && *p) ? -EOVERFLOW : 0); case KDSKBSENT: + if (!capable(CAP_SYS_TTY_CONFIG)) + perm = 0; + if (!perm) { ret = -EPERM; goto reterr; diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 0310ffa..be7917e 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = { .release = cpuidle_state_sysfs_release, }; -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) { kobject_put(&device->kobjs[i]->kobj); wait_for_completion(&device->kobjs[i]->kobj_unregister); diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index efca934..dbdb52c 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -100,11 +100,11 @@ extern const char *edac_mem_types[]; #else /* !CONFIG_EDAC_DEBUG */ -#define debugf0( ... ) -#define debugf1( ... ) -#define debugf2( ... 
) -#define debugf3( ... ) -#define debugf4( ... ) +#define debugf0( ... ) do {} while (0) +#define debugf1( ... ) do {} while (0) +#define debugf2( ... ) do {} while (0) +#define debugf3( ... ) do {} while (0) +#define debugf4( ... ) do {} while (0) #endif /* !CONFIG_EDAC_DEBUG */ diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index c200c2f..474f1ef 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -776,7 +776,7 @@ static void edac_inst_grp_release(struct kobject *kobj) } /* Intermediate show/store table */ -static struct sysfs_ops inst_grp_ops = { +static const struct sysfs_ops inst_grp_ops = { .show = inst_grp_show, .store = inst_grp_store }; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 5bf106b..3748455 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1195,8 +1195,7 @@ static int init_iso_resource(struct client *client, int ret; if ((request->channels == 0 && request->bandwidth == 0) || - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || - request->bandwidth < 0) + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) return -EINVAL; r = kmalloc(sizeof(*r), GFP_KERNEL); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index d464672..c3d886d 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -387,11 +387,6 @@ void __init dmi_scan_machine(void) } } else { - /* - * no iounmap() for that ioremap(); it would be a no-op, but - * it's so early in setup that sucker gets confused into doing - * what it shouldn't if we actually call it. - */ p = dmi_ioremap(0xF0000, 0x10000); if (p == NULL) goto error; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 25d70d6..9ff22b2 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -262,7 +262,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, struct drm_crtc *tmp; int crtc_mask = 1; - WARN(!crtc, "checking null crtc?"); + BUG_ON(!crtc); dev = crtc->dev; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index c9736ed..0ada3c8 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -449,7 +449,7 @@ long drm_ioctl(struct file *filp, dev = file_priv->minor->dev; atomic_inc(&dev->ioctl_count); - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index e7aace2..b5b4f37 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -67,7 +67,7 @@ static int drm_setup(struct drm_device * dev) } for (i = 0; i < ARRAY_SIZE(dev->counts); i++) - atomic_set(&dev->counts[i], 0); + atomic_set_unchecked(&dev->counts[i], 0); dev->sigdata.lock = NULL; @@ -131,9 +131,9 @@ int drm_open(struct inode *inode, struct file *filp) retcode = drm_open_helper(inode, filp, dev); if (!retcode) { - atomic_inc(&dev->counts[_DRM_STAT_OPENS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]); spin_lock(&dev->count_lock); - if (!dev->open_count++) { + if (atomic_inc_return(&dev->open_count) == 1) { spin_unlock(&dev->count_lock); retcode = drm_setup(dev); goto out; @@ -474,7 +474,7 @@ int drm_release(struct inode *inode, struct file *filp) lock_kernel(); - DRM_DEBUG("open_count = %d\n", dev->open_count); + DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count)); if 
(dev->driver->preclose) dev->driver->preclose(dev, file_priv); @@ -486,7 +486,7 @@ int drm_release(struct inode *inode, struct file *filp) DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", task_pid_nr(current), (long)old_encode_dev(file_priv->minor->device), - dev->open_count); + atomic_read(&dev->open_count)); /* if the master has gone away we can't do anything with the lock */ if (file_priv->minor->master) @@ -567,9 +567,9 @@ int drm_release(struct inode *inode, struct file *filp) * End inline drm_release */ - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]); spin_lock(&dev->count_lock); - if (!--dev->open_count) { + if (atomic_dec_and_test(&dev->open_count)) { if (atomic_read(&dev->ioctl_count)) { DRM_ERROR("Device busy: %d\n", atomic_read(&dev->ioctl_count)); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index f0f6c6b..d31451d 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data) struct drm_local_map *map; struct drm_map_list *r_list; - /* Hardcoded from _DRM_FRAME_BUFFER, - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; + static const char * const types[] = { + [_DRM_FRAME_BUFFER] = "FB", + [_DRM_REGISTERS] = "REG", + [_DRM_SHM] = "SHM", + [_DRM_AGP] = "AGP", + [_DRM_SCATTER_GATHER] = "SG", + [_DRM_CONSISTENT] = "PCI", + [_DRM_GEM] = "GEM" }; const char *type; int i; @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data) map = r_list->map; if (!map) continue; - if (map->type < 0 || map->type > 5) + if (map->type >= ARRAY_SIZE(types)) type = "??"; else type = types[map->type]; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9b9ff46..4ea724c 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data, stats->data[i].value = (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); else - stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]); stats->data[i].type = dev->types[i]; } diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index e2f70a5..c703e86 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) if (drm_lock_take(&master->lock, lock->context)) { master->lock.file_priv = file_priv; master->lock.lock_time = jiffies; - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ } @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; } - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]); /* kernel_context_switch isn't used by any of the x86 drm * modules but is required by the Sparc driver. 
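The DRM hunks above convert the per-device dev->counts[] statistics from atomic_t to atomic_unchecked_t and dev->open_count to a regular atomic_t. As far as this patch goes, atomic_unchecked_t appears to be the opt-out type for PaX's reference-counter overflow checking: statistics that may legitimately wrap keep plain atomic semantics, while real reference counts stay on the instrumented atomic_t. The real type and its *_unchecked operations are defined elsewhere in the patch; the snippet below is only a userspace C11 analogue of such a wrap-tolerant counter, with invented names.

#include <stdatomic.h>
#include <stdio.h>

struct stat_counter {
	atomic_uint v;	/* unsigned, so wraparound is well-defined and harmless */
};

static void stat_inc(struct stat_counter *c)
{
	atomic_fetch_add_explicit(&c->v, 1, memory_order_relaxed);
}

static unsigned int stat_read(struct stat_counter *c)
{
	return atomic_load_explicit(&c->v, memory_order_relaxed);
}

int main(void)
{
	static struct stat_counter ioctls;	/* zero-initialized, like atomic_set(..., 0) */
	int i;

	for (i = 0; i < 5; i++)
		stat_inc(&ioctls);		/* e.g. one per ioctl, as drm_ioctl() does */

	printf("ioctls seen: %u\n", stat_read(&ioctls));
	return 0;
}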
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 997d917..34e6717 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -953,8 +953,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data, dma->buflist[vertex->idx], vertex->discard, vertex->used); - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); - atomic_inc(&dev->counts[_DRM_STAT_DMA]); + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1116,8 +1116,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data, i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, mc->last_render); - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); - atomic_inc(&dev->counts[_DRM_STAT_DMA]); + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 0d6ff64..2743ae3 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -125,23 +125,23 @@ struct intel_dvo_dev_ops { * * \return singly-linked list of modes or NULL if no modes found. */ - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo); /** * Clean up driver-specific bits of the output */ - void (*destroy) (struct intel_dvo_device *dvo); + void (* const destroy) (struct intel_dvo_device *dvo); /** * Debugging hook to dump device registers to log file */ - void (*dump_regs)(struct intel_dvo_device *dvo); + void (* const dump_regs)(struct intel_dvo_device *dvo); }; -extern struct intel_dvo_dev_ops sil164_ops; -extern struct intel_dvo_dev_ops ch7xxx_ops; -extern struct intel_dvo_dev_ops ivch_ops; -extern struct intel_dvo_dev_ops tfp410_ops; -extern struct intel_dvo_dev_ops ch7017_ops; +extern const struct intel_dvo_dev_ops sil164_ops; +extern const struct intel_dvo_dev_ops ch7xxx_ops; +extern const struct intel_dvo_dev_ops ivch_ops; +extern const struct intel_dvo_dev_ops tfp410_ops; +extern const struct intel_dvo_dev_ops ch7017_ops; #endif /* _INTEL_DVO_H */ diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 14d5980..df7e908 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -402,7 +402,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops ch7017_ops = { +const struct intel_dvo_dev_ops ch7017_ops = { .init = ch7017_init, .detect = ch7017_detect, .mode_valid = ch7017_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 6f1944b..b5d996d 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -322,7 +322,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops ch7xxx_ops = { +const struct intel_dvo_dev_ops ch7xxx_ops = { .init = ch7xxx_init, .detect = ch7xxx_detect, .mode_valid = ch7xxx_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index a2ec3f4..01bb226 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -412,7 +412,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo) } } 
-struct intel_dvo_dev_ops ivch_ops= { +const struct intel_dvo_dev_ops ivch_ops= { .init = ivch_init, .dpms = ivch_dpms, .mode_valid = ivch_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 9b8e676..8600683 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -254,7 +254,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops sil164_ops = { +const struct intel_dvo_dev_ops sil164_ops = { .init = sil164_init, .detect = sil164_detect, .mode_valid = sil164_mode_valid, diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 56f6642..98fbcce 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -295,7 +295,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo) } } -struct intel_dvo_dev_ops tfp410_ops = { +const struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index bdd39fb..d35c5d1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1348,7 +1348,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) bool can_switch; spin_lock(&dev->count_lock); - can_switch = (dev->open_count == 0); + can_switch = (atomic_read(&dev->open_count) == 0); spin_unlock(&dev->count_lock); return can_switch; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 194e0c4..315bd7c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -497,7 +497,7 @@ const struct dev_pm_ops i915_pm_ops = { .restore = i915_pm_resume, }; -static struct vm_operations_struct i915_gem_vm_ops = { +static const struct vm_operations_struct i915_gem_vm_ops = { .fault = i915_gem_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dc5d2ef..320b6f6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -471,7 +471,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, } if (!access_ok(VERIFY_WRITE, - (char __user *)(uintptr_t)args->data_ptr, + (char __user *) (uintptr_t)args->data_ptr, args->size)) { ret = -EFAULT; goto err; @@ -939,7 +939,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, } if (!access_ok(VERIFY_READ, - (char __user *)(uintptr_t)args->data_ptr, + (char __user *) (uintptr_t)args->data_ptr, args->size)) { ret = -EFAULT; goto err; diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 406228f..8933e98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -58,7 +58,7 @@ static int nv40_set_intensity(struct backlight_device *bd) return 0; } -static struct backlight_ops nv40_bl_ops = { +static const struct backlight_ops nv40_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = nv40_get_intensity, .update_status = nv40_set_intensity, @@ -81,7 +81,7 @@ static int nv50_set_intensity(struct backlight_device *bd) return 0; } -static struct backlight_ops nv50_bl_ops = { +static const struct backlight_ops nv50_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = nv50_get_intensity, .update_status = nv50_set_intensity, diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 
b02a231..07087b0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -395,7 +395,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) bool can_switch; spin_lock(&dev->count_lock); - can_switch = (dev->open_count == 0); + can_switch = (atomic_read(&dev->open_count) == 0); spin_unlock(&dev->count_lock); return can_switch; } diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 607241c..54b4fe6 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename) regex_t mask_rex; regmatch_t match[4]; char buf[1024]; - size_t end; + long end; int len; int done = 0; int r; unsigned o; struct offset *offset; char last_reg_s[10]; - int last_reg; + unsigned long last_reg; if (regcomp (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a718463..2569728 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -562,7 +562,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) bool can_switch; spin_lock(&dev->count_lock); - can_switch = (dev->open_count == 0); + can_switch = (atomic_read(&dev->open_count) == 0); spin_unlock(&dev->count_lock); return can_switch; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 2d95376..a65db3c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -561,7 +561,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { error = freq - current_freq; - error = error < 0 ? 0xffffffff : error; + error = (int32_t)error < 0 ? 
0xffffffff : error; } else error = abs(current_freq - freq); vco_diff = abs(vco - best_vco); diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index b3ba44c..a09abbf 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file * if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, sarea_priv->nbox * sizeof(depth_boxes[0]))) return -EFAULT; @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_getparam_t *param = data; - int value; + int value = 0; DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e9918d8..90f13b4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -601,8 +601,9 @@ void radeon_ttm_fini(struct radeon_device *rdev) DRM_INFO("radeon: ttm finalized\n"); } -static struct vm_operations_struct radeon_ttm_vm_ops; -static const struct vm_operations_struct *ttm_vm_ops = NULL; +extern int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +extern void ttm_bo_vm_open(struct vm_area_struct *vma); +extern void ttm_bo_vm_close(struct vm_area_struct *vma); static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { @@ -610,17 +611,22 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct radeon_device *rdev; int r; - bo = (struct ttm_buffer_object *)vma->vm_private_data; - if (bo == NULL) { + bo = (struct ttm_buffer_object *)vma->vm_private_data; + if (!bo) return VM_FAULT_NOPAGE; - } rdev = radeon_get_rdev(bo->bdev); mutex_lock(&rdev->vram_mutex); - r = ttm_vm_ops->fault(vma, vmf); + r = ttm_bo_vm_fault(vma, vmf); mutex_unlock(&rdev->vram_mutex); return r; } +static const struct vm_operations_struct radeon_ttm_vm_ops = { + .fault = radeon_ttm_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close +}; + int radeon_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; @@ -633,18 +639,11 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) file_priv = (struct drm_file *)filp->private_data; rdev = file_priv->minor->dev->dev_private; - if (rdev == NULL) { + if (!rdev) return -EINVAL; - } r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); - if (unlikely(r != 0)) { + if (r) return r; - } - if (unlikely(ttm_vm_ops == NULL)) { - ttm_vm_ops = vma->vm_ops; - radeon_ttm_vm_ops = *ttm_vm_ops; - radeon_ttm_vm_ops.fault = &radeon_ttm_fault; - } vma->vm_ops = &radeon_ttm_vm_ops; return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 555ebb1..ea14dea 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -47,7 +47,7 @@ #include #define TTM_ASSERT_LOCKED(param) -#define TTM_DEBUG(fmt, arg...) +#define TTM_DEBUG(fmt, arg...) 
do {} while (0) #define TTM_BO_HASH_ORDER 13 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe6cb77..05e85a6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -69,11 +69,11 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, return best_bo; } -static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *) vma->vm_private_data; - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_device *bdev; unsigned long page_offset; unsigned long page_last; unsigned long pfn; @@ -84,6 +84,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) unsigned long address = (unsigned long)vmf->virtual_address; int retval = VM_FAULT_NOPAGE; + if (!bo) + return VM_FAULT_NOPAGE; + bdev = bo->bdev; + /* * Work around locking order reversal in fault / nopfn * between mmap_sem and bo_reserve: Perform a trylock operation @@ -212,22 +216,25 @@ out_unlock: ttm_bo_unreserve(bo); return retval; } +EXPORT_SYMBOL(ttm_bo_vm_fault); -static void ttm_bo_vm_open(struct vm_area_struct *vma) +void ttm_bo_vm_open(struct vm_area_struct *vma) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; (void)ttm_bo_reference(bo); } +EXPORT_SYMBOL(ttm_bo_vm_open); -static void ttm_bo_vm_close(struct vm_area_struct *vma) +void ttm_bo_vm_close(struct vm_area_struct *vma) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_unref(&bo); vma->vm_private_data = NULL; } +EXPORT_SYMBOL(ttm_bo_vm_close); static const struct vm_operations_struct ttm_bo_vm_ops = { .fault = ttm_bo_vm_fault, diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c index b170071..28ae90e 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/ttm/ttm_global.c @@ -36,7 +36,7 @@ struct ttm_global_item { struct mutex mutex; void *object; - int refcount; + atomic_t refcount; }; static struct ttm_global_item glob[TTM_GLOBAL_NUM]; @@ -49,7 +49,7 @@ void ttm_global_init(void) struct ttm_global_item *item = &glob[i]; mutex_init(&item->mutex); item->object = NULL; - item->refcount = 0; + atomic_set(&item->refcount, 0); } } @@ -59,7 +59,7 @@ void ttm_global_release(void) for (i = 0; i < TTM_GLOBAL_NUM; ++i) { struct ttm_global_item *item = &glob[i]; BUG_ON(item->object != NULL); - BUG_ON(item->refcount != 0); + BUG_ON(atomic_read(&item->refcount) != 0); } } @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) void *object; mutex_lock(&item->mutex); - if (item->refcount == 0) { + if (atomic_read(&item->refcount) == 0) { item->object = kzalloc(ref->size, GFP_KERNEL); if (unlikely(item->object == NULL)) { ret = -ENOMEM; @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) goto out_err; } - ++item->refcount; + atomic_inc(&item->refcount); ref->object = item->object; object = item->object; mutex_unlock(&item->mutex); @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref) struct ttm_global_item *item = &glob[ref->global_type]; mutex_lock(&item->mutex); - BUG_ON(item->refcount == 0); + BUG_ON(atomic_read(&item->refcount) == 0); BUG_ON(ref->object != item->object); - if (--item->refcount == 0) { + if (atomic_dec_and_test(&item->refcount)) { ref->release(ref); item->object = NULL; } 
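The ttm_global refcount change above is the same transformation applied throughout this section (hvcs and ipwireless open_count, sonypi, the DRM open_count, tty_ldisc_ops refcount): a plain int counter touched with ++/-- and compared against zero becomes an atomic_t driven by atomic_set(), atomic_inc(), atomic_dec_and_test() and atomic_read(), so the count stays coherent even on paths that are not fully serialized by the surrounding lock. A minimal userspace C11 analogue of that pattern, with illustrative names only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refcount;
};

static void obj_get(struct object *o)
{
	atomic_fetch_add(&o->refcount, 1);		/* was: o->refcount++; */
}

/* Returns true when the caller dropped the last reference,
 * mirroring atomic_dec_and_test() in the hunks above. */
static bool obj_put(struct object *o)
{
	return atomic_fetch_sub(&o->refcount, 1) == 1;	/* was: --o->refcount == 0 */
}

int main(void)
{
	struct object o;

	atomic_init(&o.refcount, 1);			/* was: o.refcount = 1; */
	obj_get(&o);

	if (!obj_put(&o))
		printf("still referenced: %d\n", atomic_load(&o.refcount));
	if (obj_put(&o))
		puts("last reference dropped, release the object");
	return 0;
}

atomic_dec_and_test() folds the decrement and the zero test into one atomic step, which is what lets hunks like hvcs_close() and ttm_global_item_unref() drop the "if (--count == 0)" idiom.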
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 7dca2fa..3c4b2cd 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -252,7 +252,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, mutex_lock(&minors_lock); dev = hidraw_table[minor]; - if (!dev) { + if (dev == NULL) { ret = -ENODEV; goto out; } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index c24d2fa..9ae67a5 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -616,7 +616,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return put_user(HID_VERSION, (int __user *)arg); case HIDIOCAPPLICATION: - if (arg < 0 || arg >= hid->maxapplication) + if (arg >= hid->maxapplication) return -EINVAL; for (i = 0; i < hid->maxcollection; i++) diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 8364932..e40d5a6 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -138,7 +138,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static const struct pci_device_id k8temp_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, - { 0 }, + { 0, 0, 0, 0, 0, 0, 0 }, }; MODULE_DEVICE_TABLE(pci, k8temp_ids); diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 79c2931..289a012 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c @@ -699,7 +699,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev) static const struct pci_device_id sis5595_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, sis5595_pci_ids); diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index f397ce7..97dfbe6 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c @@ -769,7 +769,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) static const struct pci_device_id via686a_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, via686a_pci_ids); diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index e6078c9..8aae2d3 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c @@ -699,7 +699,7 @@ static struct platform_driver vt8231_driver = { static const struct pci_device_id vt8231_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, vt8231_pci_ids); diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 400a88b..2392c35 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83791d_remove(struct i2c_client *client); -static int w83791d_read(struct i2c_client *client, u8 register); -static int w83791d_write(struct i2c_client *client, u8 register, u8 value); +static int w83791d_read(struct i2c_client *client, u8 reg); +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); static struct w83791d_data *w83791d_update_device(struct device *dev); #ifdef DEBUG diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c600811..07bd389 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -592,7 +592,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, i801_ids); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 6d14ac2..0d8f727 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -124,7 +124,7 @@ static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = { .ident = "IBM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, - { }, + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, @@ -491,7 +491,7 @@ static const struct pci_device_id piix4_ids[] = { PCI_DEVICE_ID_SERVERWORKS_HT1000SB) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1100LD) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (pci, piix4_ids); diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index e6f539e..52f75f0 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -471,7 +471,7 @@ static struct i2c_adapter sis630_adapter = { static const struct pci_device_id sis630_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (pci, sis630_ids); diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c index 86837f0..bdd4822 100644 --- a/drivers/i2c/busses/i2c-sis96x.c +++ b/drivers/i2c/busses/i2c-sis96x.c @@ -247,7 +247,7 @@ static struct i2c_adapter sis96x_adapter = { static const struct pci_device_id sis96x_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE (pci, sis96x_ids); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 2de76cc..74186a1 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) alignment = queue_dma_alignment(q) | q->dma_pad_mask; if ((unsigned long)buf & alignment || blk_rq_bytes(rq) & q->dma_pad_mask - || object_is_on_stack(buf)) + || object_starts_on_stack(buf)) drive->dma = 0; } } diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index adaefab..a85984c 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame) based upon DIF section and sequence */ -static void inline +static inline void frame_put_packet (struct frame *f, struct packet *p) { int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */ @@ -2179,7 +2179,7 @@ static const struct ieee1394_device_id dv1394_id_table[] = { .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = AVC_SW_VERSION_ENTRY & 0xffffff }, - { } + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table); diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index a4e9dcb..747d53d 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -446,7 +446,7 @@ static const struct ieee1394_device_id eth1394_id_table[] = { .specifier_id = ETHER1394_GASP_SPECIFIER_ID, .version = ETHER1394_GASP_VERSION, }, - {} + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table); diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c index e947d8f..6a966b9 100644 --- a/drivers/ieee1394/hosts.c +++ 
b/drivers/ieee1394/hosts.c @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, } static struct hpsb_host_driver dummy_driver = { + .name = "dummy", .transmit_packet = dummy_transmit_packet, .devctl = dummy_devctl, .isoctl = dummy_isoctl diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index d0dc1db..89ca24c 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -148,9 +148,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args) printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) /* Module Parameters */ -static int phys_dma = 1; +static int phys_dma; module_param(phys_dma, int, 0444); -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1)."); +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0)."); static void dma_trm_tasklet(unsigned long data); static void dma_trm_reset(struct dma_trm_ctx *d); @@ -3445,7 +3445,7 @@ static struct pci_device_id ohci1394_pci_tbl[] = { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, - { 0, }, + { 0, 0, 0, 0, 0, 0, 0 }, }; MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl); diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index b563d5e..6e11ccb 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -3002,7 +3002,7 @@ static const struct ieee1394_device_id raw1394_id_table[] = { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff}, - {} + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table); diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 4565cb5..3833a2c 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -289,7 +289,7 @@ static const struct ieee1394_device_id sbp2_id_table[] = { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = SBP2_SW_VERSION_ENTRY & 0xffffff}, - {} + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); @@ -2110,7 +2110,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver"); MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME); MODULE_LICENSE("GPL"); -static int sbp2_module_init(void) +static int __init sbp2_module_init(void) { int ret; diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index a42bd68..406d68f 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -1312,7 +1312,7 @@ static const struct ieee1394_device_id video1394_id_table[] = { .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff }, - { } + { 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(ieee1394, video1394_id_table); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ad63b79..d5b73ca 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] struct cm_counter_group { struct kobject obj; - atomic_long_t counter[CM_ATTR_COUNT]; + atomic_long_unchecked_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work, struct ib_mad_send_buf *msg = NULL; int ret; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. */ @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work) if (!cm_id_priv) return; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work) if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_RTU_COUNTER]); goto out; } @@ -2111,7 +2111,7 @@ static int cm_dreq_handler(struct cm_work *work) cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, dreq_msg->local_comm_id); if (!cm_id_priv) { - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); return -EINVAL; @@ -2132,7 +2132,7 @@ static int cm_dreq_handler(struct cm_work *work) case IB_CM_MRA_REP_RCVD: break; case IB_CM_TIMEWAIT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2146,7 +2146,7 @@ static int cm_dreq_handler(struct cm_work *work) cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); goto unlock; default: @@ -2502,7 +2502,7 @@ static int cm_mra_handler(struct cm_work *work) ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) - atomic_long_inc(&work->port-> + atomic_long_inc_unchecked(&work->port-> counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); goto out; @@ -2511,7 +2511,7 @@ static int cm_mra_handler(struct cm_work *work) break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); /* fall through */ default: @@ -2673,7 +2673,7 @@ static int cm_lap_handler(struct cm_work *work) case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2689,7 +2689,7 @@ static int cm_lap_handler(struct cm_work *work) cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); goto unlock; default: @@ -2973,7 +2973,7 @@ static int cm_sidr_req_handler(struct cm_work *work) cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; - atomic_long_add(1 + msg->retries, + atomic_long_add_unchecked(1 + msg->retries, &port->counter_group[CM_XMIT].counter[attr_index]); if (msg->retries) - atomic_long_add(msg->retries, + atomic_long_add_unchecked(msg->retries, &port->counter_group[CM_XMIT_RETRIES]. counter[attr_index]); @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); - atomic_long_inc(&port->counter_group[CM_RECV]. + atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, @@ -3595,7 +3595,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", - atomic_long_read(&group->counter[cm_attr->index])); + atomic_long_read_unchecked(&group->counter[cm_attr->index])); } static const struct sysfs_ops cm_counter_ops = { diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 3593983..b29f22a 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -50,6 +50,7 @@ #include #include #include +#include #include "qib_common.h" #include "qib_verbs.h" diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index d358ef8..6ab4e78 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -1240,7 +1240,7 @@ static struct serio_device_id atkbd_serio_ids[] = { .id = SERIO_ANY, .extra = SERIO_ANY, }, - { 0 } + { 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(serio, atkbd_serio_ids); diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c index c31ad11..d55f9df 100644 --- a/drivers/input/mouse/lifebook.c +++ b/drivers/input/mouse/lifebook.c @@ -123,7 +123,7 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL} }; void __init lifebook_module_init(void) diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 979c502..274e9ee 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -1460,7 +1460,7 @@ static struct serio_device_id psmouse_serio_ids[] = { .id = SERIO_ANY, .extra = SERIO_ANY, }, - { 0 } + { 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(serio, psmouse_serio_ids); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 705589d..a29c615 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -476,7 +476,7 @@ static void synaptics_process_packet(struct psmouse *psmouse) break; case 2: if (SYN_MODEL_PEN(priv->model_id)) - ; /* Nothing, treat a pen as a single finger */ + break; /* Nothing, treat a pen as a single finger */ break; case 4 ... 
15: if (SYN_CAP_PALMDETECT(priv->capabilities)) @@ -701,7 +701,6 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"), }, - }, { /* Toshiba Portege M300 */ @@ -710,9 +709,8 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"), DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"), }, - }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } #endif }; diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index f34b22b..04ac86f 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -754,7 +754,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, spin_unlock_irq(&client->packet_lock); - if (copy_to_user(buffer, data, count)) + if (count > sizeof(data) || copy_to_user(buffer, data, count)) return -EFAULT; return count; @@ -1051,7 +1051,7 @@ static struct input_handler mousedev_handler = { #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX static struct miscdevice psaux_mouse = { - PSMOUSE_MINOR, "psaux", &mousedev_fops + PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL }; static int psaux_registered; #endif diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 8c53926..5259b1e 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -183,7 +183,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; /* @@ -420,7 +420,7 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { @@ -494,7 +494,7 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; #ifdef CONFIG_PNP @@ -513,7 +513,7 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { @@ -537,7 +537,7 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; #endif @@ -611,7 +611,7 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), }, }, - { } + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } }; #endif /* CONFIG_X86 */ diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 9986648..08058a3 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -376,7 +376,7 @@ static struct serio_device_id serio_raw_serio_ids[] = { .id = SERIO_ANY, .extra = SERIO_ANY, }, - { 0 } + { 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids); diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 5d4befb..260e186 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, 
cs->commands_pending = 0; cs->cur_at_seq = 0; cs->gotfwver = -1; - cs->open_count = 0; + atomic_set(&cs->open_count, 0); cs->dev = NULL; cs->tty = NULL; cs->tty_dev = NULL; diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 8738b08..0b5ebb9 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h @@ -442,7 +442,7 @@ struct cardstate { spinlock_t cmdlock; unsigned curlen, cmdbytes; - unsigned open_count; + atomic_t open_count; struct tty_struct *tty; struct tasklet_struct if_wake_tasklet; unsigned control_state; diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index c9f28dd..1fc34b8 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tty, struct file *filp) return -ERESTARTSYS; tty->driver_data = cs; - ++cs->open_count; - - if (cs->open_count == 1) { + if (atomic_inc_return(&cs->open_count) == 1) { spin_lock_irqsave(&cs->lock, flags); cs->tty = tty; spin_unlock_irqrestore(&cs->lock, flags); @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *tty, struct file *filp) if (!cs->connected) gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ - else if (!cs->open_count) + else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); else { - if (!--cs->open_count) { + if (!atomic_dec_return(&cs->open_count)) { spin_lock_irqsave(&cs->lock, flags); cs->tty = NULL; spin_unlock_irqrestore(&cs->lock, flags); @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file, if (!cs->connected) { gig_dbg(DEBUG_IF, "not connected"); retval = -ENODEV; - } else if (!cs->open_count) + } else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); else { retval = 0; @@ -355,7 +353,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) if (!cs->connected) { gig_dbg(DEBUG_IF, "not connected"); retval = -ENODEV; - } else if (!cs->open_count) + } else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); else if (cs->mstate != MS_LOCKED) { dev_warn(cs->dev, "can't write to unlocked device\n"); @@ -389,7 +387,7 @@ static int if_write_room(struct tty_struct *tty) if (!cs->connected) { gig_dbg(DEBUG_IF, "not connected"); retval = -ENODEV; - } else if (!cs->open_count) + } else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); else if (cs->mstate != MS_LOCKED) { dev_warn(cs->dev, "can't write to unlocked device\n"); @@ -419,7 +417,7 @@ static int if_chars_in_buffer(struct tty_struct *tty) if (!cs->connected) gig_dbg(DEBUG_IF, "not connected"); - else if (!cs->open_count) + else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); else if (cs->mstate != MS_LOCKED) dev_warn(cs->dev, "can't write to unlocked device\n"); @@ -447,7 +445,7 @@ static void if_throttle(struct tty_struct *tty) if (!cs->connected) gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ - else if (!cs->open_count) + else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", __func__); else gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); @@ -471,7 +469,7 @@ static void if_unthrottle(struct tty_struct *tty) if (!cs->connected) gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ - else if (!cs->open_count) + else if (!atomic_read(&cs->open_count)) dev_warn(cs->dev, "%s: device not opened\n", 
__func__); else gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); @@ -502,7 +500,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old) goto out; } - if (!cs->open_count) { + if (!atomic_read(&cs->open_count)) { dev_warn(cs->dev, "%s: device not opened\n", __func__); goto out; } diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c index 2a57da59..26d9859 100644 --- a/drivers/isdn/hardware/avm/b1.c +++ b/drivers/isdn/hardware/avm/b1.c @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file) } if (left) { if (t4file->user) { - if (copy_from_user(buf, dp, left)) + if (left > sizeof(buf) || copy_from_user(buf, dp, left)) return -EFAULT; } else { memcpy(buf, dp, left); @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config) } if (left) { if (config->user) { - if (copy_from_user(buf, dp, left)) + if (left > sizeof(buf) || copy_from_user(buf, dp, left)) return -EFAULT; } else { memcpy(buf, dp, left); diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 2e847a9..16670ff 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card) if (count > len) count = len; if (user) { - if (copy_from_user(msg, buf, count)) + if (count > sizeof(msg) || copy_from_user(msg, buf, count)) return -EFAULT; } else memcpy(msg, buf, count); diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c index 485be8b..f0225bc 100644 --- a/drivers/isdn/sc/interrupt.c +++ b/drivers/isdn/sc/interrupt.c @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) } else if(callid>=0x0000 && callid<=0x7FFF) { + int len; + pr_debug("%s: Got Incoming Call\n", sc_adapter[card]->devicename); - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4])); - strcpy(setup.eazmsn, - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn); + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]), + sizeof(setup.phone)); + if (len >= sizeof(setup.phone)) + continue; + len = strlcpy(setup.eazmsn, + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, + sizeof(setup.eazmsn)); + if (len >= sizeof(setup.eazmsn)) + continue; setup.si1 = 7; setup.si2 = 0; setup.plan = 0; @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) * Handle a GetMyNumber Rsp */ if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array); + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, + rcvmsg.msg_data.byte_array, + sizeof(rcvmsg.msg_data.byte_array)); continue; } diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index efa2024..110df57 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -92,9 +92,17 @@ static __init int map_switcher(void) * it's worked so far. The end address needs +1 because __get_vm_area * allocates an extra guard page, so we need space for that. 
*/ + +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); +#else switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); +#endif + if (!switcher_vma) { err = -ENOMEM; printk("lguest: could not map switcher pages high\n"); diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 1cec02f..ade1e65 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -15,7 +15,7 @@ #define MAX_PMU_LEVEL 0xFF -static struct backlight_ops pmu_backlight_data; +static const struct backlight_ops pmu_backlight_data; static DEFINE_SPINLOCK(pmu_backlight_lock); static int sleeping, uses_pmu_bl; static u8 bl_curve[FB_BACKLIGHT_LEVELS]; @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops pmu_backlight_data = { +static const struct backlight_ops pmu_backlight_data = { .get_brightness = pmu_backlight_get_brightness, .update_status = pmu_backlight_update_status, diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 3d4fc0f..352bbd6 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2254,7 +2254,7 @@ static int pmu_sleep_valid(suspend_state_t state) && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0); } -static struct platform_suspend_ops pmu_pm_ops = { +static const struct platform_suspend_ops pmu_pm_ops = { .enter = powerbook_sleep, .valid = pmu_sleep_valid, }; diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 1742435..014d6d8 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -58,7 +58,7 @@ # if DEBUG > 0 # define PRINTK(x...) printk(KERN_DEBUG x) # else -# define PRINTK(x...) +# define PRINTK(x...) do {} while (0) # endif #endif diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 9924ea2..498330c 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -363,7 +363,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, if (!dev_size) return 0; - if ((start >= dev_size) || (start + len > dev_size)) { + if ((start >= dev_size) || (len > dev_size - start)) { DMWARN("%s: %s too small for target: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), diff --git a/drivers/md/md.c b/drivers/md/md.c index 7de96c2..d26e244 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6352,7 +6352,7 @@ static int md_seq_show(struct seq_file *seq, void *v) chunk_kb ? "KB" : "B"); if (bitmap->file) { seq_printf(seq, ", file: "); - seq_path(seq, &bitmap->file->f_path, " \t\n"); + seq_path(seq, &bitmap->file->f_path, " \t\n\\"); } seq_printf(seq, "\n"); @@ -6446,7 +6446,7 @@ static int is_mddev_idle(mddev_t *mddev, int init) struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - - atomic_read(&disk->sync_io); + atomic_read_unchecked(&disk->sync_io); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and * disk_stats is counted when it completes. 
diff --git a/drivers/md/md.h b/drivers/md/md.h index 9ec208e..63622a4 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -334,7 +334,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } struct mdk_personality diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index b915c39..2fad9b3 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -196,6 +196,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, const struct dvb_device *template, void *priv, int type) { struct dvb_device *dvbdev; + /* cannot be const, see this function */ struct file_operations *dvbdevfops; struct device *clsdev; int minor; diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index 482d0f3..48cb07a 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo while (i < count && dev->rdsin != dev->rdsout) readbuf[i++] = dev->rdsbuf[dev->rdsout++]; - if (copy_to_user(data, readbuf, i)) + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i)) return -EFAULT; return i; } diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index a6a5701..89144e1 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -6715,8 +6715,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); +#ifdef CONFIG_GRKERNSEC_HIDESYM + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + NULL, NULL); +#else len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); +#endif + /* * Rounding UP to nearest 4-kB boundary here... 
*/ diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h index 28e4788..d701a42 100644 --- a/drivers/message/fusion/mptdebug.h +++ b/drivers/message/fusion/mptdebug.h @@ -71,7 +71,7 @@ CMD; \ } #else -#define MPT_CHECK_LOGGING(IOC, CMD, BITS) +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0) #endif diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index ac000e8..36b2d80 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -437,6 +437,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached) return 0; } +static inline void +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) +{ + if (phy_info->port_details) { + phy_info->port_details->rphy = rphy; + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", + ioc->name, rphy)); + } + + if (rphy) { + dsaswideprintk(ioc, dev_printk(KERN_DEBUG, + &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", + ioc->name, rphy, rphy->dev.release)); + } +} + /* no mutex */ static void mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) @@ -475,23 +492,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info) return NULL; } -static inline void -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) -{ - if (phy_info->port_details) { - phy_info->port_details->rphy = rphy; - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", - ioc->name, rphy)); - } - - if (rphy) { - dsaswideprintk(ioc, dev_printk(KERN_DEBUG, - &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", - ioc->name, rphy, rphy->dev.release)); - } -} - static inline struct sas_port * mptsas_get_port(struct mptsas_phyinfo *phy_info) { diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 407cb84..92f14d2 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1244,15 +1244,16 @@ mptscsih_info(struct Scsi_Host *SChost) h = shost_priv(SChost); - if (h) { - if (h->info_kbuf == NULL) - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) - return h->info_kbuf; - h->info_kbuf[0] = '\0'; + if (!h) + return NULL; - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); - h->info_kbuf[size-1] = '\0'; - } + if (h->info_kbuf == NULL) + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL) + return h->info_kbuf; + h->info_kbuf[0] = '\0'; + + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0); + h->info_kbuf[size-1] = '\0'; return h->info_kbuf; } diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index 07dbeaf..5533142 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c @@ -255,13 +255,6 @@ static char *scsi_devices[] = { "Array Controller Device" }; -static char *chtostr(u8 * chars, int n) -{ - char tmp[256]; - tmp[0] = 0; - return strncat(tmp, (char *)chars, n); -} - static int i2o_report_query_status(struct seq_file *seq, int block_status, char *group) { @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); seq_printf(seq, "%-#8x", ddm_table.module_id); - seq_printf(seq, "%-29s", - chtostr(ddm_table.module_name_version, 28)); + seq_printf(seq, "%-.28s", ddm_table.module_name_version); seq_printf(seq, "%9d ", 
ddm_table.data_size); seq_printf(seq, "%8d", ddm_table.code_size); @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) seq_printf(seq, "%-#7x", dst->i2o_vendor_id); seq_printf(seq, "%-#8x", dst->module_id); - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); - seq_printf(seq, "%-9s", chtostr(dst->date, 8)); + seq_printf(seq, "%-.28s", dst->module_name_version); + seq_printf(seq, "%-.8s", dst->date); seq_printf(seq, "%8d ", dst->module_size); seq_printf(seq, "%8d ", dst->mpb_size); seq_printf(seq, "0x%04x", dst->module_flags); @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); - seq_printf(seq, "Vendor info : %s\n", - chtostr((u8 *) (work32 + 2), 16)); - seq_printf(seq, "Product info : %s\n", - chtostr((u8 *) (work32 + 6), 16)); - seq_printf(seq, "Description : %s\n", - chtostr((u8 *) (work32 + 10), 16)); - seq_printf(seq, "Product rev. : %s\n", - chtostr((u8 *) (work32 + 14), 8)); + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2)); + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6)); + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10)); + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14)); seq_printf(seq, "Serial number : "); print_serial_number(seq, (u8 *) (work32 + 16), @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) } seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); - seq_printf(seq, "Module name : %s\n", - chtostr(result.module_name, 24)); - seq_printf(seq, "Module revision : %s\n", - chtostr(result.module_rev, 8)); + seq_printf(seq, "Module name : %.24s\n", result.module_name); + seq_printf(seq, "Module revision : %.8s\n", result.module_rev); seq_printf(seq, "Serial number : "); print_serial_number(seq, result.serial_number, sizeof(result) - 36); @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) return 0; } - seq_printf(seq, "Device name : %s\n", - chtostr(result.device_name, 64)); - seq_printf(seq, "Service name : %s\n", - chtostr(result.service_name, 64)); - seq_printf(seq, "Physical name : %s\n", - chtostr(result.physical_location, 64)); - seq_printf(seq, "Instance number : %s\n", - chtostr(result.instance_number, 4)); + seq_printf(seq, "Device name : %.64s\n", result.device_name); + seq_printf(seq, "Service name : %.64s\n", result.service_name); + seq_printf(seq, "Physical name : %.64s\n", result.physical_location); + seq_printf(seq, "Instance number : %.4s\n", result.instance_number); return 0; } diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c index 9ed6307..1b31df6 100644 --- a/drivers/mfd/janz-cmodio.c +++ b/drivers/mfd/janz-cmodio.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 7245023..8e1c8b6 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -118,7 +118,7 @@ } while (0) #define MAX_CONFIG_LEN 40 -static struct kgdb_io kgdbts_io_ops; +static const struct kgdb_io kgdbts_io_ops; static char get_buf[BUFMAX]; static int get_buf_cnt; static char put_buf[BUFMAX]; @@ -1114,7 +1114,7 @@ static void kgdbts_post_exp_handler(void) module_put(THIS_MODULE); } -static struct kgdb_io kgdbts_io_ops = { +static const struct kgdb_io kgdbts_io_ops = { .name = 
"kgdbts", .read_char = kgdbts_get_char, .write_char = kgdbts_put_char, diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c index 2f30bad..c4c13d0 100644 --- a/drivers/misc/sgi-gru/gruhandles.c +++ b/drivers/misc/sgi-gru/gruhandles.c @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks) unsigned long nsec; nsec = CLKS2NSEC(clks); - atomic_long_inc(&mcs_op_statistics[op].count); - atomic_long_add(nsec, &mcs_op_statistics[op].total); + atomic_long_inc_unchecked(&mcs_op_statistics[op].count); + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total); if (mcs_op_statistics[op].max < nsec) mcs_op_statistics[op].max = nsec; } diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 7768b87..f8aac38 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -32,9 +32,9 @@ #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) { - unsigned long val = atomic_long_read(v); + unsigned long val = atomic_long_read_unchecked(v); seq_printf(s, "%16lu %s\n", val, id); } @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p) seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); for (op = 0; op < mcsop_last; op++) { - count = atomic_long_read(&mcs_op_statistics[op].count); - total = atomic_long_read(&mcs_op_statistics[op].total); + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); max = mcs_op_statistics[op].max; seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, count ? total / count : 0, max); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 7a8b906..8f7d44e 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids; * GRU statistics. 
*/ struct gru_stats_s { - atomic_long_t vdata_alloc; - atomic_long_t vdata_free; - atomic_long_t gts_alloc; - atomic_long_t gts_free; - atomic_long_t gms_alloc; - atomic_long_t gms_free; - atomic_long_t gts_double_allocate; - atomic_long_t assign_context; - atomic_long_t assign_context_failed; - atomic_long_t free_context; - atomic_long_t load_user_context; - atomic_long_t load_kernel_context; - atomic_long_t lock_kernel_context; - atomic_long_t unlock_kernel_context; - atomic_long_t steal_user_context; - atomic_long_t steal_kernel_context; - atomic_long_t steal_context_failed; - atomic_long_t nopfn; - atomic_long_t asid_new; - atomic_long_t asid_next; - atomic_long_t asid_wrap; - atomic_long_t asid_reuse; - atomic_long_t intr; - atomic_long_t intr_cbr; - atomic_long_t intr_tfh; - atomic_long_t intr_spurious; - atomic_long_t intr_mm_lock_failed; - atomic_long_t call_os; - atomic_long_t call_os_wait_queue; - atomic_long_t user_flush_tlb; - atomic_long_t user_unload_context; - atomic_long_t user_exception; - atomic_long_t set_context_option; - atomic_long_t check_context_retarget_intr; - atomic_long_t check_context_unload; - atomic_long_t tlb_dropin; - atomic_long_t tlb_preload_page; - atomic_long_t tlb_dropin_fail_no_asid; - atomic_long_t tlb_dropin_fail_upm; - atomic_long_t tlb_dropin_fail_invalid; - atomic_long_t tlb_dropin_fail_range_active; - atomic_long_t tlb_dropin_fail_idle; - atomic_long_t tlb_dropin_fail_fmm; - atomic_long_t tlb_dropin_fail_no_exception; - atomic_long_t tfh_stale_on_fault; - atomic_long_t mmu_invalidate_range; - atomic_long_t mmu_invalidate_page; - atomic_long_t flush_tlb; - atomic_long_t flush_tlb_gru; - atomic_long_t flush_tlb_gru_tgh; - atomic_long_t flush_tlb_gru_zero_asid; - - atomic_long_t copy_gpa; - atomic_long_t read_gpa; - - atomic_long_t mesq_receive; - atomic_long_t mesq_receive_none; - atomic_long_t mesq_send; - atomic_long_t mesq_send_failed; - atomic_long_t mesq_noop; - atomic_long_t mesq_send_unexpected_error; - atomic_long_t mesq_send_lb_overflow; - atomic_long_t mesq_send_qlimit_reached; - atomic_long_t mesq_send_amo_nacked; - atomic_long_t mesq_send_put_nacked; - atomic_long_t mesq_page_overflow; - atomic_long_t mesq_qf_locked; - atomic_long_t mesq_qf_noop_not_full; - atomic_long_t mesq_qf_switch_head_failed; - atomic_long_t mesq_qf_unexpected_error; - atomic_long_t mesq_noop_unexpected_error; - atomic_long_t mesq_noop_lb_overflow; - atomic_long_t mesq_noop_qlimit_reached; - atomic_long_t mesq_noop_amo_nacked; - atomic_long_t mesq_noop_put_nacked; - atomic_long_t mesq_noop_page_overflow; + atomic_long_unchecked_t vdata_alloc; + atomic_long_unchecked_t vdata_free; + atomic_long_unchecked_t gts_alloc; + atomic_long_unchecked_t gts_free; + atomic_long_unchecked_t gms_alloc; + atomic_long_unchecked_t gms_free; + atomic_long_unchecked_t gts_double_allocate; + atomic_long_unchecked_t assign_context; + atomic_long_unchecked_t assign_context_failed; + atomic_long_unchecked_t free_context; + atomic_long_unchecked_t load_user_context; + atomic_long_unchecked_t load_kernel_context; + atomic_long_unchecked_t lock_kernel_context; + atomic_long_unchecked_t unlock_kernel_context; + atomic_long_unchecked_t steal_user_context; + atomic_long_unchecked_t steal_kernel_context; + atomic_long_unchecked_t steal_context_failed; + atomic_long_unchecked_t nopfn; + atomic_long_unchecked_t asid_new; + atomic_long_unchecked_t asid_next; + atomic_long_unchecked_t asid_wrap; + atomic_long_unchecked_t asid_reuse; + atomic_long_unchecked_t intr; + atomic_long_unchecked_t 
intr_cbr; + atomic_long_unchecked_t intr_tfh; + atomic_long_unchecked_t intr_spurious; + atomic_long_unchecked_t intr_mm_lock_failed; + atomic_long_unchecked_t call_os; + atomic_long_unchecked_t call_os_wait_queue; + atomic_long_unchecked_t user_flush_tlb; + atomic_long_unchecked_t user_unload_context; + atomic_long_unchecked_t user_exception; + atomic_long_unchecked_t set_context_option; + atomic_long_unchecked_t check_context_retarget_intr; + atomic_long_unchecked_t check_context_unload; + atomic_long_unchecked_t tlb_dropin; + atomic_long_unchecked_t tlb_preload_page; + atomic_long_unchecked_t tlb_dropin_fail_no_asid; + atomic_long_unchecked_t tlb_dropin_fail_upm; + atomic_long_unchecked_t tlb_dropin_fail_invalid; + atomic_long_unchecked_t tlb_dropin_fail_range_active; + atomic_long_unchecked_t tlb_dropin_fail_idle; + atomic_long_unchecked_t tlb_dropin_fail_fmm; + atomic_long_unchecked_t tlb_dropin_fail_no_exception; + atomic_long_unchecked_t tfh_stale_on_fault; + atomic_long_unchecked_t mmu_invalidate_range; + atomic_long_unchecked_t mmu_invalidate_page; + atomic_long_unchecked_t flush_tlb; + atomic_long_unchecked_t flush_tlb_gru; + atomic_long_unchecked_t flush_tlb_gru_tgh; + atomic_long_unchecked_t flush_tlb_gru_zero_asid; + + atomic_long_unchecked_t copy_gpa; + atomic_long_unchecked_t read_gpa; + + atomic_long_unchecked_t mesq_receive; + atomic_long_unchecked_t mesq_receive_none; + atomic_long_unchecked_t mesq_send; + atomic_long_unchecked_t mesq_send_failed; + atomic_long_unchecked_t mesq_noop; + atomic_long_unchecked_t mesq_send_unexpected_error; + atomic_long_unchecked_t mesq_send_lb_overflow; + atomic_long_unchecked_t mesq_send_qlimit_reached; + atomic_long_unchecked_t mesq_send_amo_nacked; + atomic_long_unchecked_t mesq_send_put_nacked; + atomic_long_unchecked_t mesq_page_overflow; + atomic_long_unchecked_t mesq_qf_locked; + atomic_long_unchecked_t mesq_qf_noop_not_full; + atomic_long_unchecked_t mesq_qf_switch_head_failed; + atomic_long_unchecked_t mesq_qf_unexpected_error; + atomic_long_unchecked_t mesq_noop_unexpected_error; + atomic_long_unchecked_t mesq_noop_lb_overflow; + atomic_long_unchecked_t mesq_noop_qlimit_reached; + atomic_long_unchecked_t mesq_noop_amo_nacked; + atomic_long_unchecked_t mesq_noop_put_nacked; + atomic_long_unchecked_t mesq_noop_page_overflow; }; @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, tghop_invalidate, mcsop_last}; struct mcs_op_statistic { - atomic_long_t count; - atomic_long_t total; + atomic_long_unchecked_t count; + atomic_long_unchecked_t total; unsigned long max; }; @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; #define STAT(id) do { \ if (gru_options & OPT_STATS) \ - atomic_long_inc(&gru_stats.id); \ + atomic_long_inc_unchecked(&gru_stats.id); \ } while (0) #ifdef CONFIG_SGI_GRU_DEBUG diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c index 5bf5f46..c5de373 100644 --- a/drivers/mtd/devices/doc2000.c +++ b/drivers/mtd/devices/doc2000.c @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, /* The ECC will not be calculated correctly if less than 512 is written */ /* DBB- - if (len != 0x200 && eccbuf) + if (len != 0x200) printk(KERN_WARNING "ECC needs a full sector write (adr: %lx size %lx)\n", (long) to, (long) len); diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c index 0990f78..bb4e8a4 100644 --- a/drivers/mtd/devices/doc2001.c +++ b/drivers/mtd/devices/doc2001.c @@ 
-393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, struct Nand *mychip = &this->chips[from >> (this->chipshift)]; /* Don't allow read past end of device */ - if (from >= this->totlen) + if (from >= this->totlen || !len) return -EINVAL; /* Don't allow a single read to cross a 512-byte block boundary */ diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index ca03428..41f6395 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "denali.h" diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 13b05cb..dde1a57 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1282,7 +1282,7 @@ module_exit(ubi_exit); static int __init bytes_str_to_int(const char *str) { char *endp; - unsigned long result; + unsigned long result, scale = 1; result = simple_strtoul(str, &endp, 0); if (str == endp || result >= INT_MAX) { @@ -1293,11 +1293,11 @@ static int __init bytes_str_to_int(const char *str) switch (*endp) { case 'G': - result *= 1024; + scale *= 1024; case 'M': - result *= 1024; + scale *= 1024; case 'K': - result *= 1024; + scale *= 1024; if (endp[1] == 'i' && endp[2] == 'B') endp += 2; case '\0': @@ -1308,7 +1308,13 @@ static int __init bytes_str_to_int(const char *str) return -EINVAL; } - return result; + if ((intoverflow_t)result*scale >= INT_MAX) { + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", + str); + return -EINVAL; + } + + return result*scale; } /** diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 3e0f19f..02612a8 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2296,7 +2296,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) case CHELSIO_GET_QSET_NUM:{ struct ch_reg edata; - memset(&edata, 0, sizeof(struct ch_reg)); + memset(&edata, 0, sizeof(edata)); edata.cmd = CHELSIO_GET_QSET_NUM; edata.val = pi->nqsets; diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index d206f21..979c799 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; + /* cannot be const */ struct e1000_mac_operations *func = &mac->ops; u32 swsm = 0; u32 swsm2 = 0; @@ -1703,7 +1704,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) er32(ICRXDMTC); } -static struct e1000_mac_operations e82571_mac_ops = { +static const struct e1000_mac_operations e82571_mac_ops = { /* .check_mng_mode: mac type dependent */ /* .check_for_link: media type dependent */ .id_led_init = e1000e_id_led_init, @@ -1725,7 +1726,7 @@ static struct e1000_mac_operations e82571_mac_ops = { .read_mac_addr = e1000_read_mac_addr_82571, }; -static struct e1000_phy_operations e82_phy_ops_igp = { +static const struct e1000_phy_operations e82_phy_ops_igp = { .acquire = e1000_get_hw_semaphore_82571, .check_polarity = e1000_check_polarity_igp, .check_reset_block = e1000e_check_reset_block_generic, @@ -1743,7 +1744,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = { .cfg_on_link_up = NULL, }; -static struct e1000_phy_operations e82_phy_ops_m88 = { +static const struct e1000_phy_operations e82_phy_ops_m88 = { .acquire = e1000_get_hw_semaphore_82571, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, @@ -1761,7 +1762,7 @@ 
static struct e1000_phy_operations e82_phy_ops_m88 = { .cfg_on_link_up = NULL, }; -static struct e1000_phy_operations e82_phy_ops_bm = { +static const struct e1000_phy_operations e82_phy_ops_bm = { .acquire = e1000_get_hw_semaphore_82571, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, @@ -1779,7 +1780,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = { .cfg_on_link_up = NULL, }; -static struct e1000_nvm_operations e82571_nvm_ops = { +static const struct e1000_nvm_operations e82571_nvm_ops = { .acquire = e1000_acquire_nvm_82571, .read = e1000e_read_nvm_eerd, .release = e1000_release_nvm_82571, diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index c0b3db4..2fa61ce 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -377,9 +377,9 @@ struct e1000_info { u32 pba; u32 max_hw_frame_size; s32 (*get_variants)(struct e1000_adapter *); - struct e1000_mac_operations *mac_ops; - struct e1000_phy_operations *phy_ops; - struct e1000_nvm_operations *nvm_ops; + const struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + const struct e1000_nvm_operations *nvm_ops; }; /* hardware capability, feature, and workaround flags */ diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index 38d79a6..300d5df 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c @@ -205,6 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; + /* cannot be const */ struct e1000_mac_operations *func = &mac->ops; /* Set media type */ @@ -1431,7 +1432,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) er32(ICRXDMTC); } -static struct e1000_mac_operations es2_mac_ops = { +static const struct e1000_mac_operations es2_mac_ops = { .read_mac_addr = e1000_read_mac_addr_80003es2lan, .id_led_init = e1000e_id_led_init, .check_mng_mode = e1000e_check_mng_mode_generic, @@ -1453,7 +1454,7 @@ static struct e1000_mac_operations es2_mac_ops = { .setup_led = e1000e_setup_led_generic, }; -static struct e1000_phy_operations es2_phy_ops = { +static const struct e1000_phy_operations es2_phy_ops = { .acquire = e1000_acquire_phy_80003es2lan, .check_polarity = e1000_check_polarity_m88, .check_reset_block = e1000e_check_reset_block_generic, @@ -1471,7 +1472,7 @@ static struct e1000_phy_operations es2_phy_ops = { .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, }; -static struct e1000_nvm_operations es2_nvm_ops = { +static const struct e1000_nvm_operations es2_nvm_ops = { .acquire = e1000_acquire_nvm_80003es2lan, .read = e1000e_read_nvm_eerd, .release = e1000_release_nvm_80003es2lan, diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 664ed58..23bac4f 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -791,13 +791,13 @@ struct e1000_phy_operations { /* Function pointers for the NVM. 
*/ struct e1000_nvm_operations { - s32 (*acquire)(struct e1000_hw *); - s32 (*read)(struct e1000_hw *, u16, u16, u16 *); - void (*release)(struct e1000_hw *); - s32 (*update)(struct e1000_hw *); - s32 (*valid_led_default)(struct e1000_hw *, u16 *); - s32 (*validate)(struct e1000_hw *); - s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (* const acquire)(struct e1000_hw *); + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *); + void (* const release)(struct e1000_hw *); + s32 (* const update)(struct e1000_hw *); + s32 (* const valid_led_default)(struct e1000_hw *, u16 *); + s32 (* const validate)(struct e1000_hw *); + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *); }; struct e1000_mac_info { @@ -877,6 +877,7 @@ struct e1000_phy_info { }; struct e1000_nvm_info { + /* cannot be const */ struct e1000_nvm_operations ops; enum e1000_nvm_type type; diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index b2507d9..93765c8 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -3388,7 +3388,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) } } -static struct e1000_mac_operations ich8_mac_ops = { +static const struct e1000_mac_operations ich8_mac_ops = { .id_led_init = e1000e_id_led_init, .check_mng_mode = e1000_check_mng_mode_ich8lan, .check_for_link = e1000_check_for_copper_link_ich8lan, @@ -3407,7 +3407,7 @@ static struct e1000_mac_operations ich8_mac_ops = { /* id_led_init dependent on mac type */ }; -static struct e1000_phy_operations ich8_phy_ops = { +static const struct e1000_phy_operations ich8_phy_ops = { .acquire = e1000_acquire_swflag_ich8lan, .check_reset_block = e1000_check_reset_block_ich8lan, .commit = NULL, @@ -3421,7 +3421,7 @@ static struct e1000_phy_operations ich8_phy_ops = { .write_reg = e1000e_write_phy_reg_igp, }; -static struct e1000_nvm_operations ich8_nvm_ops = { +static const struct e1000_nvm_operations ich8_nvm_ops = { .acquire = e1000_acquire_nvm_ich8lan, .read = e1000_read_nvm_ich8lan, .release = e1000_release_nvm_ich8lan, diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 0cb1cf9..02efc44 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -555,7 +555,7 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) equalizer_t *eql; master_config_t mc; - memset(&mc, 0, sizeof(master_config_t)); + memset(&mc, 0, sizeof(mc)); if (eql_is_master(dev)) { eql = netdev_priv(dev); diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 86438b5..82ec00c 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -1597,7 +1597,7 @@ u16 igb_rxpbs_adjust_82580(u32 data) return ret_val; } -static struct e1000_mac_operations e1000_mac_ops_82575 = { +static const struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, .rar_set = igb_rar_set, @@ -1605,13 +1605,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { .get_speed_and_duplex = igb_get_speed_and_duplex_copper, }; -static struct e1000_phy_operations e1000_phy_ops_82575 = { +static const struct e1000_phy_operations e1000_phy_ops_82575 = { .acquire = igb_acquire_phy_82575, .get_cfg_done = igb_get_cfg_done_82575, .release = igb_release_phy_82575, }; -static struct e1000_nvm_operations e1000_nvm_ops_82575 = { +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = { .acquire = igb_acquire_nvm_82575, .read = igb_read_nvm_eerd, .release = igb_release_nvm_82575, diff --git 
a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index cb8db78..aad4c54 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -323,17 +323,17 @@ struct e1000_phy_operations { }; struct e1000_nvm_operations { - s32 (*acquire)(struct e1000_hw *); - s32 (*read)(struct e1000_hw *, u16, u16, u16 *); - void (*release)(struct e1000_hw *); - s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (* const acquire)(struct e1000_hw *); + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *); + void (* const release)(struct e1000_hw *); + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *); }; struct e1000_info { s32 (*get_invariants)(struct e1000_hw *); - struct e1000_mac_operations *mac_ops; - struct e1000_phy_operations *phy_ops; - struct e1000_nvm_operations *nvm_ops; + const struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + const struct e1000_nvm_operations *nvm_ops; }; extern const struct e1000_info e1000_82575_info; @@ -412,6 +412,7 @@ struct e1000_phy_info { }; struct e1000_nvm_info { + /* cannot be const */ struct e1000_nvm_operations ops; enum e1000_nvm_type type; diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index c3d0738..486ea60 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -907,13 +907,12 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, /* no race - tx-ring already empty */ vlsi_set_baud(idev, iobase); netif_wake_queue(ndev); - } - else - ; + } else { /* keep the speed change pending like it would * for any len>0 packet. tx completion interrupt * will apply it when the tx ring becomes empty. */ + } spin_unlock_irqrestore(&idev->lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index c200c282..e15e89b 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -82,7 +82,7 @@ static int cards_found; /* * VLB I/O addresses */ -static unsigned int pcnet32_portlist[] __initdata = +static unsigned int pcnet32_portlist[] __devinitdata = { 0x300, 0x320, 0x340, 0x360, 0 }; static int pcnet32_debug; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 1b2c291..769f512 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -992,7 +992,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; struct ppp_stats stats; struct ppp_comp_stats cstats; - char *vers; switch (cmd) { case SIOCGPPPSTATS: @@ -1014,8 +1013,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; case SIOCGPPPVER: - vers = PPP_VERSION; - if (copy_to_user(addr, vers, strlen(vers) + 1)) + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION))) break; err = 0; break; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 573054a..2b931d1 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -12410,7 +12410,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp) cnt = pci_read_vpd(tp->pdev, pos, TG3_NVM_VPD_LEN - pos, &vpd_data[pos]); - if (cnt == -ETIMEDOUT || -EINTR) + if (cnt == -ETIMEDOUT || cnt == -EINTR) cnt = 0; else if (cnt < 0) goto out_not_found; diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index ce9c491..7ff916b 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -130,6 +130,7 @@ #define CHIPREV_ID_5750_A0 0x4000 #define CHIPREV_ID_5750_A1 0x4001 #define CHIPREV_ID_5750_A3 0x4003 +#define CHIPREV_ID_5750_C1 0x4201 #define CHIPREV_ID_5750_C2 0x4202 #define 
CHIPREV_ID_5752_A0_HW 0x5000 #define CHIPREV_ID_5752_A0 0x6000 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 75a64c8..adfb171 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) for (i=0; i<ETH_ALEN; i++) { tmp.addr[i] = dev->dev_addr[i]; } - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + if (ioc->len > sizeof(tmp.addr) || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; break; case DE4X5_SET_HWADDR: /* Set the hardware address */ @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) spin_lock_irqsave(&lp->lock, flags); memcpy(&statbuf, &lp->pktStats, ioc->len); spin_unlock_irqrestore(&lp->lock, flags); - if (copy_to_user(ioc->data, &statbuf, ioc->len)) + if (ioc->len > sizeof(statbuf) || copy_to_user(ioc->data, &statbuf, ioc->len)) return -EFAULT; break; } @@ -5474,7 +5474,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) tmp.lval[6] = inl(DE4X5_STRR); j+=4; tmp.lval[7] = inl(DE4X5_SIGR); j+=4; ioc->len = j; - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + if (copy_to_user(ioc->data, tmp.lval, ioc->len)) return -EFAULT; break; #define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e254274..4f2d0b4 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -258,7 +258,7 @@ struct hso_serial { /* from usb_serial_port */ struct tty_struct *tty; - int open_count; + atomic_t open_count; spinlock_t serial_lock; int (*write_data) (struct hso_serial *serial); @@ -1201,7 +1201,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) struct urb *urb; urb = serial->rx_urb[0]; - if (serial->open_count > 0) { + if (atomic_read(&serial->open_count) > 0) { count = put_rxbuf_data(urb, serial); if (count == -1) return; @@ -1237,7 +1237,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) DUMP1(urb->transfer_buffer, urb->actual_length); /* Anyone listening?
*/ - if (serial->open_count == 0) + if (atomic_read(&serial->open_count) == 0) return; if (status == 0) { @@ -1332,8 +1332,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) spin_unlock_irq(&serial->serial_lock); /* check for port already opened, if not set the termios */ - serial->open_count++; - if (serial->open_count == 1) { + if (atomic_inc_return(&serial->open_count) == 1) { serial->rx_state = RX_IDLE; /* Force default termio settings */ _hso_serial_set_termios(tty, NULL); @@ -1345,7 +1344,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); - serial->open_count--; + atomic_dec(&serial->open_count); kref_put(&serial->parent->ref, hso_serial_ref_free); } } else { @@ -1382,10 +1381,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) /* reset the rts and dtr */ /* do the actual close */ - serial->open_count--; + atomic_dec(&serial->open_count); - if (serial->open_count <= 0) { - serial->open_count = 0; + if (atomic_read(&serial->open_count) <= 0) { + atomic_set(&serial->open_count, 0); spin_lock_irq(&serial->serial_lock); if (serial->tty == tty) { serial->tty->driver_data = NULL; @@ -1467,7 +1466,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); - if (serial->open_count) + if (atomic_read(&serial->open_count)) _hso_serial_set_termios(tty, old); else tty->termios = old; @@ -1653,10 +1652,11 @@ static int hso_get_count(struct hso_serial *serial, struct uart_icount cnow; struct hso_tiocmget *tiocmget = serial->tiocmget; - memset(&icount, 0, sizeof(struct serial_icounter_struct)); - if (!tiocmget) return -ENOENT; + + memset(&icount, 0, sizeof(icount)); + spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); @@ -1931,7 +1931,7 @@ static void intr_callback(struct urb *urb) D1("Pending read interrupt on port %d\n", i); spin_lock(&serial->serial_lock); if (serial->rx_state == RX_IDLE && - serial->open_count > 0) { + atomic_read(&serial->open_count) > 0) { /* Setup and send a ctrl req read on * port i */ if (!serial->rx_urb_filled[0]) { @@ -3121,7 +3121,7 @@ static int hso_resume(struct usb_interface *iface) /* Start all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { - if (dev2ser(serial_table[i])->open_count) { + if (atomic_read(&dev2ser(serial_table[i])->open_count)) { result = hso_start_serial_device(serial_table[i], GFP_NOIO); hso_kick_transmit(dev2ser(serial_table[i])); diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c index 80b19a4..dab3a45 100644 --- a/drivers/net/wireless/b43/debugfs.c +++ b/drivers/net/wireless/b43/debugfs.c @@ -43,7 +43,7 @@ static struct dentry *rootdir; struct b43_debugfs_fops { ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); int (*write)(struct b43_wldev *dev, const char *buf, size_t count); - struct file_operations fops; + const struct file_operations fops; /* Offset of struct b43_dfs_file in struct b43_dfsentry */ size_t file_struct_offset; }; diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c index 1f85ac5..c99b4b4 100644 --- a/drivers/net/wireless/b43legacy/debugfs.c +++ b/drivers/net/wireless/b43legacy/debugfs.c @@ 
-44,7 +44,7 @@ static struct dentry *rootdir; struct b43legacy_debugfs_fops { ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); - struct file_operations fops; + const struct file_operations fops; /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ size_t file_struct_offset; /* Take wl->irq_lock before calling read/write? */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 5c2bcef..d4dd3a5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -68,8 +68,8 @@ do { \ } while (0) #else -#define IWL_DEBUG(__priv, level, fmt, args...) -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0) +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0) static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, void *p, u32 len) {} diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index de2caac..64f7c60 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -718,7 +718,7 @@ out_unlock: struct lbs_debugfs_files { const char *name; int perm; - struct file_operations fops; + const struct file_operations fops; }; static const struct lbs_debugfs_files debugfs_files[] = { diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 4bd61ee..f6a134f 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); - if (rts_threshold < 0 || rts_threshold > 2347) + if (rts_threshold > 2347) rts_threshold = 2347; tmp = cpu_to_le32(rts_threshold); diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index b7e755f..6f17b32 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm) if (cookie == NO_COOKIE) offset = pc; if (cookie == INVALID_COOKIE) { - atomic_inc(&oprofile_stats.sample_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); offset = pc; } if (cookie != last_cookie) { @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel) /* add userspace sample */ if (!mm) { - atomic_inc(&oprofile_stats.sample_lost_no_mm); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); return 0; } cookie = lookup_dcookie(mm, s->eip, &offset); if (cookie == INVALID_COOKIE) { - atomic_inc(&oprofile_stats.sample_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); return 0; } @@ -562,7 +562,7 @@ void sync_buffer(int cpu) /* ignore backtraces if failed to add a sample */ if (state == sb_bt_start) { state = sb_bt_ignore; - atomic_inc(&oprofile_stats.bt_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); } } release_mm(mm); diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c index 5df60a6..72f5c1c 100644 --- a/drivers/oprofile/event_buffer.c +++ b/drivers/oprofile/event_buffer.c @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value) } if (buffer_pos == buffer_size) { - atomic_inc(&oprofile_stats.event_lost_overflow); + 
atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); return; } diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index b336cd9..90015a2 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work) if (oprofile_ops.switch_events()) return; - atomic_inc(&oprofile_stats.multiplex_counter); + atomic_inc_unchecked(&oprofile_stats.multiplex_counter); start_switch_worker(); } diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index 917d28e..d62d981 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c @@ -30,11 +30,11 @@ void oprofile_reset_stats(void) cpu_buf->sample_invalid_eip = 0; } - atomic_set(&oprofile_stats.sample_lost_no_mm, 0); - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); - atomic_set(&oprofile_stats.event_lost_overflow, 0); - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); - atomic_set(&oprofile_stats.multiplex_counter, 0); + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); } diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h index 0b54e46..a37c527 100644 --- a/drivers/oprofile/oprofile_stats.h +++ b/drivers/oprofile/oprofile_stats.h @@ -13,11 +13,11 @@ #include struct oprofile_stat_struct { - atomic_t sample_lost_no_mm; - atomic_t sample_lost_no_mapping; - atomic_t bt_lost_no_mapping; - atomic_t event_lost_overflow; - atomic_t multiplex_counter; + atomic_unchecked_t sample_lost_no_mm; + atomic_unchecked_t sample_lost_no_mapping; + atomic_unchecked_t bt_lost_no_mapping; + atomic_unchecked_t event_lost_overflow; + atomic_unchecked_t multiplex_counter; }; extern struct oprofile_stat_struct oprofile_stats; diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index 2766a6d..80c77e2 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = { int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, - char const *name, atomic_t *val) + char const *name, atomic_unchecked_t *val) { struct dentry *d = __oprofilefs_create_file(sb, root, name, &atomic_ro_fops, 0444); diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index 3f56bc0..7094b5d 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write, *ppos += len; - return copy_to_user(result, buffer, len) ? -EFAULT : 0; + return (len > sizeof(buffer) || copy_to_user(result, buffer, len)) ? -EFAULT : 0; } #ifdef CONFIG_PARPORT_1284 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write, *ppos += len; - return copy_to_user (result, buffer, len) ? -EFAULT : 0; + return (len > sizeof(buffer) || copy_to_user (result, buffer, len)) ? -EFAULT : 0; } #endif /* IEEE1284.3 support. 
*/ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cb23aa2..f6dd9fe 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, } -static struct acpi_dock_ops acpiphp_dock_ops = { +static const struct acpi_dock_ops acpiphp_dock_ops = { .handler = handle_hotplug_event_func, }; diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c index 76ba8a1..20ca857 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.c +++ b/drivers/pci/hotplug/cpqphp_nvram.c @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start) void compaq_nvram_init (void __iomem *rom_start) { + +#ifndef CONFIG_PAX_KERNEXEC if (rom_start) { compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); } +#endif + dbg("int15 entry = %p\n", compaq_int15_entry_point); /* initialize our int15 lock */ diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 8e499e8..24a1ecb 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -2934,7 +2934,7 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) return !dma_addr; } -struct dma_map_ops intel_dma_ops = { +const struct dma_map_ops intel_dma_ops = { .alloc_coherent = intel_alloc_coherent, .free_coherent = intel_free_coherent, .map_sg = intel_map_sg, diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3debed2..f03f571 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -250,7 +250,7 @@ static void pcie_portdrv_err_resume(struct pci_dev *dev) static const struct pci_device_id port_pci_ids[] = { { /* handle any PCI-Express port */ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0), - }, { /* end: all zeroes */ } + }, { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, port_pci_ids); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f4adba2..f151c41 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, return ret; } -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev, +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev, struct device_attribute *attr, char *buf) { return pci_bus_show_cpuaffinity(dev, 0, attr, buf); } -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev, +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 64ac30b..9a295d5 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -481,7 +481,16 @@ static const struct file_operations proc_bus_pci_dev_operations = { static int __init pci_proc_init(void) { struct pci_dev *dev = NULL; + +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); +#endif +#else proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); +#endif proc_create("devices", 0, proc_bus_pci_dir, &proc_bus_pci_dev_operations); proc_initialized = 1; diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index d007a2a..5d7a756 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c @@ -850,7 +850,7 @@ 
static int ds_ioctl(struct file *file, u_int cmd, u_long arg) return -EFAULT; } } - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); if (!buf) return -ENOMEM; diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h index 9ffa97d..d04ec32 100644 --- a/drivers/pcmcia/ti113x.h +++ b/drivers/pcmcia/ti113x.h @@ -936,7 +936,7 @@ static struct pci_device_id ene_tune_tbl[] = { DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID, ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), - {} + { 0, 0, 0, 0, 0, 0, 0 } }; static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus) diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index f1d4137..9e9bdbb 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1428,7 +1428,7 @@ static struct pci_device_id yenta_table[] = { /* match any cardbus bridge */ CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT), - { /* all zeroes */ } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, yenta_table); diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 1ea6c43..4d4ef01 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -916,7 +916,7 @@ static int update_bl_status(struct backlight_device *bd) return 0; } -static struct backlight_ops acer_bl_ops = { +static const struct backlight_ops acer_bl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 1fccc85..d176c29 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -224,7 +224,6 @@ struct asus_laptop { struct asus_led gled; struct asus_led kled; struct workqueue_struct *led_workqueue; - int wireless_status; bool have_rsts; int lcd_state; @@ -621,7 +620,7 @@ static int update_bl_status(struct backlight_device *bd) return asus_lcd_set(asus, value); } -static struct backlight_ops asusbl_ops = { +static const struct backlight_ops asusbl_ops = { .get_brightness = asus_read_brightness, .update_status = update_bl_status, }; diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 92fd30c..e477b4e 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1464,7 +1464,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type) return 0; } -static struct backlight_ops asus_backlight_data = { +static const struct backlight_ops asus_backlight_data = { .get_brightness = read_brightness, .update_status = set_brightness_status, }; diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 90111d7..dfcbb83 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -168,7 +168,7 @@ static int bl_update_status(struct backlight_device *b) return set_lcd_level(b->props.brightness); } -static struct backlight_ops compalbl_ops = { +static const struct backlight_ops compalbl_ops = { .get_brightness = bl_get_brightness, .update_status = bl_update_status, }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 6110601..3f9e967 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -469,7 +469,7 @@ out: return buffer->output[1]; } -static struct backlight_ops dell_ops = { +static const struct backlight_ops dell_ops = { .get_brightness = dell_get_intensity, .update_status 
= dell_send_intensity, }; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 0306174..9d1f28b 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -1114,7 +1114,7 @@ static int update_bl_status(struct backlight_device *bd) return set_brightness(bd, bd->props.brightness); } -static struct backlight_ops eeepcbl_ops = { +static const struct backlight_ops eeepcbl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index e325aeb..30d69ef 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -437,7 +437,7 @@ static int bl_update_status(struct backlight_device *b) return ret; } -static struct backlight_ops fujitsubl_ops = { +static const struct backlight_ops fujitsubl_ops = { .get_brightness = bl_get_brightness, .update_status = bl_update_status, }; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 1387c5f..21d4309 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -857,7 +857,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd) } static struct backlight_device *sony_backlight_device; -static struct backlight_ops sony_backlight_ops = { +static const struct backlight_ops sony_backlight_ops = { .update_status = sony_backlight_update_status, .get_brightness = sony_backlight_get_brightness, }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 4bdb137..e53ba1f 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6142,7 +6142,7 @@ static void tpacpi_brightness_notify_change(void) BACKLIGHT_UPDATE_HOTKEY); } -static struct backlight_ops ibm_backlight_data = { +static const struct backlight_ops ibm_backlight_data = { .get_brightness = brightness_get, .update_status = brightness_update_status, }; diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 37aa147..c1aa23e 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -741,7 +741,7 @@ static acpi_status remove_device(void) return AE_OK; } -static struct backlight_ops toshiba_backlight_data = { +static const struct backlight_ops toshiba_backlight_data = { .get_brightness = get_lcd, .update_status = set_lcd_status, }; diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 8591f6a..a0f2b81 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -59,7 +59,7 @@ do { \ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ } while(0) -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); /* @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, cpu = get_cpu(); save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; + + pax_open_kernel(); get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); /* On some boxes IRQ's during PnP BIOS calls are deadly. 
*/ spin_lock_irqsave(&pnp_bios_lock, flags); @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, :"memory"); spin_unlock_irqrestore(&pnp_bios_lock, flags); + pax_open_kernel(); get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); /* If we get here and this is set then the PnP BIOS faulted on us. */ @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base) return status; } -void pnpbios_calls_init(union pnp_bios_install_struct *header) +void __init pnpbios_calls_init(union pnp_bios_install_struct *header) { int i; @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) pnp_bios_callpoint.offset = header->fields.pm16offset; pnp_bios_callpoint.segment = PNP_CS16; + pax_open_kernel(); + for_each_possible_cpu(i) { struct desc_struct *gdt = get_cpu_gdt_table(i); if (!gdt) @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], (unsigned long)__va(header->fields.pm16dseg)); } + + pax_close_kernel(); } diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index dfbd5a6..a0a9f46 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -322,7 +322,7 @@ static struct pnp_fixup pnp_fixups[] = { /* PnP resources that might overlap PCI BARs */ {"PNP0c01", quirk_system_pci_resources}, {"PNP0c02", quirk_system_pci_resources}, - {""} + {"", NULL} }; void pnp_fixup_device(struct pnp_dev *dev) diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index e3446ab..b13bf03 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) return 1; /* check if the resource is valid */ - if (*irq < 0 || *irq > 15) + if (*irq > 15) return 0; /* check if the resource is reserved */ @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) return 1; /* check if the resource is valid */ - if (*dma < 0 || *dma == 4 || *dma > 7) + if (*dma == 4 || *dma > 7) return 0; /* check if the resource is reserved */ diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 6ce83f5..68ee329 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -233,7 +233,7 @@ static int qperf_seq_open(struct inode *inode, struct file *filp) filp->f_path.dentry->d_inode->i_private); } -static struct file_operations debugfs_perf_fops = { +static const struct file_operations debugfs_perf_fops = { .owner = THIS_MODULE, .open = qperf_seq_open, .read = seq_read, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f820cff..54a7195 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6091,7 +6091,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) return true; } -static struct ata_port_operations ipr_sata_ops = { +static const struct ata_port_operations ipr_sata_ops = { .phy_reset = ipr_ata_phy_reset, .hardreset = ipr_sata_reset, .post_internal_cmd = ipr_ata_post_internal, diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 104e0fb..6db906b 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -100,12 +100,12 @@ struct fc_exch_mgr { * all together if not used XXX */ struct { - atomic_t no_free_exch; - atomic_t no_free_exch_xid; - atomic_t xid_not_found; - atomic_t xid_busy; - atomic_t seq_not_found; - atomic_t non_bls_resp; + atomic_unchecked_t no_free_exch; + atomic_unchecked_t no_free_exch_xid; + atomic_unchecked_t xid_not_found; + 
atomic_unchecked_t xid_busy; + atomic_unchecked_t seq_not_found; + atomic_unchecked_t non_bls_resp; } stats; }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) @@ -671,7 +671,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); if (!ep) { - atomic_inc(&mp->stats.no_free_exch); + atomic_inc_unchecked(&mp->stats.no_free_exch); goto out; } memset(ep, 0, sizeof(*ep)); @@ -719,7 +719,7 @@ out: return ep; err: spin_unlock_bh(&pool->lock); - atomic_inc(&mp->stats.no_free_exch_xid); + atomic_inc_unchecked(&mp->stats.no_free_exch_xid); mempool_free(ep, mp->ep_pool); return NULL; } @@ -864,7 +864,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, xid = ntohs(fh->fh_ox_id); /* we originated exch */ ep = fc_exch_find(mp, xid); if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); reject = FC_RJT_OX_ID; goto out; } @@ -894,7 +894,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, ep = fc_exch_find(mp, xid); if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { if (ep) { - atomic_inc(&mp->stats.xid_busy); + atomic_inc_unchecked(&mp->stats.xid_busy); reject = FC_RJT_RX_ID; goto rel; } @@ -905,7 +905,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, } xid = ep->xid; /* get our XID */ } else if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); reject = FC_RJT_RX_ID; /* XID not found */ goto out; } @@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, } else { sp = &ep->seq; if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); + atomic_inc_unchecked(&mp->stats.seq_not_found); reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */ goto rel; } @@ -1303,22 +1303,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto out; } if (ep->esb_stat & ESB_ST_COMPLETE) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto out; } if (ep->rxid == FC_XID_UNKNOWN) ep->rxid = ntohs(fh->fh_rx_id); if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } if (ep->did != ntoh24(fh->fh_s_id) && ep->did != FC_FID_FLOGI) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } sof = fr_sof(fp); @@ -1327,7 +1327,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) sp->ssb_stat |= SSB_ST_RESP; sp->id = fh->fh_seq_id; } else if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); + atomic_inc_unchecked(&mp->stats.seq_not_found); goto rel; } @@ -1390,9 +1390,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ if (!sp) - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); else - atomic_inc(&mp->stats.non_bls_resp); + atomic_inc_unchecked(&mp->stats.non_bls_resp); fc_frame_free(fp); } diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index fb78856..5c2001e 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ 
-344,7 +344,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in, } } -static struct ata_port_operations sas_sata_ops = { +static const struct ata_port_operations sas_sata_ops = { .phy_reset = sas_ata_phy_reset, .post_internal_cmd = sas_ata_post_internal, .qc_defer = ata_std_qc_defer, diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index 3dcddfe..58216cb 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h @@ -79,7 +79,7 @@ CMD; \ } #else -#define MPT_CHECK_LOGGING(IOC, CMD, BITS) +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0) #endif /* CONFIG_SCSI_MPT2SAS_LOGGING */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index be1a8fc..f67861f 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3899,7 +3899,7 @@ static struct pci_driver qla2xxx_pci_driver = { .err_handler = &qla2xxx_err_handler, }; -static struct file_operations apidev_fops = { +static const struct file_operations apidev_fops = { .owner = THIS_MODULE, }; diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h index 1f65139..eef6227 100644 --- a/drivers/scsi/scsi_logging.h +++ b/drivers/scsi/scsi_logging.h @@ -51,7 +51,7 @@ do { \ } while (0); \ } while (0) #else -#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0) #endif /* CONFIG_SCSI_LOGGING */ /* diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index ef752b2..c017189 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2302,7 +2302,7 @@ struct sg_proc_leaf { const struct file_operations * fops; }; -static struct sg_proc_leaf sg_proc_leaf_arr[] = { +static const struct sg_proc_leaf sg_proc_leaf_arr[] = { {"allow_dio", &adio_fops}, {"debug", &debug_fops}, {"def_reserved_size", &dressz_fops}, @@ -2317,7 +2317,7 @@ sg_proc_init(void) { int k, mask; int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); - struct sg_proc_leaf * leaf; + const struct sg_proc_leaf * leaf; sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); if (!sg_proc_sgp) diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 53be4d3..178495f 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -3777,7 +3777,7 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, pbn_default }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0 } }; static struct pci_driver serial_pci_driver = { diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c index a9a94ae..f1641fc 100644 --- a/drivers/serial/kgdboc.c +++ b/drivers/serial/kgdboc.c @@ -20,7 +20,7 @@ #define MAX_CONFIG_LEN 40 -static struct kgdb_io kgdboc_io_ops; +static struct kgdb_io kgdboc_io_ops; /* -1 = init not run yet, 0 = unconfigured, 1 = configured. 
*/ static int configured = -1; diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index aeb2c00..7667956 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -1425,7 +1425,7 @@ static void comedi_unmap(struct vm_area_struct *area) mutex_unlock(&dev->mutex); } -static struct vm_operations_struct comedi_vm_ops = { +static const struct vm_operations_struct comedi_vm_ops = { .close = comedi_unmap, }; diff --git a/drivers/staging/dream/pmem.c b/drivers/staging/dream/pmem.c index 6387365..74aad74 100644 --- a/drivers/staging/dream/pmem.c +++ b/drivers/staging/dream/pmem.c @@ -175,7 +175,7 @@ static int pmem_mmap(struct file *, struct vm_area_struct *); static int pmem_open(struct inode *, struct file *); static long pmem_ioctl(struct file *, unsigned int, unsigned long); -struct file_operations pmem_fops = { +const struct file_operations pmem_fops = { .release = pmem_release, .mmap = pmem_mmap, .open = pmem_open, @@ -1201,7 +1201,7 @@ static ssize_t debug_read(struct file *file, char __user *buf, size_t count, return simple_read_from_buffer(buf, count, ppos, buffer, n); } -static struct file_operations debug_fops = { +static const struct file_operations debug_fops = { .read = debug_read, .open = debug_open, }; diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c index 8197765..5f3b690 100644 --- a/drivers/staging/dream/qdsp5/adsp_driver.c +++ b/drivers/staging/dream/qdsp5/adsp_driver.c @@ -577,7 +577,7 @@ static struct adsp_device *inode_to_device(struct inode *inode) static dev_t adsp_devno; static struct class *adsp_class; -static struct file_operations adsp_fops = { +static const struct file_operations adsp_fops = { .owner = THIS_MODULE, .open = adsp_open, .unlocked_ioctl = adsp_ioctl, diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c index a373f35..8c97186 100644 --- a/drivers/staging/dream/qdsp5/audio_aac.c +++ b/drivers/staging/dream/qdsp5/audio_aac.c @@ -1023,7 +1023,7 @@ done: return rc; } -static struct file_operations audio_aac_fops = { +static const struct file_operations audio_aac_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_release, diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c index 07b79d5..15bf062 100644 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c @@ -834,7 +834,7 @@ done: return rc; } -static struct file_operations audio_amrnb_fops = { +static const struct file_operations audio_amrnb_fops = { .owner = THIS_MODULE, .open = audamrnb_open, .release = audamrnb_release, diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c index ad989ee..ba5ebf5 100644 --- a/drivers/staging/dream/qdsp5/audio_evrc.c +++ b/drivers/staging/dream/qdsp5/audio_evrc.c @@ -806,7 +806,7 @@ dma_fail: return rc; } -static struct file_operations audio_evrc_fops = { +static const struct file_operations audio_evrc_fops = { .owner = THIS_MODULE, .open = audevrc_open, .release = audevrc_release, diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c index 6ae48e7..02b8370 100644 --- a/drivers/staging/dream/qdsp5/audio_in.c +++ b/drivers/staging/dream/qdsp5/audio_in.c @@ -914,7 +914,7 @@ static int audpre_open(struct inode *inode, struct file *file) return 0; } -static struct file_operations audio_fops = { +static const struct file_operations 
audio_fops = { .owner = THIS_MODULE, .open = audio_in_open, .release = audio_in_release, @@ -923,7 +923,7 @@ static struct file_operations audio_fops = { .unlocked_ioctl = audio_in_ioctl, }; -static struct file_operations audpre_fops = { +static const struct file_operations audpre_fops = { .owner = THIS_MODULE, .open = audpre_open, .unlocked_ioctl = audpre_ioctl, diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c index 530e1f3..e8bfc32 100644 --- a/drivers/staging/dream/qdsp5/audio_mp3.c +++ b/drivers/staging/dream/qdsp5/audio_mp3.c @@ -941,7 +941,7 @@ done: return rc; } -static struct file_operations audio_mp3_fops = { +static const struct file_operations audio_mp3_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_release, diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c index 76d7fa5..32bc20b 100644 --- a/drivers/staging/dream/qdsp5/audio_out.c +++ b/drivers/staging/dream/qdsp5/audio_out.c @@ -800,7 +800,7 @@ static int audpp_open(struct inode *inode, struct file *file) return 0; } -static struct file_operations audio_fops = { +static const struct file_operations audio_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_release, @@ -809,7 +809,7 @@ static struct file_operations audio_fops = { .unlocked_ioctl = audio_ioctl, }; -static struct file_operations audpp_fops = { +static const struct file_operations audpp_fops = { .owner = THIS_MODULE, .open = audpp_open, .unlocked_ioctl = audpp_ioctl, diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c index effa96f..5fb23cd 100644 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c @@ -817,7 +817,7 @@ err: return rc; } -static struct file_operations audio_qcelp_fops = { +static const struct file_operations audio_qcelp_fops = { .owner = THIS_MODULE, .open = audqcelp_open, .release = audqcelp_release, diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c index 037d7ff..5469ec3 100644 --- a/drivers/staging/dream/qdsp5/snd.c +++ b/drivers/staging/dream/qdsp5/snd.c @@ -242,7 +242,7 @@ err: return rc; } -static struct file_operations snd_fops = { +static const struct file_operations snd_fops = { .owner = THIS_MODULE, .open = snd_open, .release = snd_release, diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c index 40ef97f..b0fcaa3 100644 --- a/drivers/staging/dt3155/dt3155_drv.c +++ b/drivers/staging/dt3155/dt3155_drv.c @@ -853,7 +853,7 @@ dt3155_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * needed by init_module * register_chrdev *****************************************************/ -static struct file_operations dt3155_fops = { +static const struct file_operations dt3155_fops = { .read = dt3155_read, .unlocked_ioctl = dt3155_unlocked_ioctl, .mmap = dt3155_mmap, diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c index 46b4b9f..f3ae0da 100644 --- a/drivers/staging/go7007/go7007-v4l2.c +++ b/drivers/staging/go7007/go7007-v4l2.c @@ -1673,7 +1673,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return 0; } -static struct vm_operations_struct go7007_vm_ops = { +static const struct vm_operations_struct go7007_vm_ops = { .open = go7007_vm_open, .close = go7007_vm_close, .fault = go7007_vm_fault, diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c index 6c77e64..609ea38 
100644 --- a/drivers/staging/hv/hv.c +++ b/drivers/staging/hv/hv.c @@ -162,7 +162,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output) u64 outputAddress = (Output) ? virt_to_phys(Output) : 0; u32 outputAddressHi = outputAddress >> 32; u32 outputAddressLo = outputAddress & 0xFFFFFFFF; - volatile void *hypercallPage = gHvContext.HypercallPage; + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage); DPRINT_DBG(VMBUS, "Hypercall ", Control, Input, Output); diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c index 033fc94..2a80775 100644 --- a/drivers/staging/msm/msm_fb_bl.c +++ b/drivers/staging/msm/msm_fb_bl.c @@ -42,7 +42,7 @@ static int msm_fb_bl_update_status(struct backlight_device *pbd) return 0; } -static struct backlight_ops msm_fb_bl_ops = { +static const struct backlight_ops msm_fb_bl_ops = { .get_brightness = msm_fb_bl_get_brightness, .update_status = msm_fb_bl_update_status, }; diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 6474c3a..b48bdd6 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -1304,7 +1304,7 @@ static int lcd_release(struct inode *inode, struct file *file) return 0; } -static struct file_operations lcd_fops = { +static const struct file_operations lcd_fops = { .write = lcd_write, .open = lcd_open, .release = lcd_release, @@ -1564,7 +1564,7 @@ static int keypad_release(struct inode *inode, struct file *file) return 0; } -static struct file_operations keypad_fops = { +static const struct file_operations keypad_fops = { .read = keypad_read, /* read */ .open = keypad_open, /* open */ .release = keypad_release, /* close */ diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c index 6771520..18840c6 100644 --- a/drivers/staging/phison/phison.c +++ b/drivers/staging/phison/phison.c @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = { ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations phison_ops = { +static const struct ata_port_operations phison_ops = { .inherits = &ata_bmdma_port_ops, .prereset = phison_pre_reset, }; diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 643b413..2ba07d4 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c @@ -1846,7 +1846,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) mutex_init(&psb->mcache_lock); psb->mcache_root = RB_ROOT; psb->mcache_timeout = msecs_to_jiffies(5000); - atomic_long_set(&psb->mcache_gen, 0); + atomic_long_set_unchecked(&psb->mcache_gen, 0); psb->trans_max_pages = 100; diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c index e22665c..a2a9390 100644 --- a/drivers/staging/pohmelfs/mcache.c +++ b/drivers/staging/pohmelfs/mcache.c @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start m->data = data; m->start = start; m->size = size; - m->gen = atomic_long_inc_return(&psb->mcache_gen); + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen); mutex_lock(&psb->mcache_lock); err = pohmelfs_mcache_insert(psb, m); diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h index 63391d2..976c496 100644 --- a/drivers/staging/pohmelfs/netfs.h +++ b/drivers/staging/pohmelfs/netfs.h @@ -571,7 +571,7 @@ struct pohmelfs_config; struct pohmelfs_sb { struct rb_root mcache_root; struct mutex mcache_lock; - atomic_long_t mcache_gen; + atomic_long_unchecked_t 
mcache_gen; unsigned long mcache_timeout; unsigned int idx; diff --git a/drivers/staging/ramzswap/ramzswap_drv.c b/drivers/staging/ramzswap/ramzswap_drv.c index d14bf91..7de8382 100644 --- a/drivers/staging/ramzswap/ramzswap_drv.c +++ b/drivers/staging/ramzswap/ramzswap_drv.c @@ -693,7 +693,7 @@ void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index) return; } -static struct block_device_operations ramzswap_devops = { +static const struct block_device_operations ramzswap_devops = { .ioctl = ramzswap_ioctl, .swap_slot_free_notify = ramzswap_slot_free_notify, .owner = THIS_MODULE diff --git a/drivers/staging/rtl8192u/ieee80211/proc.c b/drivers/staging/rtl8192u/ieee80211/proc.c index 6eda928..7e60f0e 100644 --- a/drivers/staging/rtl8192u/ieee80211/proc.c +++ b/drivers/staging/rtl8192u/ieee80211/proc.c @@ -99,7 +99,7 @@ static int crypto_info_open(struct inode *inode, struct file *file) return seq_open(file, &crypto_seq_ops); } -static struct file_operations proc_crypto_ops = { +static const struct file_operations proc_crypto_ops = { .open = crypto_info_open, .read = seq_read, .llseek = seq_lseek, diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c index ac2bf11..701e8d5 100644 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ b/drivers/staging/samsung-laptop/samsung-laptop.c @@ -269,7 +269,7 @@ static int update_status(struct backlight_device *bd) return 0; } -static struct backlight_ops backlight_ops = { +static const struct backlight_ops backlight_ops = { .get_brightness = get_brightness, .update_status = update_status, }; diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c index ecbde34..df37dd7 100644 --- a/drivers/staging/sep/sep_driver.c +++ b/drivers/staging/sep/sep_driver.c @@ -2637,7 +2637,7 @@ static struct pci_driver sep_pci_driver = { static dev_t sep_devno; /* the files operations structure of the driver */ -static struct file_operations sep_file_operations = { +static const struct file_operations sep_file_operations = { .owner = THIS_MODULE, .unlocked_ioctl = sep_ioctl, .poll = sep_poll, diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c index bc16fc07..b3827aa 100644 --- a/drivers/staging/vme/devices/vme_user.c +++ b/drivers/staging/vme/devices/vme_user.c @@ -136,7 +136,7 @@ static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long); static int __init vme_user_probe(struct device *, int, int); static int __exit vme_user_remove(struct device *, int, int); -static struct file_operations vme_user_fops = { +static const struct file_operations vme_user_fops = { .open = vme_user_open, .release = vme_user_release, .read = vme_user_read, diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index b24025d..39d4a85 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev, ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); if (ret < 2) return -EINVAL; - if (index < 0 || index > 0x7f) + if (index > 0x7f) return -EINVAL; pos += tmp; diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 9b53e8d..75b6c9a 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (printk_ratelimit()) atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", __func__, vpi, vci); - 
atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (length > ATM_MAX_AAL5_PDU) { atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", __func__, length, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (sarb->len < pdu_length) { atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", __func__, pdu_length, sarb->len, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", __func__, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char if (printk_ratelimit()) atm_err(instance, "%s: no memory for skb (length: %u)!\n", __func__, length); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto out; } @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); out: skb_trim(sarb, 0); } @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data) struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; usbatm_pop(vcc, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); skb = skb_dequeue(&instance->sndqueue); } @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag if (!left--) return sprintf(page, "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", - atomic_read(&atm_dev->stats.aal5.tx), - atomic_read(&atm_dev->stats.aal5.tx_err), - atomic_read(&atm_dev->stats.aal5.rx), - atomic_read(&atm_dev->stats.aal5.rx_err), - atomic_read(&atm_dev->stats.aal5.rx_drop)); + atomic_read_unchecked(&atm_dev->stats.aal5.tx), + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), + atomic_read_unchecked(&atm_dev->stats.aal5.rx), + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); if (!left--) { if (instance->disconnected) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index a1aacb0..a053738 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1640,7 +1640,7 @@ static const struct usb_device_id acm_ids[] = { { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_CDMA) }, - { } + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(usb, acm_ids); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 094c76b..42d7b46 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -342,7 +342,7 @@ static ssize_t wdm_write goto outnp; } - if (!file->f_flags && O_NONBLOCK) + if (!(file->f_flags & O_NONBLOCK)) r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); else diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 2250095..0bb116a 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -226,7 +226,7 @@ static const struct quirk_printer_struct quirk_printers[] = { { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* 
Kyocera Mita FS 820, by zut */ { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */ { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */ - { 0, 0 } + { 0, 0, 0 } }; static int usblp_wwait(struct usblp *usblp, int nonblock); @@ -1398,7 +1398,7 @@ static const struct usb_device_id usblp_ids[] = { { USB_INTERFACE_INFO(7, 1, 2) }, { USB_INTERFACE_INFO(7, 1, 3) }, { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */ - { } /* Terminating entry */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, usblp_ids); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 12742f1..670b6be 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2381,7 +2381,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown); #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) -struct usb_mon_operations *mon_ops; +const struct usb_mon_operations *mon_ops; /* * The registration is unlocked. @@ -2391,7 +2391,7 @@ struct usb_mon_operations *mon_ops; * symbols from usbcore, usbcore gets referenced and cannot be unloaded first. */ -int usb_mon_register (struct usb_mon_operations *ops) +int usb_mon_register (const struct usb_mon_operations *ops) { if (mon_ops) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 53467b5..efa96ae 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -3456,7 +3456,7 @@ static const struct usb_device_id hub_id_table[] = { .bDeviceClass = USB_CLASS_HUB}, { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, .bInterfaceClass = USB_CLASS_HUB}, - { } /* Terminating entry */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, hub_id_table); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index d6e3e41..9f0787f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index) buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); if (buf) { len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); - if (len > 0) { - smallbuf = kmalloc(++len, GFP_NOIO); + if (len++ > 0) { + smallbuf = kmalloc(len, GFP_NOIO); if (!smallbuf) return buf; memcpy(smallbuf, buf, len); diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index 94ecdbc..f91e996 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -1026,6 +1026,7 @@ static void kgdbdbgp_write_char(u8 chr) early_dbgp_write(NULL, &chr, 1); } +/* cannot be const, see kgdbdbgp_parse_config() */ static struct kgdb_io kgdbdbgp_io_ops = { .name = "kgdbdbgp", .read_char = kgdbdbgp_read_char, diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 19f5070..4d9cb08 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -419,7 +419,7 @@ static const struct pci_device_id pci_ids [] = { { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), .driver_data = (unsigned long) &ehci_pci_hc_driver, }, - { /* end: all zeroes */ } + { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, pci_ids); diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 6637e52..aabaaef 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -941,7 +941,7 @@ static const struct pci_device_id uhci_pci_ids[] = { { /* handle any USB UHCI controller */ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0), .driver_data = (unsigned long) &uhci_driver, - }, { /* end: 
all zeroes */ } + }, { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, uhci_pci_ids); diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c index 812dc28..4a8ae42 100644 --- a/drivers/usb/mon/mon_main.c +++ b/drivers/usb/mon/mon_main.c @@ -240,7 +240,7 @@ static struct notifier_block mon_nb = { /* * Ops */ -static struct usb_mon_operations mon_ops_0 = { +static const struct usb_mon_operations mon_ops_0 = { .urb_submit = mon_submit, .urb_submit_error = mon_submit_error, .urb_complete = mon_complete, diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h index dbb985d..1b4b62e 100644 --- a/drivers/usb/storage/debug.h +++ b/drivers/usb/storage/debug.h @@ -54,9 +54,9 @@ void usb_stor_show_sense( unsigned char key, #define US_DEBUGPX(x...) printk( x ) #define US_DEBUG(x) x #else -#define US_DEBUGP(x...) -#define US_DEBUGPX(x...) -#define US_DEBUG(x) +#define US_DEBUGP(x...) do {} while (0) +#define US_DEBUGPX(x...) do {} while (0) +#define US_DEBUG(x) do {} while (0) #endif #endif diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index a7d0bf9..a6d3cfa 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -122,7 +122,7 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); static struct us_unusual_dev us_unusual_dev_list[] = { # include "unusual_devs.h" - { } /* Terminating entry */ + { NULL, NULL, 0, 0, NULL } /* Terminating entry */ }; #undef UNUSUAL_DEV diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index 468bde7..ca4f88d 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -48,7 +48,7 @@ struct usb_device_id usb_storage_usb_ids[] = { # include "unusual_devs.h" - { } /* Terminating entry */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ }; EXPORT_SYMBOL_GPL(usb_storage_usb_ids); diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c index 3a8e033..996a0e4 100644 --- a/drivers/uwb/wlp/messages.c +++ b/drivers/uwb/wlp/messages.c @@ -920,7 +920,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb) size_t len = skb->len; size_t used; ssize_t result; - struct wlp_nonce enonce, rnonce; + struct wlp_nonce enonce = {{0}}, rnonce = {{0}}; enum wlp_assc_error assc_err; char enonce_buf[WLP_WSS_NONCE_STRSIZE]; char rnonce_buf[WLP_WSS_NONCE_STRSIZE]; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 0b99783..9ee3be1 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -357,7 +357,7 @@ static int init_used(struct vhost_virtqueue *vq, return get_user(vq->last_used_idx, &used->idx); } -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL, *pollstart = NULL, *pollstop = NULL; diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 8dce251..bac16345 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -111,7 +111,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl) return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL); } -static struct backlight_ops atmel_lcdc_bl_ops = { +static const struct backlight_ops atmel_lcdc_bl_ops = { .update_status = atmel_bl_update_status, .get_brightness = atmel_bl_get_brightness, }; diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 34a0851..dd9de2e 100644 --- a/drivers/video/aty/aty128fb.c +++ 
b/drivers/video/aty/aty128fb.c @@ -1786,7 +1786,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops aty128_bl_data = { +static const struct backlight_ops aty128_bl_data = { .get_brightness = aty128_bl_get_brightness, .update_status = aty128_bl_update_status, }; diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index f8d69ad..36dd05c 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -2221,7 +2221,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops aty_bl_data = { +static const struct backlight_ops aty_bl_data = { .get_brightness = aty_bl_get_brightness, .update_status = aty_bl_update_status, }; diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c index 256966e..9b811dd 100644 --- a/drivers/video/aty/radeon_backlight.c +++ b/drivers/video/aty/radeon_backlight.c @@ -128,7 +128,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd) return bd->props.brightness; } -static struct backlight_ops radeon_bl_data = { +static const struct backlight_ops radeon_bl_data = { .get_brightness = radeon_bl_get_brightness, .update_status = radeon_bl_update_status, }; diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 38ffc3f..c789c46 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -155,7 +155,7 @@ out: return -EINVAL; } -static struct backlight_ops pm860x_backlight_ops = { +static const struct backlight_ops pm860x_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = pm860x_backlight_update_status, .get_brightness = pm860x_backlight_get_brightness, diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index b2b2c7b..209acc1 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -92,7 +92,7 @@ static int max8925_backlight_get_brightness(struct backlight_device *bl) return ret; } -static struct backlight_ops max8925_backlight_ops = { +static const struct backlight_ops max8925_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = max8925_backlight_update_status, .get_brightness = max8925_backlight_get_brightness, diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index f53b9f1..958bf4e 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) rc = -ENODEV; goto out; } - if (cmap->start < 0 || (!info->fbops->fb_setcolreg && - !info->fbops->fb_setcmap)) { + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) { rc = -EINVAL; goto out1; } diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 731fce6..e69cfbe 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, image->dx += image->width + 8; } } else if (rotate == FB_ROTATE_UD) { - for (x = 0; x < num && image->dx >= 0; x++) { + for (x = 0; x < num && (__s32)image->dx >= 0; x++) { info->fbops->fb_imageblit(info, image); image->dx -= image->width + 8; } @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image, image->dy += image->height + 8; } } else if (rotate == FB_ROTATE_CCW) { - for (x = 0; x < num && image->dy >= 0; x++) { + for (x = 0; x < num 
&& (__s32)image->dy >= 0; x++) {
 		info->fbops->fb_imageblit(info, image);
 		image->dy -= image->height + 8;
 	}
@@ -1119,7 +1119,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 			return -EFAULT;
 		if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
 			return -EINVAL;
-		if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
+		if (con2fb.framebuffer >= FB_MAX)
 			return -EINVAL;
 		if (!registered_fb[con2fb.framebuffer])
 			request_module("fb%d", con2fb.framebuffer);
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 563a98b..081bff4 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -46,7 +46,7 @@
 #ifdef DEBUG
 #define DPRINTK(fmt, args...) printk(fmt,## args)
 #else
-#define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) do {} while (0)
 #endif
 #define FBMON_FIX_HEADER 1
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
index f5bedee..28c6028 100644
--- a/drivers/video/i810/i810_accel.c
+++ b/drivers/video/i810/i810_accel.c
@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
 		}
 	}
 	printk("ringbuffer lockup!!!\n");
+	printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
 	i810_report_error(mmio);
 	par->dev_flags |= LOCKUP;
 	info->pixmap.scan_align = 1;
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 5743ea2..c73b1c7 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
-	{ 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
 };
 static struct pci_driver i810fb_driver = {
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 0a4dbdc..b4954d6 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -40,240 +40,240 @@ static const struct fb_videomode modedb[] = {
     { /* 640x400 @ 70 Hz, 31.5 kHz hsync */
	NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 640x480 @ 60 Hz, 31.5 kHz hsync */
	NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 800x600 @ 56 Hz, 35.15 kHz hsync */
	NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
	NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
-	0, FB_VMODE_INTERLACED
+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 640x400 @ 85 Hz, 37.86 kHz hsync */
	NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
-	FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 640x480 @ 72 Hz, 36.5 kHz hsync */
	NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 640x480 @ 75 Hz, 37.50 kHz hsync */
	NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 800x600 @ 60 Hz, 37.8 kHz hsync */
	NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 640x480 @ 85 Hz, 43.27 kHz hsync */
	NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
	NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
-	0, FB_VMODE_INTERLACED
+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 800x600 @ 72 Hz, 48.0 kHz hsync */
	NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1024x768 @ 60 Hz, 48.4 kHz hsync */
	NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 640x480 @ 100 Hz, 53.01 kHz hsync */
	NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x864 @ 60 Hz, 53.5 kHz hsync */
	NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 800x600 @ 85 Hz, 55.84 kHz hsync */
	NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1024x768 @ 70 Hz, 56.5 kHz hsync */
	NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
	NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
-	0, FB_VMODE_INTERLACED
+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 800x600 @ 100 Hz, 64.02 kHz hsync */
	NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1024x768 @ 76 Hz, 62.5 kHz hsync */
	NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x864 @ 70 Hz, 62.4 kHz hsync */
	NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
	NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1400x1050 @ 60Hz, 63.9 kHz hsync */
	NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
	NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
	NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1024x768 @ 85 Hz, 70.24 kHz hsync */
	NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x864 @ 78 Hz, 70.8 kHz hsync */
	NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
	NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1600x1200 @ 60Hz, 75.00 kHz hsync */
	NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x864 @ 84 Hz, 76.0 kHz hsync */
	NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
	NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1024x768 @ 100Hz, 80.21 kHz hsync */
	NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
	NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
	NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x864 @ 100 Hz, 89.62 kHz hsync */
	NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
	NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
	NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
	NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
	NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
	NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1800x1440 @ 64Hz, 96.15 kHz hsync */
	NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1800x1440 @ 70Hz, 104.52 kHz hsync */
	NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 512x384 @ 78 Hz, 31.50 kHz hsync */
	NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 512x384 @ 85 Hz, 34.38 kHz hsync */
	NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
	NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
	NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 320x240 @ 72 Hz, 36.5 kHz hsync */
	NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
	NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 400x300 @ 60 Hz, 37.8 kHz hsync */
	NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 400x300 @ 72 Hz, 48.0 kHz hsync */
	NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
	NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 480x300 @ 60 Hz, 37.8 kHz hsync */
	NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 480x300 @ 63 Hz, 39.6 kHz hsync */
	NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 480x300 @ 72 Hz, 48.0 kHz hsync */
	NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
-	0, FB_VMODE_DOUBLE
+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
     }, { /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
	NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
	FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-	FB_VMODE_NONINTERLACED
+	FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
	NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
	NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
	NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
-	0, FB_VMODE_NONINTERLACED
+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
	NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
-	0, FB_VMODE_INTERLACED
+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
     }, { /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
	NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
-	0, FB_VMODE_INTERLACED
+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
     },
 };
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 2fb552a..6aac6d1 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
 	return bd->props.brightness;
 }
-static struct backlight_ops nvidia_bl_ops = {
+static const struct backlight_ops nvidia_bl_ops = {
 	.get_brightness = nvidia_bl_get_brightness,
 	.update_status = nvidia_bl_update_status,
 };
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index aaf5d30..7e9cadf 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -319,7 +319,7 @@ static int taal_bl_get_intensity(struct backlight_device *dev)
 	return 0;
 }
-static struct backlight_ops taal_bl_ops = {
+static const struct backlight_ops taal_bl_ops = {
 	.get_brightness = taal_bl_get_intensity,
 	.update_status = taal_bl_update_status,
 };
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 618f36b..da38818 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
 	return bd->props.brightness;
 }
-static struct backlight_ops riva_bl_ops = {
+static const struct backlight_ops riva_bl_ops = {
 	.get_brightness = riva_bl_get_brightness,
 	.update_status = riva_bl_update_status,
 };
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7b8839e..39b9bf6 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include