-rw-r--r--  main/linux-grsec/APKBUILD                                                                                             |  12
-rw-r--r--  main/linux-grsec/grsecurity-2.2.2-3.2.7-201202202005.patch (renamed from main/linux-grsec/grsecurity-2.2.2-3.2.6-201202131824.patch) | 427
-rw-r--r--  main/linux-grsec/kernelconfig.x86                                                                                     |   6
-rw-r--r--  main/linux-grsec/kernelconfig.x86_64                                                                                  |   6
4 files changed, 290 insertions, 161 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD index 1f5b17a825..90bf9d1be6 100644 --- a/main/linux-grsec/APKBUILD +++ b/main/linux-grsec/APKBUILD @@ -2,7 +2,7 @@ _flavor=grsec pkgname=linux-${_flavor} -pkgver=3.2.6 +pkgver=3.2.7 _kernver=3.2 pkgrel=0 pkgdesc="Linux kernel with grsecurity" @@ -14,7 +14,7 @@ _config=${config:-kernelconfig.${CARCH}} install= source="http://ftp.kernel.org/pub/linux/kernel/v3.0/linux-$_kernver.tar.bz2 http://ftp.kernel.org/pub/linux/kernel/v3.0/patch-$pkgver.bz2 - grsecurity-2.2.2-3.2.6-201202131824.patch + grsecurity-2.2.2-3.2.7-201202202005.patch 0004-arp-flush-arp-cache-on-device-change.patch @@ -140,10 +140,10 @@ dev() { } md5sums="7ceb61f87c097fc17509844b71268935 linux-3.2.tar.bz2 -2bd4679899df503177a3b61ae2068749 patch-3.2.6.bz2 -905e73610bfdb7fd497fa95adcbea2ce grsecurity-2.2.2-3.2.6-201202131824.patch +899624bffed6a19578613b672cc9483f patch-3.2.7.bz2 +1a1512cc453f2470a42968e015a26eff grsecurity-2.2.2-3.2.7-201202202005.patch 776adeeb5272093574f8836c5037dd7d 0004-arp-flush-arp-cache-on-device-change.patch f3eda7112ef074a4121ec6de943c63ee x86-centaur-enable-cx8-for-via-eden-too.patch 62cc7d7b5ba7ef05b72ff91c0411c189 linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch -bd0b139de82316d44cf3376533daddb8 kernelconfig.x86 -84644f7193b0b9d9bd474b5ec322a0f8 kernelconfig.x86_64" +339d4dd7f74b87d13adff5d2d2abf86a kernelconfig.x86 +68204744d18679153a2a1e932290f93d kernelconfig.x86_64" diff --git a/main/linux-grsec/grsecurity-2.2.2-3.2.6-201202131824.patch b/main/linux-grsec/grsecurity-2.2.2-3.2.7-201202202005.patch index 2ac63128e2..816b75a7d9 100644 --- a/main/linux-grsec/grsecurity-2.2.2-3.2.6-201202131824.patch +++ b/main/linux-grsec/grsecurity-2.2.2-3.2.7-201202202005.patch @@ -186,7 +186,7 @@ index 81c287f..d456d02 100644 pcd. 
[PARIDE] diff --git a/Makefile b/Makefile -index 47fe496..c50bd2a 100644 +index d1bdc90..e95fe1a 100644 --- a/Makefile +++ b/Makefile @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -10910,7 +10910,7 @@ index 566e803..b9521e9 100644 } diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h -index 1c66d30..23ab77d 100644 +index 1c66d30..e66922c 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -10,6 +10,9 @@ @@ -10939,7 +10939,12 @@ index 1c66d30..23ab77d 100644 { unsigned ret; -@@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len) +@@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len) + ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), + "=d" (len)), + "1" (to), "2" (from), "3" (len) +- : "memory", "rcx", "r8", "r9", "r10", "r11"); ++ : "memory", "rcx", "r8", "r9", "r11"); return ret; } @@ -41441,7 +41446,7 @@ index 608c1c3..7d040a8 100644 return rc; } diff --git a/fs/exec.c b/fs/exec.c -index 3625464..7949233 100644 +index 3625464..7c7ce8b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,12 +55,28 @@ @@ -41504,7 +41509,25 @@ index 3625464..7949233 100644 return NULL; if (write) { -@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + if (size <= ARG_MAX) + return page; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ // only allow 1MB for argv+env on suid/sgid binaries ++ // to prevent easy ASLR exhaustion ++ if (((bprm->cred->euid != current_euid()) || ++ (bprm->cred->egid != current_egid())) && ++ (size > (1024 * 1024))) { ++ put_page(page); ++ return NULL; ++ } ++#endif ++ + /* + * Limit to 1/4-th the stack size for the argv+env strings. 
+ * This ensures that: +@@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; @@ -41516,7 +41539,7 @@ index 3625464..7949233 100644 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); -@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) +@@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm) mm->stack_vm = mm->total_vm = 1; up_write(&mm->mmap_sem); bprm->p = vma->vm_end - sizeof(void *); @@ -41529,7 +41552,7 @@ index 3625464..7949233 100644 return 0; err: up_write(&mm->mmap_sem); -@@ -396,19 +415,7 @@ err: +@@ -396,19 +426,7 @@ err: return err; } @@ -41550,7 +41573,7 @@ index 3625464..7949233 100644 { const char __user *native; -@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) +@@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr) compat_uptr_t compat; if (get_user(compat, argv.ptr.compat + nr)) @@ -41567,7 +41590,7 @@ index 3625464..7949233 100644 return native; } -@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max) +@@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max) if (!p) break; @@ -41576,7 +41599,7 @@ index 3625464..7949233 100644 return -EFAULT; if (i++ >= max) -@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, +@@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv, ret = -EFAULT; str = get_user_arg_ptr(argv, argc); @@ -41585,7 +41608,7 @@ index 3625464..7949233 100644 goto out; len = strnlen_user(str, MAX_ARG_STRLEN); -@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, +@@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv, int r; mm_segment_t oldfs = get_fs(); struct user_arg_ptr argv = { @@ -41594,7 +41617,7 @@ index 3625464..7949233 100644 }; set_fs(KERNEL_DS); -@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) unsigned long new_end = old_end - shift; struct mmu_gather tlb; @@ -41604,7 +41627,7 @@ index 3625464..7949233 100644 /* * ensure there are no vmas between where we want to go -@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) +@@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) if (vma != find_vma(mm, new_start)) return -EFAULT; @@ -41615,7 +41638,7 @@ index 3625464..7949233 100644 /* * cover the whole range: [new_start, old_end) */ -@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); @@ -41626,7 +41649,7 @@ index 3625464..7949233 100644 stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; -@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm, +@@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->exec -= stack_shift; down_write(&mm->mmap_sem); @@ -41655,7 +41678,7 @@ index 3625464..7949233 100644 /* * Adjust stack execute permissions; explicitly enable for * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone -@@ -718,13 +746,6 @@ int setup_arg_pages(struct 
linux_binprm *bprm, +@@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm, goto out_unlock; BUG_ON(prev != vma); @@ -41669,7 +41692,7 @@ index 3625464..7949233 100644 /* mprotect_fixup is overkill to remove the temporary stack flags */ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; -@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset, +@@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset, old_fs = get_fs(); set_fs(get_ds()); /* The cast to a user pointer is valid due to the set_fs() */ @@ -41678,7 +41701,7 @@ index 3625464..7949233 100644 set_fs(old_fs); return result; } -@@ -1067,6 +1088,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) +@@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf) perf_event_comm(tsk); } @@ -41700,7 +41723,7 @@ index 3625464..7949233 100644 int flush_old_exec(struct linux_binprm * bprm) { int retval; -@@ -1081,6 +1117,7 @@ int flush_old_exec(struct linux_binprm * bprm) +@@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm) set_mm_exe_file(bprm->mm, bprm->file); @@ -41708,7 +41731,7 @@ index 3625464..7949233 100644 /* * Release all of the old mmap stuff */ -@@ -1112,10 +1149,6 @@ EXPORT_SYMBOL(would_dump); +@@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump); void setup_new_exec(struct linux_binprm * bprm) { @@ -41719,7 +41742,7 @@ index 3625464..7949233 100644 arch_pick_mmap_layout(current->mm); /* This is the point of no return */ -@@ -1126,18 +1159,7 @@ void setup_new_exec(struct linux_binprm * bprm) +@@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm) else set_dumpable(current->mm, suid_dumpable); @@ -41739,7 +41762,7 @@ index 3625464..7949233 100644 /* Set the new mm task size. We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on -@@ -1247,7 +1269,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) +@@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) } rcu_read_unlock(); @@ -41748,7 +41771,7 @@ index 3625464..7949233 100644 bprm->unsafe |= LSM_UNSAFE_SHARE; } else { res = -EAGAIN; -@@ -1442,6 +1464,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) +@@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) EXPORT_SYMBOL(search_binary_handler); @@ -41759,7 +41782,7 @@ index 3625464..7949233 100644 /* * sys_execve() executes a new program. 
*/ -@@ -1450,6 +1476,11 @@ static int do_execve_common(const char *filename, +@@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename, struct user_arg_ptr envp, struct pt_regs *regs) { @@ -41771,7 +41794,7 @@ index 3625464..7949233 100644 struct linux_binprm *bprm; struct file *file; struct files_struct *displaced; -@@ -1457,6 +1488,8 @@ static int do_execve_common(const char *filename, +@@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename, int retval; const struct cred *cred = current_cred(); @@ -41780,7 +41803,7 @@ index 3625464..7949233 100644 /* * We move the actual failure in case of RLIMIT_NPROC excess from * set*uid() to execve() because too many poorly written programs -@@ -1497,12 +1530,27 @@ static int do_execve_common(const char *filename, +@@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename, if (IS_ERR(file)) goto out_unmark; @@ -41808,7 +41831,7 @@ index 3625464..7949233 100644 retval = bprm_mm_init(bprm); if (retval) goto out_file; -@@ -1532,11 +1580,46 @@ static int do_execve_common(const char *filename, +@@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename, if (retval < 0) goto out; @@ -41856,7 +41879,7 @@ index 3625464..7949233 100644 current->fs->in_exec = 0; current->in_execve = 0; acct_update_integrals(current); -@@ -1545,6 +1628,14 @@ static int do_execve_common(const char *filename, +@@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename, put_files_struct(displaced); return retval; @@ -41871,7 +41894,7 @@ index 3625464..7949233 100644 out: if (bprm->mm) { acct_arg_size(bprm, 0); -@@ -1618,7 +1709,7 @@ static int expand_corename(struct core_name *cn) +@@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn) { char *old_corename = cn->corename; @@ -41880,7 +41903,7 @@ index 3625464..7949233 100644 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); if (!cn->corename) { -@@ -1715,7 +1806,7 @@ static int format_corename(struct core_name *cn, long signr) +@@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr) int pid_in_pattern = 0; int err = 0; @@ -41889,7 +41912,7 @@ index 3625464..7949233 100644 cn->corename = kmalloc(cn->size, GFP_KERNEL); cn->used = 0; -@@ -1812,6 +1903,218 @@ out: +@@ -1812,6 +1914,218 @@ out: return ispipe; } @@ -42108,7 +42131,7 @@ index 3625464..7949233 100644 static int zap_process(struct task_struct *start, int exit_code) { struct task_struct *t; -@@ -2023,17 +2326,17 @@ static void wait_for_dump_helpers(struct file *file) +@@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file) pipe = file->f_path.dentry->d_inode->i_pipe; pipe_lock(pipe); @@ -42131,7 +42154,7 @@ index 3625464..7949233 100644 pipe_unlock(pipe); } -@@ -2094,7 +2397,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) int retval = 0; int flag = 0; int ispipe; @@ -42140,7 +42163,7 @@ index 3625464..7949233 100644 struct coredump_params cprm = { .signr = signr, .regs = regs, -@@ -2109,6 +2412,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) audit_core_dumps(signr); @@ -42150,7 +42173,7 @@ index 3625464..7949233 100644 binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; -@@ -2176,7 +2482,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2176,7 +2493,7 @@ void 
do_coredump(long signr, int exit_code, struct pt_regs *regs) } cprm.limit = RLIM_INFINITY; @@ -42159,7 +42182,7 @@ index 3625464..7949233 100644 if (core_pipe_limit && (core_pipe_limit < dump_count)) { printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", task_tgid_vnr(current), current->comm); -@@ -2203,6 +2509,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) +@@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) } else { struct inode *inode; @@ -42168,7 +42191,7 @@ index 3625464..7949233 100644 if (cprm.limit < binfmt->min_coredump) goto fail_unlock; -@@ -2246,7 +2554,7 @@ close_fail: +@@ -2246,7 +2565,7 @@ close_fail: filp_close(cprm.file, NULL); fail_dropcount: if (ispipe) @@ -42177,7 +42200,7 @@ index 3625464..7949233 100644 fail_unlock: kfree(cn.corename); fail_corename: -@@ -2265,7 +2573,7 @@ fail: +@@ -2265,7 +2584,7 @@ fail: */ int dump_write(struct file *file, const void *addr, int nr) { @@ -46563,10 +46586,18 @@ index d33418f..2a5345e 100644 return -EINVAL; diff --git a/fs/seq_file.c b/fs/seq_file.c -index dba43c3..1dfaf14 100644 +index dba43c3..9fb8511 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c -@@ -40,6 +40,9 @@ int seq_open(struct file *file, const struct seq_operations *op) +@@ -9,6 +9,7 @@ + #include <linux/module.h> + #include <linux/seq_file.h> + #include <linux/slab.h> ++#include <linux/sched.h> + + #include <asm/uaccess.h> + #include <asm/page.h> +@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op) memset(p, 0, sizeof(*p)); mutex_init(&p->lock); p->op = op; @@ -46576,7 +46607,7 @@ index dba43c3..1dfaf14 100644 /* * Wrappers around seq_open(e.g. swaps_open) need to be -@@ -76,7 +79,8 @@ static int traverse(struct seq_file *m, loff_t offset) +@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset) return 0; } if (!m->buf) { @@ -46586,7 +46617,7 @@ index dba43c3..1dfaf14 100644 if (!m->buf) return -ENOMEM; } -@@ -116,7 +120,8 @@ static int traverse(struct seq_file *m, loff_t offset) +@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset) Eoverflow: m->op->stop(m, p); kfree(m->buf); @@ -46596,7 +46627,7 @@ index dba43c3..1dfaf14 100644 return !m->buf ? 
-ENOMEM : -EAGAIN; } -@@ -169,7 +174,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) +@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) m->version = file->f_version; /* grab buffer if we didn't have one */ if (!m->buf) { @@ -46606,7 +46637,7 @@ index dba43c3..1dfaf14 100644 if (!m->buf) goto Enomem; } -@@ -210,7 +216,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) +@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) goto Fill; m->op->stop(m, p); kfree(m->buf); @@ -46616,7 +46647,7 @@ index dba43c3..1dfaf14 100644 if (!m->buf) goto Enomem; m->count = 0; -@@ -549,7 +556,7 @@ static void single_stop(struct seq_file *p, void *v) +@@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v) int single_open(struct file *file, int (*show)(struct seq_file *, void *), void *data) { @@ -47033,10 +47064,10 @@ index 23ce927..e274cc1 100644 kfree(s); diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig new file mode 100644 -index 0000000..8faa28b +index 0000000..41df561 --- /dev/null +++ b/grsecurity/Kconfig -@@ -0,0 +1,1073 @@ +@@ -0,0 +1,1075 @@ +# +# grecurity configuration +# @@ -47243,7 +47274,7 @@ index 0000000..8faa28b + +endchoice + -+menu "Address Space Protection" ++menu "Memory Protections" +depends on GRKERNSEC + +config GRKERNSEC_KMEM @@ -47300,7 +47331,7 @@ index 0000000..8faa28b + protect your kernel against modification, use the RBAC system. + +config GRKERNSEC_PROC_MEMMAP -+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]" ++ bool "Harden ASLR against information leaks and entropy reduction" + default y if (PAX_NOEXEC || PAX_ASLR) + depends on PAX_NOEXEC || PAX_ASLR + help @@ -47311,9 +47342,11 @@ index 0000000..8faa28b + dangerous sources of information, this option causes reads of sensitive + /proc/<pid> entries where the file descriptor was opened in a different + task than the one performing the read. Such attempts are logged. -+ If you use PaX it is greatly recommended that you say Y here as it -+ closes up a hole that makes the full ASLR useless for suid -+ binaries. ++ Finally, this option limits argv/env strings for suid/sgid binaries ++ to 1MB to prevent a complete exhaustion of the stack entropy provided ++ by ASLR. ++ If you use PaX it is essential that you say Y here as it closes up ++ several holes that make full ASLR useless for suid/sgid binaries. 
+ +config GRKERNSEC_BRUTE + bool "Deter exploit bruteforcing" @@ -48156,7 +48189,7 @@ index 0000000..1b9afa9 +endif diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c new file mode 100644 -index 0000000..6e989da +index 0000000..cf294ac --- /dev/null +++ b/grsecurity/gracl.c @@ -0,0 +1,4163 @@ @@ -50653,8 +50686,8 @@ index 0000000..6e989da + + /* don't change the role if we're not a privileged process */ + if (role && task->role != role && -+ (((role->roletype & GR_ROLE_USER) && gr_acl_is_capable(CAP_SETUID)) || -+ ((role->roletype & GR_ROLE_GROUP) && gr_acl_is_capable(CAP_SETGID)))) ++ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) || ++ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID)))) + return; + + /* perform subject lookup in possibly new role @@ -60374,7 +60407,7 @@ index 2148b12..519b820 100644 static inline void anon_vma_merge(struct vm_area_struct *vma, diff --git a/include/linux/sched.h b/include/linux/sched.h -index 1c4f3e9..dafcd27 100644 +index 1c4f3e9..b4e4851 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,6 +101,7 @@ struct bio_list; @@ -60491,7 +60524,7 @@ index 1c4f3e9..dafcd27 100644 +#ifdef CONFIG_GRKERNSEC + /* grsecurity */ +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ long long exec_id; ++ u64 exec_id; +#endif +#ifdef CONFIG_GRKERNSEC_SETXID + const struct cred *delayed_cred; @@ -60650,7 +60683,7 @@ index e8c619d..e0cbd1c 100644 /* Maximum number of letters for an LSM name string */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h -index 0b69a46..4796016 100644 +index 0b69a46..b2ffa4c 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -24,6 +24,9 @@ struct seq_file { @@ -60658,7 +60691,7 @@ index 0b69a46..4796016 100644 const struct seq_operations *op; int poll_event; +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ long long exec_id; ++ u64 exec_id; +#endif void *private; }; @@ -65836,36 +65869,6 @@ index 9feffa4..54058df 100644 rdp->dynticks->dynticks_nesting, rdp->dynticks->dynticks_nmi_nesting, rdp->dynticks_fqs); -diff --git a/kernel/relay.c b/kernel/relay.c -index 226fade..b6f803a 100644 ---- a/kernel/relay.c -+++ b/kernel/relay.c -@@ -164,10 +164,14 @@ depopulate: - */ - static struct rchan_buf *relay_create_buf(struct rchan *chan) - { -- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); -+ struct rchan_buf *buf; -+ -+ if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) -+ return NULL; -+ -+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); - if (!buf) - return NULL; -- - buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL); - if (!buf->padding) - goto free_buf; -@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename, - - if (!(subbuf_size && n_subbufs)) - return NULL; -+ if (subbuf_size > UINT_MAX / n_subbufs) -+ return NULL; - - chan = kzalloc(sizeof(struct rchan), GFP_KERNEL); - if (!chan) diff --git a/kernel/resource.c b/kernel/resource.c index 7640b3a..5879283 100644 --- a/kernel/resource.c @@ -68676,7 +68679,7 @@ index 4f4f53b..9511904 100644 capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); diff --git a/mm/mmap.c b/mm/mmap.c -index eae90af..51ca80b 100644 +index eae90af..44552cf 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -46,6 +46,16 @@ @@ -69301,20 +69304,60 @@ index eae90af..51ca80b 100644 } unsigned long -@@ -1638,6 +1864,28 @@ out: - return prev ? 
prev->vm_next : vma; - } +@@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) + + EXPORT_SYMBOL(find_vma); +-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ ++/* ++ * Same as find_vma, but also return a pointer to the previous VMA in *pprev. ++ * Note: pprev is set to NULL when return value is NULL. ++ */ + struct vm_area_struct * + find_vma_prev(struct mm_struct *mm, unsigned long addr, + struct vm_area_struct **pprev) + { +- struct vm_area_struct *vma = NULL, *prev = NULL; +- struct rb_node *rb_node; +- if (!mm) +- goto out; ++ struct vm_area_struct *vma; + +- /* Guard against addr being lower than the first VMA */ +- vma = mm->mmap; ++ vma = find_vma(mm, addr); ++ *pprev = vma ? vma->vm_prev : NULL; ++ return vma; ++} + +- /* Go through the RB tree quickly. */ +- rb_node = mm->mm_rb.rb_node; +#ifdef CONFIG_PAX_SEGMEXEC +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) +{ + struct vm_area_struct *vma_m; -+ + +- while (rb_node) { +- struct vm_area_struct *vma_tmp; +- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); +- +- if (addr < vma_tmp->vm_end) { +- rb_node = rb_node->rb_left; +- } else { +- prev = vma_tmp; +- if (!prev->vm_next || (addr < prev->vm_next->vm_end)) +- break; +- rb_node = rb_node->rb_right; +- } + BUG_ON(!vma || vma->vm_start >= vma->vm_end); + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { + BUG_ON(vma->vm_mirror); + return NULL; -+ } + } +- +-out: +- *pprev = prev; +- return prev ? prev->vm_next : vma; + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end); + vma_m = vma->vm_mirror; + BUG_ON(!vma_m || vma_m->vm_mirror != vma); @@ -69324,13 +69367,12 @@ index eae90af..51ca80b 100644 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root); + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED)); + return vma_m; -+} + } +#endif -+ + /* * Verify that the stack growth is acceptable and - * update accounting. This is shared with both the -@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns return -ENOMEM; /* Stack limit test */ @@ -69338,7 +69380,7 @@ index eae90af..51ca80b 100644 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; -@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns locked = mm->locked_vm + grow; limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); limit >>= PAGE_SHIFT; @@ -69346,7 +69388,7 @@ index eae90af..51ca80b 100644 if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } -@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns +@@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. 
*/ @@ -69404,7 +69446,7 @@ index eae90af..51ca80b 100644 unsigned long size, grow; size = address - vma->vm_start; -@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) +@@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) } } } @@ -69413,7 +69455,7 @@ index eae90af..51ca80b 100644 vma_unlock_anon_vma(vma); khugepaged_enter_vma_merge(vma); return error; -@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { int error; @@ -69422,7 +69464,7 @@ index eae90af..51ca80b 100644 /* * We must make sure the anon_vma is allocated -@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma, if (error) return error; @@ -69438,7 +69480,7 @@ index eae90af..51ca80b 100644 vma_lock_anon_vma(vma); /* -@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma, */ /* Somebody else might have raced and expanded it already */ @@ -69457,7 +69499,7 @@ index eae90af..51ca80b 100644 size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; -@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma, +@@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma, if (!error) { vma->vm_start = address; vma->vm_pgoff -= grow; @@ -69480,7 +69522,7 @@ index eae90af..51ca80b 100644 khugepaged_enter_vma_merge(vma); return error; } -@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) +@@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) do { long nrpages = vma_pages(vma); @@ -69494,7 +69536,7 @@ index eae90af..51ca80b 100644 mm->total_vm -= nrpages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); -@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { @@ -69511,7 +69553,7 @@ index eae90af..51ca80b 100644 rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; tail_vma = vma; -@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct *new; int err = -ENOMEM; @@ -69545,7 +69587,7 @@ index eae90af..51ca80b 100644 /* most fields are the same, copy all, and then fixup */ *new = *vma; -@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } @@ -69568,7 +69610,7 @@ index eae90af..51ca80b 100644 pol = mpol_dup(vma_policy(vma)); if (IS_ERR(pol)) { err = PTR_ERR(pol); -@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -69611,7 +69653,7 @@ index eae90af..51ca80b 100644 /* Success. 
*/ if (!err) return 0; -@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, removed_exe_file_vma(mm); fput(new->vm_file); } @@ -69631,7 +69673,7 @@ index eae90af..51ca80b 100644 kmem_cache_free(vm_area_cachep, new); out_err: return err; -@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, +@@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) { @@ -69647,7 +69689,7 @@ index eae90af..51ca80b 100644 if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; -@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, * work. This now handles partial unmappings. * Jeremy Fitzhardinge <jeremy@goop.org> */ @@ -69678,7 +69720,7 @@ index eae90af..51ca80b 100644 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; -@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) +@@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) /* Fix up all other VM information */ remove_vma_list(mm, vma); @@ -69687,7 +69729,7 @@ index eae90af..51ca80b 100644 return 0; } -@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +@@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) profile_munmap(addr); @@ -69716,7 +69758,7 @@ index eae90af..51ca80b 100644 /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some -@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; @@ -69724,7 +69766,7 @@ index eae90af..51ca80b 100644 len = PAGE_ALIGN(len); if (!len) -@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len) flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -69756,7 +69798,7 @@ index eae90af..51ca80b 100644 locked += mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; -@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* * Clear old maps. this also does some error checking for us */ @@ -69783,7 +69825,7 @@ index eae90af..51ca80b 100644 return -ENOMEM; /* Can we just expand an old private anonymous mapping? 
*/ -@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { @@ -69792,7 +69834,7 @@ index eae90af..51ca80b 100644 return -ENOMEM; } -@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) +@@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) vma_link(mm, vma, prev, rb_link, rb_parent); out: perf_event_mmap(vma); @@ -69807,7 +69849,7 @@ index eae90af..51ca80b 100644 return addr; } -@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm) +@@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm) * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. */ @@ -69819,7 +69861,7 @@ index eae90af..51ca80b 100644 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } -@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +@@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; @@ -69833,7 +69875,7 @@ index eae90af..51ca80b 100644 /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index -@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +@@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -69856,7 +69898,7 @@ index eae90af..51ca80b 100644 return 0; } -@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, struct rb_node **rb_link, *rb_parent; struct mempolicy *pol; @@ -69865,7 +69907,7 @@ index eae90af..51ca80b 100644 /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. 
-@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return NULL; } @@ -69905,7 +69947,7 @@ index eae90af..51ca80b 100644 /* * Return true if the calling process may expand its vm space by the passed * number of pages -@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) +@@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages) unsigned long lim; lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; @@ -69914,7 +69956,7 @@ index eae90af..51ca80b 100644 if (cur + npages > lim) return 0; return 1; -@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm, +@@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm, vma->vm_start = addr; vma->vm_end = addr + len; @@ -78181,10 +78223,10 @@ index 0000000..a5eabce +} diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c new file mode 100644 -index 0000000..51f747e +index 0000000..008f159 --- /dev/null +++ b/tools/gcc/kernexec_plugin.c -@@ -0,0 +1,348 @@ +@@ -0,0 +1,427 @@ +/* + * Copyright 2011 by the PaX Team <pageexec@freemail.hu> + * Licensed under the GPL v2 @@ -78232,13 +78274,32 @@ index 0000000..51f747e + .help = "method=[bts|or]\tinstrumentation method\n" +}; + ++static unsigned int execute_kernexec_reload(void); +static unsigned int execute_kernexec_fptr(void); +static unsigned int execute_kernexec_retaddr(void); +static bool kernexec_cmodel_check(void); + -+static void (*kernexec_instrument_fptr)(gimple_stmt_iterator); ++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *); +static void (*kernexec_instrument_retaddr)(rtx); + ++static struct gimple_opt_pass kernexec_reload_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_reload", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_reload, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ +static struct gimple_opt_pass kernexec_fptr_pass = { + .pass = { + .type = GIMPLE_PASS, @@ -78294,15 +78355,66 @@ index 0000000..51f747e +} + +/* ++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered ++ */ ++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_movabs_stmt; ++ ++ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : ); ++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL); ++ gimple_asm_set_volatile(asm_movabs_stmt, true); ++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(asm_movabs_stmt); ++} ++ ++/* ++ * find all asm() stmts that clobber r10 and add a reload of r10 ++ */ ++static unsigned int execute_kernexec_reload(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: __asm__ ("" : : : "r10"); ++ gimple asm_stmt; ++ size_t nclobbers; ++ ++ // is it an asm ... ++ asm_stmt = gsi_stmt(gsi); ++ if (gimple_code(asm_stmt) != GIMPLE_ASM) ++ continue; ++ ++ // ... 
clobbering r10 ++ nclobbers = gimple_asm_nclobbers(asm_stmt); ++ while (nclobbers--) { ++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers); ++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10")) ++ continue; ++ kernexec_reload_fptr_mask(&gsi); ++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO); ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference + */ -+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi) ++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi) +{ + gimple assign_intptr, assign_new_fptr, call_stmt; + tree intptr, old_fptr, new_fptr, kernexec_mask; + -+ call_stmt = gsi_stmt(gsi); ++ call_stmt = gsi_stmt(*gsi); + old_fptr = gimple_call_fn(call_stmt); + + // create temporary unsigned long variable used for bitops and cast fptr to it @@ -78310,14 +78422,14 @@ index 0000000..51f747e + add_referenced_var(intptr); + mark_sym_for_renaming(intptr); + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); -+ gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); + update_stmt(assign_intptr); + + // apply logical or to temporary unsigned long and bitmask + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask)); -+ gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); + update_stmt(assign_intptr); + + // cast temporary unsigned long back to a temporary fptr variable @@ -78325,7 +78437,7 @@ index 0000000..51f747e + add_referenced_var(new_fptr); + mark_sym_for_renaming(new_fptr); + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); -+ gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT); ++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT); + update_stmt(assign_new_fptr); + + // replace call stmt fn with the new fptr @@ -78333,14 +78445,14 @@ index 0000000..51f747e + update_stmt(call_stmt); +} + -+static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi) ++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi) +{ + gimple asm_or_stmt, call_stmt; + tree old_fptr, new_fptr, input, output; + VEC(tree, gc) *inputs = NULL; + VEC(tree, gc) *outputs = NULL; + -+ call_stmt = gsi_stmt(gsi); ++ call_stmt = gsi_stmt(*gsi); + old_fptr = gimple_call_fn(call_stmt); + + // create temporary fptr variable @@ -78357,7 +78469,7 @@ index 0000000..51f747e + VEC_safe_push(tree, gc, outputs, output); + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL); + gimple_asm_set_volatile(asm_or_stmt, true); -+ gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT); ++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT); + update_stmt(asm_or_stmt); + + // replace call stmt fn with the new fptr @@ -78371,10 +78483,11 @@ index 0000000..51f747e +static unsigned int execute_kernexec_fptr(void) +{ + basic_block bb; -+ gimple_stmt_iterator gsi; + + // 1. 
loop through BBs and GIMPLE statements + FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); + tree fn; @@ -78401,7 +78514,7 @@ index 0000000..51f747e + if (TREE_CODE(fn) != FUNCTION_TYPE) + continue; + -+ kernexec_instrument_fptr(gsi); ++ kernexec_instrument_fptr(&gsi); + +//debug_tree(gimple_call_fn(call_stmt)); +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); @@ -78483,6 +78596,12 @@ index 0000000..51f747e + const int argc = plugin_info->argc; + const struct plugin_argument * const argv = plugin_info->argv; + int i; ++ struct register_pass_info kernexec_reload_pass_info = { ++ .pass = &kernexec_reload_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 0, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; + struct register_pass_info kernexec_fptr_pass_info = { + .pass = &kernexec_fptr_pass.pass, + .reference_pass_name = "ssa", @@ -78528,6 +78647,8 @@ index 0000000..51f747e + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr) + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name); + ++ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or) ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info); + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info); + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info); + @@ -78535,10 +78656,10 @@ index 0000000..51f747e +} diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c new file mode 100644 -index 0000000..d44f37c +index 0000000..8b61031 --- /dev/null +++ b/tools/gcc/stackleak_plugin.c -@@ -0,0 +1,291 @@ +@@ -0,0 +1,295 @@ +/* + * Copyright 2011 by the PaX Team <pageexec@freemail.hu> + * Licensed under the GPL v2 @@ -78638,7 +78759,7 @@ index 0000000..d44f37c + return track_frame_size >= 0; +} + -+static void stackleak_check_alloca(gimple_stmt_iterator gsi) ++static void stackleak_check_alloca(gimple_stmt_iterator *gsi) +{ + gimple check_alloca; + tree fndecl, fntype, alloca_size; @@ -78647,12 +78768,12 @@ index 0000000..d44f37c + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); + fndecl = build_fn_decl(check_function, fntype); + DECL_ASSEMBLER_NAME(fndecl); // for LTO -+ alloca_size = gimple_call_arg(gsi_stmt(gsi), 0); ++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0); + check_alloca = gimple_build_call(fndecl, 1, alloca_size); -+ gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING); ++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT); +} + -+static void stackleak_add_instrumentation(gimple_stmt_iterator gsi) ++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi) +{ + gimple track_stack; + tree fndecl, fntype; @@ -78662,7 +78783,7 @@ index 0000000..d44f37c + fndecl = build_fn_decl(track_function, fntype); + DECL_ASSEMBLER_NAME(fndecl); // for LTO + track_stack = gimple_build_call(fndecl, 0); -+ gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING); ++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING); +} + +#if BUILDING_GCC_VERSION == 4005 @@ -78705,16 +78826,17 @@ index 0000000..d44f37c + // 1. 
loop through BBs and GIMPLE statements + FOR_EACH_BB(bb) { + gimple_stmt_iterator gsi; ++ + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450> + if (!is_alloca(gsi_stmt(gsi))) + continue; + + // 2. insert stack overflow check before each __builtin_alloca call -+ stackleak_check_alloca(gsi); ++ stackleak_check_alloca(&gsi); + + // 3. insert track call after each __builtin_alloca call -+ stackleak_add_instrumentation(gsi); ++ stackleak_add_instrumentation(&gsi); + if (bb == entry_bb) + prologue_instrumented = true; + } @@ -78722,10 +78844,13 @@ index 0000000..d44f37c + + // 4. insert track call at the beginning + if (!prologue_instrumented) { ++ gimple_stmt_iterator gsi; ++ + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; + if (dom_info_available_p(CDI_DOMINATORS)) + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); -+ stackleak_add_instrumentation(gsi_start_bb(bb)); ++ gsi = gsi_start_bb(bb); ++ stackleak_add_instrumentation(&gsi); + } + + return 0; diff --git a/main/linux-grsec/kernelconfig.x86 b/main/linux-grsec/kernelconfig.x86 index 38e8cd914b..d6a674883e 100644 --- a/main/linux-grsec/kernelconfig.x86 +++ b/main/linux-grsec/kernelconfig.x86 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/i386 3.2.2 Kernel Configuration +# Linux/i386 3.2.7 Kernel Configuration # # CONFIG_64BIT is not set CONFIG_X86_32=y @@ -4905,7 +4905,9 @@ CONFIG_CIFS_ACL=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set -# CONFIG_9P_FS is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y # # Partition Types diff --git a/main/linux-grsec/kernelconfig.x86_64 b/main/linux-grsec/kernelconfig.x86_64 index 045bfa90ef..21a9dcd891 100644 --- a/main/linux-grsec/kernelconfig.x86_64 +++ b/main/linux-grsec/kernelconfig.x86_64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86_64 3.2.1 Kernel Configuration +# Linux/x86_64 3.2.7 Kernel Configuration # CONFIG_64BIT=y # CONFIG_X86_32 is not set @@ -4880,7 +4880,9 @@ CONFIG_CIFS_ACL=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set -# CONFIG_9P_FS is not set +CONFIG_9P_FS=m +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y # # Partition Types |