author | Natanael Copa <ncopa@alpinelinux.org> | 2014-05-14 08:09:59 +0000 |
---|---|---|
committer | Natanael Copa <ncopa@alpinelinux.org> | 2014-05-14 08:19:35 +0000 |
commit | 1c48994c96119a286b3bae12b89a2b2f1286a816 | |
tree | 39cc0df520fc00ec27922bc214db53fedbf0e1da | /main/linux-grsec |
parent | fd99da9ad84491f7a9cbdfebe061f76ae43d66e5 | |
download | aports-1c48994c96119a286b3bae12b89a2b2f1286a816.tar.bz2, aports-1c48994c96119a286b3bae12b89a2b2f1286a816.tar.xz | |
main/linux-grsec: upgrade to 3.14.4
Diffstat (limited to 'main/linux-grsec')
-rw-r--r-- | main/linux-grsec/APKBUILD | 16 |
-rw-r--r-- | main/linux-grsec/grsecurity-3.0-3.14.4-201405131205.patch (renamed from main/linux-grsec/grsecurity-3.0-3.14.2-201405051841.patch) | 9476 |
2 files changed, 5229 insertions, 4263 deletions
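The APKBUILD hunks below touch exactly three things: the pkgver, the grsecurity patch name in source=, and the md5/sha256/sha512 checksum lists. As a minimal sketch of how such a bump is typically prepared with Alpine's abuild tooling (the sed invocations are illustrative, not taken from this commit; `abuild checksum` regenerates the checksum blocks from the files named in source=, and `abuild -r` does a local test build):

```sh
# Sketch only: assumes an aports checkout and the abuild tool installed.
cd aports/main/linux-grsec

# Bump the package version (3.14.2 -> 3.14.4).
sed -i 's/^pkgver=3\.14\.2$/pkgver=3.14.4/' APKBUILD

# Swap the old grsecurity patch for the new one in source=.
sed -i 's/grsecurity-3\.0-3\.14\.2-201405051841\.patch/grsecurity-3.0-3.14.4-201405131205.patch/' APKBUILD

# Regenerate the md5sums/sha256sums/sha512sums blocks from the source= files
# (fetches anything not already in the local distfiles cache).
abuild checksum

# Test build before committing.
abuild -r
```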
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 9212d4363..c165a3930 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,7 +2,7 @@
 
 _flavor=grsec
 pkgname=linux-${_flavor}
-pkgver=3.14.2
+pkgver=3.14.4
 case $pkgver in
 *.*.*) _kernver=${pkgver%.*};;
 *.*) _kernver=${pkgver};;
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
 install=
 source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
 	http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
-	grsecurity-3.0-3.14.2-201405051841.patch
+	grsecurity-3.0-3.14.4-201405131205.patch
 
 	fix-memory-map-for-PIE-applications.patch
 	imx6q-no-unclocked-sleep.patch
@@ -165,24 +165,24 @@ dev() {
 }
 
 md5sums="b621207b3f6ecbb67db18b13258f8ea8  linux-3.14.tar.xz
-f2239bf772d1b6e1c26cb03f6e056959  patch-3.14.2.xz
-886c74ae6a77a7c380f14226fc5f4058  grsecurity-3.0-3.14.2-201405051841.patch
+116f27cf17c3522716b6678b17516067  patch-3.14.4.xz
+6982685dd709f6fb9f256161c1af1af2  grsecurity-3.0-3.14.4-201405131205.patch
 c6a4ae7e8ca6159e1631545515805216  fix-memory-map-for-PIE-applications.patch
 1a307fc1d63231bf01d22493a4f14378  imx6q-no-unclocked-sleep.patch
 68efadc03ab0d14a4ac1549f77a07bb9  kernelconfig.x86
 0765de8921029ddcead8a430a26fb1ee  kernelconfig.x86_64
 6da9d80a834790fa94f06d1ac3e9c6d5  kernelconfig.armhf"
 sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa  linux-3.14.tar.xz
-1c7a8fee1674bfa6354dc6e3038ca1f16bc1327b035cd4896b795b482c7d1829  patch-3.14.2.xz
-97280d28d109de1e235411141df08f5cb499b0d4600f61478373c9506df3474d  grsecurity-3.0-3.14.2-201405051841.patch
+af640ea64e923d525a8238832e8452381e6dc76a3bf28046411cadd67c408114  patch-3.14.4.xz
+d5a84246d0401cc9d79abde2cef6b83732eb0b5d3d745c015d25544e8cc1ad62  grsecurity-3.0-3.14.4-201405131205.patch
 500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7  fix-memory-map-for-PIE-applications.patch
 21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3  imx6q-no-unclocked-sleep.patch
 9081bc214794934170813ac4c88ec62db503c270a4f2a6c36a4bcdc695b32809  kernelconfig.x86
 239513012483b28902951077c6aa4cea70be32e760c5a10cc9a3811fe5e92f06  kernelconfig.x86_64
 0422a2a80edb9c6de1a97f4ac9a08ad03097d59970d18387ba499f714b402d67  kernelconfig.armhf"
 sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e  linux-3.14.tar.xz
-72132895aa04bd3e029a490cdf6f6363f672562517afafb6c7b982e2a56b989de3684b7a59df830757025b37bfdb458b26deea03ee4ec1f7e379b97934670988  patch-3.14.2.xz
-bbfaaa3a7c90880e8c1146218a6c3ba4646e2753f269dc682a2b8e2934ff73a1407cadbe69c43c1848cee914cc56813f48a4a32e339666bf85ce229d7f25ff8e  grsecurity-3.0-3.14.2-201405051841.patch
+61eca26d57f7d7caa78d157582d4b98fbba1c85af73f1773fb51eab3db4381de53f4fbfbc202083e45297c0b4487bc58880a518e7ee9c0d616cddf0b3909b303  patch-3.14.4.xz
+9b4ae932c145d0cd0b32c9bfc7c21a8ea25c29d3beed3c34b91fe5a3ec30a487f002f51d9b0c1bf5a64e0dd9baa0b19cce3ce1cfb3b236129e83dd8472f8fef7  grsecurity-3.0-3.14.4-201405131205.patch
 4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7  fix-memory-map-for-PIE-applications.patch
 87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221  imx6q-no-unclocked-sleep.patch
 a4c3d9e2683e92354c38c0a2b63322b27fb6c1eaa57adea603b522e9b9eed21678f9fcf0afa3f8e9b089e85f31295a89b469c924b458defbd3540a086f3b9ba5  kernelconfig.x86
diff --git
a/main/linux-grsec/grsecurity-3.0-3.14.2-201405051841.patch b/main/linux-grsec/grsecurity-3.0-3.14.4-201405131205.patch index ab4273c27..9a8e53a50 100644 --- a/main/linux-grsec/grsecurity-3.0-3.14.2-201405051841.patch +++ b/main/linux-grsec/grsecurity-3.0-3.14.4-201405131205.patch @@ -287,7 +287,7 @@ index 7116fda..d8ed6e8 100644 pcd. [PARIDE] diff --git a/Makefile b/Makefile -index b2f7de8..9e2b63f 100644 +index d7c07fd..d6d4bfa 100644 --- a/Makefile +++ b/Makefile @@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -352,7 +352,7 @@ index b2f7de8..9e2b63f 100644 +endif +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so +ifdef CONFIG_PAX_SIZE_OVERFLOW -+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN ++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN +endif +ifdef CONFIG_PAX_LATENT_ENTROPY +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN @@ -849,7 +849,7 @@ index 98838a0..b304fb4 100644 /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 1594945..adf4001 100644 +index 44298ad..29a20c0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1862,7 +1862,7 @@ config ALIGNMENT_TRAP @@ -1703,10 +1703,10 @@ index de53547..52b9a28 100644 (unsigned long)(dest_buf) + (size)); \ \ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h -index e42cf59..7b94b8f 100644 +index 2aff798..099eb15 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h -@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +@@ -45,6 +45,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -1715,7 +1715,7 @@ index e42cf59..7b94b8f 100644 smp_mb(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: ldrex %1, [%4]\n" -@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +@@ -60,6 +62,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "cc", "memory"); smp_mb(); @@ -1724,7 +1724,7 @@ index e42cf59..7b94b8f 100644 *uval = val; return ret; } -@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +@@ -90,6 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -1733,7 +1733,7 @@ index e42cf59..7b94b8f 100644 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: " TUSER(ldr) " %1, [%4]\n" " teq %1, %2\n" -@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +@@ -100,6 +106,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); @@ -1742,7 +1742,7 @@ index e42cf59..7b94b8f 100644 *uval = val; return ret; } -@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +@@ -122,6 +130,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) return -EFAULT; pagefault_disable(); /* implies preempt_disable() */ @@ -1750,7 +1750,7 @@ index e42cf59..7b94b8f 100644 switch (op) { case FUTEX_OP_SET: -@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +@@ -143,6 +152,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user 
*uaddr) ret = -ENOSYS; } @@ -1946,7 +1946,7 @@ index 5cfba15..f415e1a 100644 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h -index dfff709..ed4c4e7 100644 +index 219ac88..73ec32a 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -126,6 +126,9 @@ @@ -3603,7 +3603,7 @@ index 78c02b3..c94109a 100644 struct omap_device *omap_device_alloc(struct platform_device *pdev, struct omap_hwmod **ohs, int oh_cnt); diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c -index 1f33f5d..b29fa75 100644 +index 66c60fe..c78950d 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops { @@ -3698,19 +3698,18 @@ index 2dea8b5..6499da2 100644 extern void ux500_cpu_die(unsigned int cpu); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig -index 1f8fed9..14d7823 100644 +index ca8ecde..58ba893 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig -@@ -446,7 +446,7 @@ config CPU_32v5 +@@ -446,6 +446,7 @@ config CPU_32v5 config CPU_32v6 bool -- select CPU_USE_DOMAINS if CPU_V6 && MMU + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF select TLS_REG_EMUL if !CPU_32v6K && !MMU config CPU_32v6K -@@ -601,6 +601,7 @@ config CPU_CP15_MPU +@@ -600,6 +601,7 @@ config CPU_CP15_MPU config CPU_USE_DOMAINS bool @@ -3718,7 +3717,7 @@ index 1f8fed9..14d7823 100644 help This option enables or disables the use of domain switching via the set_fs() function. -@@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS +@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS config KUSER_HELPERS bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS default y @@ -3726,7 +3725,7 @@ index 1f8fed9..14d7823 100644 help Warning: disabling this option may break user programs. -@@ -812,7 +814,7 @@ config KUSER_HELPERS +@@ -811,7 +814,7 @@ config KUSER_HELPERS See Documentation/arm/kernel_user_helpers.txt for details. However, the fixed address nature of these helpers can be used @@ -4293,7 +4292,7 @@ index 5e85ed3..b10a7ed 100644 } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c -index a623cb3..a896d84 100644 +index b68c6b2..f66c492 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -39,6 +39,22 @@ @@ -4427,7 +4426,7 @@ index a623cb3..a896d84 100644 .domain = DOMAIN_KERNEL, }, [MT_MEMORY_RW_SO] = { -@@ -524,9 +562,14 @@ static void __init build_mem_type_table(void) +@@ -534,9 +572,14 @@ static void __init build_mem_type_table(void) * Mark cache clean areas and XIP ROM read only * from SVC mode and no access from userspace. 
*/ @@ -4445,7 +4444,7 @@ index a623cb3..a896d84 100644 #endif if (is_smp()) { -@@ -542,13 +585,17 @@ static void __init build_mem_type_table(void) +@@ -552,13 +595,17 @@ static void __init build_mem_type_table(void) mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; @@ -4467,7 +4466,7 @@ index a623cb3..a896d84 100644 } } -@@ -559,15 +606,20 @@ static void __init build_mem_type_table(void) +@@ -569,15 +616,20 @@ static void __init build_mem_type_table(void) if (cpu_arch >= CPU_ARCH_ARMv6) { if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* Non-cacheable Normal is XCB = 001 */ @@ -4491,7 +4490,7 @@ index a623cb3..a896d84 100644 } #ifdef CONFIG_ARM_LPAE -@@ -583,6 +635,8 @@ static void __init build_mem_type_table(void) +@@ -593,6 +645,8 @@ static void __init build_mem_type_table(void) vecs_pgprot |= PTE_EXT_AF; #endif @@ -4500,7 +4499,7 @@ index a623cb3..a896d84 100644 for (i = 0; i < 16; i++) { pteval_t v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); -@@ -600,21 +654,24 @@ static void __init build_mem_type_table(void) +@@ -610,21 +664,24 @@ static void __init build_mem_type_table(void) mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; @@ -4531,7 +4530,7 @@ index a623cb3..a896d84 100644 break; } pr_info("Memory policy: %sData cache %s\n", -@@ -832,7 +889,7 @@ static void __init create_mapping(struct map_desc *md) +@@ -842,7 +899,7 @@ static void __init create_mapping(struct map_desc *md) return; } @@ -4540,7 +4539,7 @@ index a623cb3..a896d84 100644 md->virtual >= PAGE_OFFSET && (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { printk(KERN_WARNING "BUG: mapping for 0x%08llx" -@@ -1247,18 +1304,15 @@ void __init arm_mm_memblock_reserve(void) +@@ -1257,18 +1314,15 @@ void __init arm_mm_memblock_reserve(void) * called function. This means you can't use any function or debugging * method which may touch any device, otherwise the kernel _will_ crash. */ @@ -4563,7 +4562,7 @@ index a623cb3..a896d84 100644 for (addr = VMALLOC_START; addr; addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); -@@ -1271,7 +1325,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) +@@ -1281,7 +1335,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); map.virtual = MODULES_VADDR; map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; @@ -4572,7 +4571,7 @@ index a623cb3..a896d84 100644 create_mapping(&map); #endif -@@ -1282,14 +1336,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) +@@ -1292,14 +1346,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); map.virtual = FLUSH_BASE; map.length = SZ_1M; @@ -4589,7 +4588,7 @@ index a623cb3..a896d84 100644 create_mapping(&map); #endif -@@ -1298,7 +1352,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) +@@ -1308,7 +1362,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) * location (0xffff0000). If we aren't using high-vectors, also * create a mapping at the low-vectors virtual address. 
*/ @@ -4598,7 +4597,7 @@ index a623cb3..a896d84 100644 map.virtual = 0xffff0000; map.length = PAGE_SIZE; #ifdef CONFIG_KUSER_HELPERS -@@ -1355,8 +1409,10 @@ static void __init kmap_init(void) +@@ -1365,8 +1419,10 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { struct memblock_region *reg; @@ -4609,7 +4608,7 @@ index a623cb3..a896d84 100644 /* Map all the lowmem memory banks. */ for_each_memblock(memory, reg) { -@@ -1369,11 +1425,48 @@ static void __init map_lowmem(void) +@@ -1379,11 +1435,48 @@ static void __init map_lowmem(void) if (start >= end) break; @@ -4659,7 +4658,7 @@ index a623cb3..a896d84 100644 create_mapping(&map); } else { -@@ -1390,7 +1483,7 @@ static void __init map_lowmem(void) +@@ -1400,7 +1493,7 @@ static void __init map_lowmem(void) map.pfn = __phys_to_pfn(kernel_x_start); map.virtual = __phys_to_virt(kernel_x_start); map.length = kernel_x_end - kernel_x_start; @@ -4668,7 +4667,7 @@ index a623cb3..a896d84 100644 create_mapping(&map); -@@ -1403,6 +1496,7 @@ static void __init map_lowmem(void) +@@ -1413,6 +1506,7 @@ static void __init map_lowmem(void) create_mapping(&map); } } @@ -6785,7 +6784,7 @@ index 44a1f79..2bd6aa3 100644 void __init gt641xx_irq_init(void) diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c -index d1fea7a..45602ea 100644 +index d1fea7a..2e591b0 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq) @@ -6809,6 +6808,25 @@ index d1fea7a..45602ea 100644 } void __init init_IRQ(void) +@@ -110,7 +110,10 @@ void __init init_IRQ(void) + #endif + } + ++ + #ifdef DEBUG_STACKOVERFLOW ++extern void gr_handle_kernel_exploit(void); ++ + static inline void check_stack_overflow(void) + { + unsigned long sp; +@@ -126,6 +129,7 @@ static inline void check_stack_overflow(void) + printk("do_IRQ: stack overflow: %ld\n", + sp - sizeof(struct thread_info)); + dump_stack(); ++ gr_handle_kernel_exploit(); + } + } + #else diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 6ae540e..b7396dc 100644 --- a/arch/mips/kernel/process.c @@ -7488,6 +7506,18 @@ index 4006964..fcb3cc2 100644 ret = __copy_from_user(to, from, n); else copy_from_user_overflow(); +diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h +index 8b06343..090483c 100644 +--- a/arch/parisc/include/uapi/asm/resource.h ++++ b/arch/parisc/include/uapi/asm/resource.h +@@ -1,7 +1,6 @@ + #ifndef _ASM_PARISC_RESOURCE_H + #define _ASM_PARISC_RESOURCE_H + +-#define _STK_LIM_MAX 10 * _STK_LIM + #include <asm-generic/resource.h> + + #endif diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 50dfafc..b9fc230 100644 --- a/arch/parisc/kernel/module.c @@ -8144,10 +8174,10 @@ index 4aad413..85d86bf 100644 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h -index 90c06ec..3517221 100644 +index ce17815..c5574cc 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h -@@ -248,6 +248,7 @@ +@@ -249,6 +249,7 @@ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ #define DSISR_NOHPTE 0x40000000 /* no translation found */ @@ -8424,6 +8454,27 @@ index 38d5073..f00af8d 100644 mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c +index 
1d0848b..d74685f 100644 +--- a/arch/powerpc/kernel/irq.c ++++ b/arch/powerpc/kernel/irq.c +@@ -447,6 +447,8 @@ void migrate_irqs(void) + } + #endif + ++extern void gr_handle_kernel_exploit(void); ++ + static inline void check_stack_overflow(void) + { + #ifdef CONFIG_DEBUG_STACKOVERFLOW +@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void) + printk("do_IRQ: stack overflow: %ld\n", + sp - sizeof(struct thread_info)); + dump_stack(); ++ gr_handle_kernel_exploit(); + } + #endif + } diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 6cff040..74ac5d1 100644 --- a/arch/powerpc/kernel/module_32.c @@ -8467,10 +8518,10 @@ index 6cff040..74ac5d1 100644 sechdrs, module); #endif diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c -index af064d2..ce56147 100644 +index 31d0215..206af70 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c -@@ -1009,8 +1009,8 @@ void show_regs(struct pt_regs * regs) +@@ -1031,8 +1031,8 @@ void show_regs(struct pt_regs * regs) * Lookup NIP late so we have the best change of getting the * above info out without failing */ @@ -8481,7 +8532,7 @@ index af064d2..ce56147 100644 #endif show_stack(current, (unsigned long *) regs->gpr[1]); if (!user_mode(regs)) -@@ -1532,10 +1532,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) +@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) newsp = stack[0]; ip = stack[STACK_FRAME_LR_SAVE]; if (!firstframe || ip != lr) { @@ -8494,7 +8545,7 @@ index af064d2..ce56147 100644 (void *)current->ret_stack[curr_frame].ret); curr_frame--; } -@@ -1555,7 +1555,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) +@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); lr = regs->link; @@ -8503,7 +8554,7 @@ index af064d2..ce56147 100644 regs->trap, (void *)regs->nip, (void *)lr); firstframe = 1; } -@@ -1591,58 +1591,3 @@ void notrace __ppc64_runlatch_off(void) +@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void) mtspr(SPRN_CTRLT, ctrl); } #endif /* CONFIG_PPC64 */ @@ -8602,10 +8653,10 @@ index 2e3d2bf..35df241 100644 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c -index a67e00a..f71d8c7 100644 +index 4e47db6..6dcc96e 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c -@@ -1011,7 +1011,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, +@@ -1013,7 +1013,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; @@ -8615,10 +8666,10 @@ index a67e00a..f71d8c7 100644 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; } else { diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c -index 8d253c2..405b337 100644 +index d501dc4..e5a0de0 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c -@@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, +@@ -760,7 +760,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, current->thread.fp_state.fpscr = 0; /* Set up to return from userspace. 
*/ @@ -8959,10 +9010,10 @@ index 9098692..3d54cd1 100644 struct spu_context *ctx = vma->vm_file->private_data; unsigned long offset = address - vma->vm_start; diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h -index fa9aaf7..3f5d836 100644 +index 1d47061..0714963 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h -@@ -398,6 +398,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) +@@ -412,6 +412,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) @@ -9712,6 +9763,142 @@ index 502f632..da1917f 100644 #define __S100 PAGE_READONLY #define __S101 PAGE_READONLY #define __S110 PAGE_SHARED +diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h +index 0f9e945..a949e55 100644 +--- a/arch/sparc/include/asm/pgtable_64.h ++++ b/arch/sparc/include/asm/pgtable_64.h +@@ -71,6 +71,23 @@ + + #include <linux/sched.h> + ++extern unsigned long sparc64_valid_addr_bitmap[]; ++ ++/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ ++static inline bool __kern_addr_valid(unsigned long paddr) ++{ ++ if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL) ++ return false; ++ return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap); ++} ++ ++static inline bool kern_addr_valid(unsigned long addr) ++{ ++ unsigned long paddr = __pa(addr); ++ ++ return __kern_addr_valid(paddr); ++} ++ + /* Entries per page directory level. */ + #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) + #define PTRS_PER_PMD (1UL << PMD_BITS) +@@ -79,9 +96,12 @@ + /* Kernel has a separate 44bit address space. */ + #define FIRST_USER_ADDRESS 0 + +-#define pte_ERROR(e) __builtin_trap() +-#define pmd_ERROR(e) __builtin_trap() +-#define pgd_ERROR(e) __builtin_trap() ++#define pmd_ERROR(e) \ ++ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \ ++ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0)) ++#define pgd_ERROR(e) \ ++ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \ ++ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0)) + + #endif /* !(__ASSEMBLY__) */ + +@@ -633,7 +653,7 @@ static inline unsigned long pmd_large(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); + +- return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte); ++ return pte_val(pte) & _PAGE_PMD_HUGE; + } + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE +@@ -719,20 +739,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) + return __pmd(pte_val(pte)); + } + +-static inline pmd_t pmd_mknotpresent(pmd_t pmd) +-{ +- unsigned long mask; +- +- if (tlb_type == hypervisor) +- mask = _PAGE_PRESENT_4V; +- else +- mask = _PAGE_PRESENT_4U; +- +- pmd_val(pmd) &= ~mask; +- +- return pmd; +-} +- + static inline pmd_t pmd_mksplitting(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); +@@ -757,6 +763,20 @@ static inline int pmd_present(pmd_t pmd) + + #define pmd_none(pmd) (!pmd_val(pmd)) + ++/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is ++ * very simple, it's just the physical address. PTE tables are of ++ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and ++ * the top bits outside of the range of any physical address size we ++ * support are clear as well. We also validate the physical itself. 
++ */ ++#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \ ++ !__kern_addr_valid(pmd_val(pmd))) ++ ++#define pud_none(pud) (!pud_val(pud)) ++ ++#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \ ++ !__kern_addr_valid(pud_val(pud))) ++ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd); +@@ -790,10 +810,7 @@ static inline unsigned long __pmd_page(pmd_t pmd) + #define pud_page_vaddr(pud) \ + ((unsigned long) __va(pud_val(pud))) + #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) +-#define pmd_bad(pmd) (0) + #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) +-#define pud_none(pud) (!pud_val(pud)) +-#define pud_bad(pud) (0) + #define pud_present(pud) (pud_val(pud) != 0U) + #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) + +@@ -893,6 +910,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); + extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd); + ++#define __HAVE_ARCH_PMDP_INVALIDATE ++extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, ++ pmd_t *pmdp); ++ + #define __HAVE_ARCH_PGTABLE_DEPOSIT + extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +@@ -919,18 +940,6 @@ extern unsigned long pte_file(pte_t); + extern pte_t pgoff_to_pte(unsigned long); + #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) + +-extern unsigned long sparc64_valid_addr_bitmap[]; +- +-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +-static inline bool kern_addr_valid(unsigned long addr) +-{ +- unsigned long paddr = __pa(addr); +- +- if ((paddr >> 41UL) != 0UL) +- return false; +- return test_bit(paddr >> 22, sparc64_valid_addr_bitmap); +-} +- + extern int page_in_phys_avail(unsigned long paddr); + + /* diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 79da178..c2eede8 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h @@ -9889,6 +10076,20 @@ index a5f01ac..703b554 100644 /* * Thread-synchronous status. 
* +diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h +index 2230f80..90916f9 100644 +--- a/arch/sparc/include/asm/tsb.h ++++ b/arch/sparc/include/asm/tsb.h +@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; + andcc REG1, REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(4 * 1024 * 1024), REG2; \ +- andn REG1, REG2, REG1; \ ++ brgez,pn REG1, FAIL_LABEL; \ ++ andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index 0167d26..767bb0c 100644 --- a/arch/sparc/include/asm/uaccess.h @@ -10637,10 +10838,37 @@ index 4ced92f..965eeed 100644 } EXPORT_SYMBOL(die_if_kernel); diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c -index 3c1a7cb..73e1923 100644 +index 3c1a7cb..9046547 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c -@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs) +@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) + unsigned long compute_effective_address(struct pt_regs *regs, + unsigned int insn, unsigned int rd) + { ++ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + unsigned int rs1 = (insn >> 14) & 0x1f; + unsigned int rs2 = insn & 0x1f; +- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; ++ unsigned long addr; + + if (insn & 0x2000) { + maybe_flush_windows(rs1, 0, rd, from_kernel); +- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); ++ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); + } else { + maybe_flush_windows(rs1, rs2, rd, from_kernel); +- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); ++ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); + } ++ ++ if (!from_kernel && test_thread_flag(TIF_32BIT)) ++ addr &= 0xffffffff; ++ ++ return addr; + } + + /* This is just to make gcc think die_if_kernel does return... */ +@@ -289,7 +295,7 @@ static void log_unaligned(struct pt_regs *regs) static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); if (__ratelimit(&ratelimit)) { @@ -11230,7 +11458,7 @@ index 59dbd46..1dd7f5e 100644 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c -index 69bb818..6ca35c8 100644 +index 69bb818..3542236 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -22,6 +22,9 @@ @@ -11252,7 +11480,124 @@ index 69bb818..6ca35c8 100644 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); dump_stack(); unhandled_fault(regs->tpc, current, regs); -@@ -271,6 +274,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, +@@ -96,38 +99,51 @@ static unsigned int get_user_insn(unsigned long tpc) + pte_t *ptep, pte; + unsigned long pa; + u32 insn = 0; +- unsigned long pstate; + +- if (pgd_none(*pgdp)) +- goto outret; ++ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) ++ goto out; + pudp = pud_offset(pgdp, tpc); +- if (pud_none(*pudp)) +- goto outret; +- pmdp = pmd_offset(pudp, tpc); +- if (pmd_none(*pmdp)) +- goto outret; ++ if (pud_none(*pudp) || unlikely(pud_bad(*pudp))) ++ goto out; + + /* This disables preemption for us as well. 
*/ +- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); +- __asm__ __volatile__("wrpr %0, %1, %%pstate" +- : : "r" (pstate), "i" (PSTATE_IE)); +- ptep = pte_offset_map(pmdp, tpc); +- pte = *ptep; +- if (!pte_present(pte)) +- goto out; ++ local_irq_disable(); + +- pa = (pte_pfn(pte) << PAGE_SHIFT); +- pa += (tpc & ~PAGE_MASK); ++ pmdp = pmd_offset(pudp, tpc); ++ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) ++ goto out_irq_enable; + +- /* Use phys bypass so we don't pollute dtlb/dcache. */ +- __asm__ __volatile__("lduwa [%1] %2, %0" +- : "=r" (insn) +- : "r" (pa), "i" (ASI_PHYS_USE_EC)); ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++ if (pmd_trans_huge(*pmdp)) { ++ if (pmd_trans_splitting(*pmdp)) ++ goto out_irq_enable; + ++ pa = pmd_pfn(*pmdp) << PAGE_SHIFT; ++ pa += tpc & ~HPAGE_MASK; ++ ++ /* Use phys bypass so we don't pollute dtlb/dcache. */ ++ __asm__ __volatile__("lduwa [%1] %2, %0" ++ : "=r" (insn) ++ : "r" (pa), "i" (ASI_PHYS_USE_EC)); ++ } else ++#endif ++ { ++ ptep = pte_offset_map(pmdp, tpc); ++ pte = *ptep; ++ if (pte_present(pte)) { ++ pa = (pte_pfn(pte) << PAGE_SHIFT); ++ pa += (tpc & ~PAGE_MASK); ++ ++ /* Use phys bypass so we don't pollute dtlb/dcache. */ ++ __asm__ __volatile__("lduwa [%1] %2, %0" ++ : "=r" (insn) ++ : "r" (pa), "i" (ASI_PHYS_USE_EC)); ++ } ++ pte_unmap(ptep); ++ } ++out_irq_enable: ++ local_irq_enable(); + out: +- pte_unmap(ptep); +- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); +-outret: + return insn; + } + +@@ -153,7 +169,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code, + } + + static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, +- unsigned int insn, int fault_code) ++ unsigned long fault_addr, unsigned int insn, ++ int fault_code) + { + unsigned long addr; + siginfo_t info; +@@ -161,10 +178,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, + info.si_code = code; + info.si_signo = sig; + info.si_errno = 0; +- if (fault_code & FAULT_CODE_ITLB) ++ if (fault_code & FAULT_CODE_ITLB) { + addr = regs->tpc; +- else +- addr = compute_effective_address(regs, insn, 0); ++ } else { ++ /* If we were able to probe the faulting instruction, use it ++ * to compute a precise fault address. Otherwise use the fault ++ * time provided address which may only have page granularity. ++ */ ++ if (insn) ++ addr = compute_effective_address(regs, insn, 0); ++ else ++ addr = fault_addr; ++ } + info.si_addr = (void __user *) addr; + info.si_trapno = 0; + +@@ -239,7 +264,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, + /* The si_code was set to make clear whether + * this was a SEGV_MAPERR or SEGV_ACCERR fault. + */ +- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code); ++ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code); + return; + } + +@@ -271,6 +296,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, show_regs(regs); } @@ -11719,7 +12064,7 @@ index 69bb818..6ca35c8 100644 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); -@@ -344,6 +807,29 @@ retry: +@@ -344,6 +829,29 @@ retry: if (!vma) goto bad_area; @@ -11749,6 +12094,28 @@ index 69bb818..6ca35c8 100644 /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the +@@ -525,7 +1033,7 @@ do_sigbus: + * Send a sigbus, regardless of whether we were in kernel + * or user mode. 
+ */ +- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code); ++ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code); + + /* Kernel mode? Handle exceptions or die */ + if (regs->tstate & TSTATE_PRIV) +diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c +index c4d3da6..1aed043 100644 +--- a/arch/sparc/mm/gup.c ++++ b/arch/sparc/mm/gup.c +@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, + struct page *head, *page, *tail; + int refs; + +- if (!pmd_large(pmd)) ++ if (!(pmd_val(pmd) & _PAGE_VALID)) + return 0; + + if (write && !pmd_write(pmd)) diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 9bd9ce8..dc84852 100644 --- a/arch/sparc/mm/hugetlbpage.c @@ -11893,6 +12260,67 @@ index eafbc65..5a8070d 100644 #endif /* CONFIG_SMP */ #endif /* CONFIG_DEBUG_DCFLUSH */ } +diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c +index b12cb5e..b89aba2 100644 +--- a/arch/sparc/mm/tlb.c ++++ b/arch/sparc/mm/tlb.c +@@ -134,7 +134,7 @@ no_cache_flush: + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, +- pmd_t pmd, bool exec) ++ pmd_t pmd) + { + unsigned long end; + pte_t *pte; +@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, + pte = pte_offset_map(&pmd, vaddr); + end = vaddr + HPAGE_SIZE; + while (vaddr < end) { +- if (pte_val(*pte) & _PAGE_VALID) ++ if (pte_val(*pte) & _PAGE_VALID) { ++ bool exec = pte_exec(*pte); ++ + tlb_batch_add_one(mm, vaddr, exec); ++ } + pte++; + vaddr += PAGE_SIZE; + } +@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, + } + + if (!pmd_none(orig)) { +- pte_t orig_pte = __pte(pmd_val(orig)); +- bool exec = pte_exec(orig_pte); +- + addr &= HPAGE_MASK; + if (pmd_trans_huge(orig)) { ++ pte_t orig_pte = __pte(pmd_val(orig)); ++ bool exec = pte_exec(orig_pte); ++ + tlb_batch_add_one(mm, addr, exec); + tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec); + } else { +- tlb_batch_pmd_scan(mm, addr, orig, exec); ++ tlb_batch_pmd_scan(mm, addr, orig); + } + } + } + ++void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, ++ pmd_t *pmdp) ++{ ++ pmd_t entry = *pmdp; ++ ++ pmd_val(entry) &= ~_PAGE_VALID; ++ ++ set_pmd_at(vma->vm_mm, address, pmdp, entry); ++ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); ++} ++ + void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable) + { diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index b3692ce..e4517c9 100644 --- a/arch/tile/Kconfig @@ -12239,10 +12667,10 @@ index 321a52c..3d51a5e 100644 This option helps catch unintended modifications to loadable kernel module's text and read-only data. It also prevents execution diff --git a/arch/x86/Makefile b/arch/x86/Makefile -index eeda43a..5a238be 100644 +index 0dd99ea..e893775 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile -@@ -71,14 +71,12 @@ ifeq ($(CONFIG_X86_32),y) +@@ -71,15 +71,12 @@ ifeq ($(CONFIG_X86_32),y) # CPU-specific tuning. Anything which can be shared with UML should go here. 
include $(srctree)/arch/x86/Makefile_32.cpu KBUILD_CFLAGS += $(cflags-y) @@ -12254,11 +12682,12 @@ index eeda43a..5a238be 100644 UTS_MACHINE := x86_64 CHECKFLAGS += -D__x86_64__ -m64 +- biarch := -m64 + biarch := $(call cc-option,-m64) KBUILD_AFLAGS += -m64 KBUILD_CFLAGS += -m64 -@@ -111,6 +109,9 @@ else +@@ -112,6 +109,9 @@ else KBUILD_CFLAGS += -maccumulate-outgoing-args endif @@ -12268,7 +12697,7 @@ index eeda43a..5a238be 100644 # Make sure compiler does not have buggy stack-protector support. ifdef CONFIG_CC_STACKPROTECTOR cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh -@@ -267,3 +268,12 @@ define archhelp +@@ -269,3 +269,12 @@ define archhelp echo ' FDINITRD=file initrd for the booted kernel' echo ' kvmconfig - Enable additional options for guest kernel support' endef @@ -12396,10 +12825,10 @@ index a53440e..c3dbf1e 100644 .previous diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S -index 9116aac..abbcdb1 100644 +index f45ab7a..ebc015f 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S -@@ -117,10 +117,10 @@ preferred_addr: +@@ -119,10 +119,10 @@ preferred_addr: addl %eax, %ebx notl %eax andl %eax, %ebx @@ -12413,7 +12842,7 @@ index 9116aac..abbcdb1 100644 /* Target address to relocate to for decompression */ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S -index c5c1ae0..2e76d0e 100644 +index b10fa66..5ee0472 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -94,10 +94,10 @@ ENTRY(startup_32) @@ -12429,7 +12858,7 @@ index c5c1ae0..2e76d0e 100644 1: /* Target address to relocate to for decompression */ -@@ -271,10 +271,10 @@ preferred_addr: +@@ -268,10 +268,10 @@ preferred_addr: addq %rax, %rbp notq %rax andq %rax, %rbp @@ -12442,7 +12871,7 @@ index c5c1ae0..2e76d0e 100644 1: /* Target address to relocate to for decompression */ -@@ -366,8 +366,8 @@ gdt: +@@ -363,8 +363,8 @@ gdt: .long gdt .word 0 .quad 0x0000000000000000 /* NULL descriptor */ @@ -16002,7 +16431,7 @@ index 59c6c40..5e0b22c 100644 struct compat_timespec { compat_time_t tv_sec; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h -index e099f95..5aa0fb2 100644 +index 5f12968..a383517 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -203,7 +203,7 @@ @@ -16023,7 +16452,7 @@ index e099f95..5aa0fb2 100644 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */ #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ -@@ -354,6 +354,7 @@ extern const char * const x86_power_flags[32]; +@@ -358,6 +358,7 @@ extern const char * const x86_power_flags[32]; #undef cpu_has_centaur_mcr #define cpu_has_centaur_mcr 0 @@ -16031,7 +16460,7 @@ index e099f95..5aa0fb2 100644 #endif /* CONFIG_X86_64 */ #if __GNUC__ >= 4 -@@ -406,7 +407,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) +@@ -410,7 +411,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS t_warn: @@ -16041,7 +16470,7 @@ index e099f95..5aa0fb2 100644 return false; #endif -@@ -426,7 +428,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) +@@ -430,7 +432,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ ".previous\n" @@ -16050,7 +16479,7 @@ 
index e099f95..5aa0fb2 100644 "3: movb $1,%0\n" "4:\n" ".previous\n" -@@ -463,7 +465,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) +@@ -467,7 +469,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) " .byte 2b - 1b\n" /* src len */ " .byte 4f - 3f\n" /* repl len */ ".previous\n" @@ -16059,7 +16488,7 @@ index e099f95..5aa0fb2 100644 "3: .byte 0xe9\n .long %l[t_no] - 2b\n" "4:\n" ".previous\n" -@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) +@@ -500,7 +502,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ ".previous\n" @@ -16068,7 +16497,7 @@ index e099f95..5aa0fb2 100644 "3: movb $0,%0\n" "4:\n" ".previous\n" -@@ -510,7 +512,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) +@@ -514,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ ".previous\n" @@ -20094,10 +20523,10 @@ index 3e276eb..2eb3c30 100644 unsigned long mfn; diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h -index 5547389..da68716 100644 +index 6c1d741..39e6ecf 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h -@@ -76,8 +76,11 @@ static inline int xsave_user(struct xsave_struct __user *buf) +@@ -80,8 +80,11 @@ static inline int xsave_user(struct xsave_struct __user *buf) if (unlikely(err)) return -EFAULT; @@ -20110,7 +20539,7 @@ index 5547389..da68716 100644 "2: " ASM_CLAC "\n" ".section .fixup,\"ax\"\n" "3: movl $-1,%[err]\n" -@@ -87,18 +90,22 @@ static inline int xsave_user(struct xsave_struct __user *buf) +@@ -91,18 +94,22 @@ static inline int xsave_user(struct xsave_struct __user *buf) : [err] "=r" (err) : "D" (buf), "a" (-1), "d" (-1), "0" (0) : "memory"); @@ -20135,7 +20564,7 @@ index 5547389..da68716 100644 "2: " ASM_CLAC "\n" ".section .fixup,\"ax\"\n" "3: movl $-1,%[err]\n" -@@ -108,6 +115,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) +@@ -112,6 +119,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) : [err] "=r" (err) : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) : "memory"); /* memory required? 
*/ @@ -21657,7 +22086,7 @@ index d9c12d3..7858b62 100644 if (__die(str, regs, err)) diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c -index f2a1770..540657f 100644 +index f2a1770..10fa52d 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, @@ -21735,7 +22164,7 @@ index f2a1770..540657f 100644 return ud2 == 0x0b0f; } + -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY) +void pax_check_alloca(unsigned long size) +{ + unsigned long sp = (unsigned long)&sp, stack_left; @@ -21747,7 +22176,7 @@ index f2a1770..540657f 100644 +EXPORT_SYMBOL(pax_check_alloca); +#endif diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c -index addb207..99635fa 100644 +index addb207..921706b 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, @@ -21816,7 +22245,7 @@ index addb207..99635fa 100644 return ud2 == 0x0b0f; } + -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK ++#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY) +void pax_check_alloca(unsigned long size) +{ + unsigned long sp = (unsigned long)&sp, stack_start, stack_end; @@ -23916,7 +24345,7 @@ index 1e96c36..3ff710a 100644 /* * End of kprobes section diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c -index e625319..b9abb9d 100644 +index 1ffc32d..e52c745 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, @@ -24880,10 +25309,19 @@ index d99f31d..1c0f466 100644 } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c -index d7fcbed..1f747f7 100644 +index d7fcbed..96e715a 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c -@@ -39,7 +39,7 @@ static int check_stack_overflow(void) +@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs); + + #ifdef CONFIG_DEBUG_STACKOVERFLOW + ++extern void gr_handle_kernel_exploit(void); ++ + int sysctl_panic_on_stackoverflow __read_mostly; + + /* Debugging check for stack overflow: is there less than 1KB free? 
*/ +@@ -39,13 +41,14 @@ static int check_stack_overflow(void) __asm__ __volatile__("andl %%esp,%0" : "=r" (sp) : "0" (THREAD_SIZE - 1)); @@ -24892,7 +25330,14 @@ index d7fcbed..1f747f7 100644 } static void print_stack_overflow(void) -@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { } + { + printk(KERN_WARNING "low stack detected by irq handler\n"); + dump_stack(); ++ gr_handle_kernel_exploit(); + if (sysctl_panic_on_stackoverflow) + panic("low stack detected by irq handler - check messages\n"); + } +@@ -59,8 +62,8 @@ static inline void print_stack_overflow(void) { } * per-CPU IRQ handling contexts (thread information and stack) */ union irq_ctx { @@ -24903,7 +25348,7 @@ index d7fcbed..1f747f7 100644 } __attribute__((aligned(THREAD_SIZE))); static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); -@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack) +@@ -80,10 +83,9 @@ static void call_on_stack(void *func, void *stack) static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { @@ -24915,7 +25360,7 @@ index d7fcbed..1f747f7 100644 irqctx = __this_cpu_read(hardirq_ctx); /* -@@ -92,13 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) +@@ -92,13 +94,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) * handler) we can't do that and just have to keep using the * current stack (which is the irq stack already after all) */ @@ -24936,7 +25381,7 @@ index d7fcbed..1f747f7 100644 if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); -@@ -110,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) +@@ -110,6 +115,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) : "0" (irq), "1" (desc), "2" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); @@ -24948,7 +25393,7 @@ index d7fcbed..1f747f7 100644 return 1; } -@@ -118,48 +125,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) +@@ -118,48 +128,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) */ void irq_ctx_init(int cpu) { @@ -25010,7 +25455,7 @@ index d7fcbed..1f747f7 100644 } bool handle_irq(unsigned irq, struct pt_regs *regs) -@@ -173,7 +166,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) +@@ -173,7 +169,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) if (unlikely(!desc)) return false; @@ -25020,10 +25465,19 @@ index d7fcbed..1f747f7 100644 print_stack_overflow(); desc->handle_irq(irq, desc); diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c -index 4d1c746..232961d 100644 +index 4d1c746..55a22d6 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c -@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) +@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); + DEFINE_PER_CPU(struct pt_regs *, irq_regs); + EXPORT_PER_CPU_SYMBOL(irq_regs); + ++extern void gr_handle_kernel_exploit(void); ++ + int sysctl_panic_on_stackoverflow; + + /* +@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) u64 estack_top, estack_bottom; u64 curbase = (u64)task_stack_page(current); @@ -25032,6 +25486,15 @@ index 4d1c746..232961d 100644 return; if (regs->sp >= curbase + sizeof(struct thread_info) + +@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs) + irq_stack_top, irq_stack_bottom, + estack_top, estack_bottom); + ++ gr_handle_kernel_exploit(); ++ + if (sysctl_panic_on_stackoverflow) + panic("low stack detected by irq handler - check 
messages\n"); + #endif diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 26d5a55..a01160a 100644 --- a/arch/x86/kernel/jump_label.c @@ -25363,7 +25826,7 @@ index c2bedae..25e7ab6 100644 .name = "data", .mode = S_IRUGO, diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c -index ebc9873..37b8776 100644 +index af1d14a..37b8776 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) @@ -25416,7 +25879,7 @@ index ebc9873..37b8776 100644 return retval; } -@@ -229,6 +247,24 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) +@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) } } @@ -25427,20 +25890,9 @@ index ebc9873..37b8776 100644 + } +#endif + -+ /* -+ * On x86-64 we do not support 16-bit segments due to -+ * IRET leaking the high bits of the kernel stack address. -+ */ -+#ifdef CONFIG_X86_64 -+ if (!ldt_info.seg_32bit) { -+ error = -EINVAL; -+ goto out_unlock; -+ } -+#endif -+ - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; + /* + * On x86-64 we do not support 16-bit segments due to + * IRET leaking the high bits of the kernel stack address. diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 1667b1d..16492c5 100644 --- a/arch/x86/kernel/machine_kexec_32.c @@ -27876,7 +28328,7 @@ index da6b35a..977e9cf 100644 #ifdef CONFIG_SMP diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c -index 1f96f93..d5c8f7a 100644 +index 1f96f93..6f29be7 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -56,15 +56,13 @@ @@ -27896,6 +28348,15 @@ index 1f96f93..d5c8f7a 100644 else if (!strcmp("none", str)) vsyscall_mode = NONE; else +@@ -101,7 +99,7 @@ void update_vsyscall(struct timekeeper *tk) + vdata->monotonic_time_sec = tk->xtime_sec + + tk->wall_to_monotonic.tv_sec; + vdata->monotonic_time_snsec = tk->xtime_nsec +- + (tk->wall_to_monotonic.tv_nsec ++ + ((u64)tk->wall_to_monotonic.tv_nsec + << tk->shift); + while (vdata->monotonic_time_snsec >= + (((u64)NSEC_PER_SEC) << tk->shift)) { @@ -323,8 +321,7 @@ do_ret: return true; @@ -34869,6 +35330,24 @@ index dac7b20..72dbaca 100644 movl %eax, %cr0 /* +diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S +index 9e7e147..25a4158 100644 +--- a/arch/x86/realmode/rm/wakeup_asm.S ++++ b/arch/x86/realmode/rm/wakeup_asm.S +@@ -126,11 +126,10 @@ ENTRY(wakeup_start) + lgdtl pmode_gdt + + /* This really couldn't... 
*/ +- movl pmode_entry, %eax + movl pmode_cr0, %ecx + movl %ecx, %cr0 +- ljmpl $__KERNEL_CS, $pa_startup_32 +- /* -> jmp *%eax in trampoline_32.S */ ++ ++ ljmpl *pmode_entry + #else + jmp trampoline_start + #endif diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index e812034..c747134 100644 --- a/arch/x86/tools/Makefile @@ -36084,7 +36563,7 @@ index 36605ab..6ef6d4b 100644 unsigned long timeout_msec) { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index 8cb2522..a815e54 100644 +index 0a79c54..c1b92ed 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev); @@ -36096,7 +36575,7 @@ index 8cb2522..a815e54 100644 struct ata_force_param { const char *name; -@@ -4851,7 +4851,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) +@@ -4858,7 +4858,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) struct ata_port *ap; unsigned int tag; @@ -36105,7 +36584,7 @@ index 8cb2522..a815e54 100644 ap = qc->ap; qc->flags = 0; -@@ -4867,7 +4867,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) +@@ -4874,7 +4874,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) struct ata_port *ap; struct ata_link *link; @@ -36114,7 +36593,7 @@ index 8cb2522..a815e54 100644 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); ap = qc->ap; link = qc->dev->link; -@@ -5986,6 +5986,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) +@@ -5993,6 +5993,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) return; spin_lock(&lock); @@ -36122,7 +36601,7 @@ index 8cb2522..a815e54 100644 for (cur = ops->inherits; cur; cur = cur->inherits) { void **inherit = (void **)cur; -@@ -5999,8 +6000,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) +@@ -6006,8 +6007,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops) if (IS_ERR(*pp)) *pp = NULL; @@ -36133,7 +36612,7 @@ index 8cb2522..a815e54 100644 spin_unlock(&lock); } -@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) +@@ -6200,7 +6202,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* give ports names and add SCSI hosts */ for (i = 0; i < host->n_ports; i++) { @@ -38127,10 +38606,18 @@ index a48e05b..6bac831 100644 kfree(usegment); kfree(ksegment); diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c -index 1b19239..b87b143 100644 +index 1b19239..963967b 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c -@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) +@@ -731,6 +731,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) + + agp_copy_info(agp_bridge, &kerninfo); + ++ memset(&userinfo, 0, sizeof(userinfo)); + userinfo.version.major = kerninfo.version.major; + userinfo.version.minor = kerninfo.version.minor; + userinfo.bridge_id = kerninfo.device->vendor | +@@ -819,7 +820,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) return -EFAULT; @@ -38139,7 +38626,7 @@ index 1b19239..b87b143 100644 return -EFAULT; client = agp_find_client_by_pid(reserve.pid); -@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) +@@ -849,7 +850,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) if (segment == NULL) return -ENOMEM; @@ 
-39080,7 +39567,7 @@ index 3d1cba9..0ab21d2 100644 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c -index 724ffbd..ad83692 100644 +index 724ffbd..f06aaaa 100644 --- a/drivers/cpufreq/sparc-us3-cpufreq.c +++ b/drivers/cpufreq/sparc-us3-cpufreq.c @@ -18,14 +18,12 @@ @@ -39099,7 +39586,7 @@ index 724ffbd..ad83692 100644 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled * in the Safari config register. -@@ -156,14 +154,26 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) +@@ -156,18 +154,28 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) static int us3_freq_cpu_exit(struct cpufreq_policy *policy) { @@ -39122,7 +39609,6 @@ index 724ffbd..ad83692 100644 + .target_index = us3_freq_target, + .get = us3_freq_get, + .exit = us3_freq_cpu_exit, -+ .owner = THIS_MODULE, + .name = "UltraSPARC-III", + +}; @@ -39130,7 +39616,11 @@ index 724ffbd..ad83692 100644 static int __init us3_freq_init(void) { unsigned long manuf, impl, ver; -@@ -180,55 +190,15 @@ static int __init us3_freq_init(void) +- int ret; + + if (tlb_type != cheetah && tlb_type != cheetah_plus) + return -ENODEV; +@@ -180,55 +188,15 @@ static int __init us3_freq_init(void) (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL || impl == JAGUAR_IMPL || @@ -41862,7 +42352,7 @@ index acc911a..8700c3c 100644 struct iio_chan_spec const *chan, ssize_t (*readfunc)(struct device *dev, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c -index 0601b9d..e9dc455 100644 +index c323917..6ddea8b 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS] @@ -41874,7 +42364,7 @@ index 0601b9d..e9dc455 100644 }; struct cm_counter_attribute { -@@ -1415,7 +1415,7 @@ static void cm_dup_req_handler(struct cm_work *work, +@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work, struct ib_mad_send_buf *msg = NULL; int ret; @@ -41883,7 +42373,7 @@ index 0601b9d..e9dc455 100644 counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. 
*/ -@@ -1802,7 +1802,7 @@ static void cm_dup_rep_handler(struct cm_work *work) +@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work) if (!cm_id_priv) return; @@ -41892,7 +42382,7 @@ index 0601b9d..e9dc455 100644 counter[CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) -@@ -1969,7 +1969,7 @@ static int cm_rtu_handler(struct cm_work *work) +@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work) if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); @@ -41901,7 +42391,7 @@ index 0601b9d..e9dc455 100644 counter[CM_RTU_COUNTER]); goto out; } -@@ -2152,7 +2152,7 @@ static int cm_dreq_handler(struct cm_work *work) +@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work) cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, dreq_msg->local_comm_id); if (!cm_id_priv) { @@ -41910,7 +42400,7 @@ index 0601b9d..e9dc455 100644 counter[CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); return -EINVAL; -@@ -2177,7 +2177,7 @@ static int cm_dreq_handler(struct cm_work *work) +@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work) case IB_CM_MRA_REP_RCVD: break; case IB_CM_TIMEWAIT: @@ -41919,7 +42409,7 @@ index 0601b9d..e9dc455 100644 counter[CM_DREQ_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; -@@ -2191,7 +2191,7 @@ static int cm_dreq_handler(struct cm_work *work) +@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work) cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: @@ -41928,7 +42418,7 @@ index 0601b9d..e9dc455 100644 counter[CM_DREQ_COUNTER]); goto unlock; default: -@@ -2558,7 +2558,7 @@ static int cm_mra_handler(struct cm_work *work) +@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work) ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) @@ -41937,7 +42427,7 @@ index 0601b9d..e9dc455 100644 counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); goto out; -@@ -2567,7 +2567,7 @@ static int cm_mra_handler(struct cm_work *work) +@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work) break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: @@ -41946,7 +42436,7 @@ index 0601b9d..e9dc455 100644 counter[CM_MRA_COUNTER]); /* fall through */ default: -@@ -2729,7 +2729,7 @@ static int cm_lap_handler(struct cm_work *work) +@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work) case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: @@ -41955,7 +42445,7 @@ index 0601b9d..e9dc455 100644 counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; -@@ -2745,7 +2745,7 @@ static int cm_lap_handler(struct cm_work *work) +@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work) cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: @@ -41964,7 +42454,7 @@ index 0601b9d..e9dc455 100644 counter[CM_LAP_COUNTER]); goto unlock; default: -@@ -3029,7 +3029,7 @@ static int cm_sidr_req_handler(struct cm_work *work) +@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work) cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); @@ -41973,7 +42463,7 @@ index 0601b9d..e9dc455 100644 counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. 
*/ } -@@ -3241,10 +3241,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, +@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; @@ -41986,7 +42476,7 @@ index 0601b9d..e9dc455 100644 &port->counter_group[CM_XMIT_RETRIES]. counter[attr_index]); -@@ -3454,7 +3454,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, +@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); @@ -41995,7 +42485,7 @@ index 0601b9d..e9dc455 100644 counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, -@@ -3685,7 +3685,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, +@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", @@ -42310,10 +42800,10 @@ index ed9a989..6aa5dc2 100644 int list_len, u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c -index 5b71d43..35a9e14 100644 +index 42dde06..1257310 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c -@@ -763,7 +763,7 @@ unlock: +@@ -764,7 +764,7 @@ unlock: return 0; } @@ -42727,7 +43217,7 @@ index 49eb511..a774366 100644 /** diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c -index 8308e36..ae0d3b5 100644 +index eb62461..2b7fc71 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -46,9 +46,9 @@ @@ -43810,10 +44300,10 @@ index 6a7f2b8..fea0bde 100644 "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c -index fb9efc8..81e8986 100644 +index b086a94..74cb67e 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c -@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) +@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) { pmd->info.tm = pmd->tm; pmd->info.levels = 2; @@ -43822,7 +44312,7 @@ index fb9efc8..81e8986 100644 pmd->info.value_type.size = sizeof(__le64); pmd->info.value_type.inc = data_block_inc; pmd->info.value_type.dec = data_block_dec; -@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) +@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) pmd->bl_info.tm = pmd->tm; pmd->bl_info.levels = 1; @@ -44557,7 +45047,7 @@ index ae0f56a..ec71784 100644 /* debug */ static int dvb_usb_dw2102_debug; diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c -index 8f7a6a4..eb0e1d4 100644 +index b63a5e5..b16a062 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -326,7 +326,7 @@ struct v4l2_buffer32 { @@ -45050,7 +45540,7 @@ index 81b7d88..95ae998 100644 #include <linux/pci.h> #include <linux/interrupt.h> diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c -index 176aa26..27811b2 100644 +index a83eed5..62a58a9 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -152,7 +152,7 @@ 
static int max8925_probe(struct i2c_client *client, @@ -45063,7 +45553,7 @@ index 176aa26..27811b2 100644 if (node && !pdata) { diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c -index 1f142d7..cc52c2a 100644 +index d657331..0d9a80f 100644 --- a/drivers/mfd/tps65910.c +++ b/drivers/mfd/tps65910.c @@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq, @@ -45652,7 +46142,7 @@ index 51b9d6a..52af9a7 100644 #include <linux/mtd/nand.h> #include <linux/mtd/nftl.h> diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c -index 4b8e895..6b3c498 100644 +index cf49c22..971b133 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr, @@ -45663,7 +46153,7 @@ index 4b8e895..6b3c498 100644 + attribute_group_no_const *attr_group; struct attribute **attributes; struct sm_sysfs_attribute *vendor_attribute; - + char *vendor; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e5628fc..ffe54d1 100644 --- a/drivers/net/bonding/bond_main.c @@ -46247,6 +46737,19 @@ index 26f8635..c237839 100644 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index d350d27..75d7d9d 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -768,7 +768,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + skb_out->len > CDC_NCM_MIN_TX_PKT) + memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, + ctx->tx_max - skb_out->len); +- else if ((skb_out->len % dev->maxpacket) == 0) ++ else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) + *skb_put(skb_out, 1) = 0; /* force short packet */ + + /* set final frame length */ diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 660bd5e..ac59452 100644 --- a/drivers/net/usb/hso.c @@ -47039,10 +47542,10 @@ index d2fe259..0c4c682 100644 memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c -index ba1b1ea..0ff7e98 100644 +index ea7e70c..bc0c45f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c -@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv) +@@ -1127,7 +1127,7 @@ static void iwl_option_config(struct iwl_priv *priv) static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) { struct iwl_nvm_data *data = priv->nvm_data; @@ -47051,7 +47554,7 @@ index ba1b1ea..0ff7e98 100644 if (data->sku_cap_11n_enable && !priv->cfg->ht_params) { -@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) +@@ -1141,7 +1141,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) return -EINVAL; } @@ -47796,7 +48299,7 @@ index 8f8551a..3ace3ca 100644 static ssize_t sony_nc_highspeed_charging_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c -index defb6af..7a5d3d1 100644 +index e2a91c8..986cc9f 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void) @@ -49184,7 +49687,7 @@ index b2ede05..aaf482ca 100644 /** diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c -index 
7f0af4f..193ac3e 100644 +index 6fd7d40..b444223 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev) @@ -49368,7 +49871,7 @@ index 1f42662..bf9836c 100644 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool); extern void qla2x00_init_host_attr(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c -index 89a5300..2a459ab 100644 +index 83cb612..9b7b08c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1491,8 +1491,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha) @@ -50273,10 +50776,10 @@ index a57bb5a..1f727d33 100644 struct tty_struct *tty; struct tty_ldisc *ld; diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c -index 50b4688..e1e8125 100644 +index 0ff7fda..dbc7d52 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c -@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) +@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) spin_lock_irqsave(&hp->port.lock, flags); /* Check and then increment for fast path open. */ @@ -50285,7 +50788,7 @@ index 50b4688..e1e8125 100644 spin_unlock_irqrestore(&hp->port.lock, flags); hvc_kick(); return 0; -@@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) +@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) spin_lock_irqsave(&hp->port.lock, flags); @@ -50294,7 +50797,7 @@ index 50b4688..e1e8125 100644 spin_unlock_irqrestore(&hp->port.lock, flags); /* We are done with the tty pointer now. */ tty_port_tty_set(&hp->port, NULL); -@@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) +@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) */ tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT); } else { @@ -50306,7 +50809,7 @@ index 50b4688..e1e8125 100644 spin_unlock_irqrestore(&hp->port.lock, flags); } } -@@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty) +@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty) * open->hangup case this can be called after the final close so prevent * that from happening for now. */ @@ -50321,7 +50824,7 @@ index 50b4688..e1e8125 100644 spin_unlock_irqrestore(&hp->port.lock, flags); tty_port_tty_set(&hp->port, NULL); -@@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count +@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count return -EPIPE; /* FIXME what's this (unprotected) check for? 
*/ @@ -50699,7 +51202,7 @@ index 2ebe47b..3205833 100644 dlci->modem_rx = 0; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c -index d15624c..bd628c6 100644 +index e36d1f5..9938e3e 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -115,7 +115,7 @@ struct n_tty_data { @@ -50711,35 +51214,7 @@ index d15624c..bd628c6 100644 size_t line_start; /* protected by output lock */ -@@ -2356,10 +2356,18 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, - if (tty->ops->flush_chars) - tty->ops->flush_chars(tty); - } else { -+ struct n_tty_data *ldata = tty->disc_data; -+ bool lock; -+ -+ lock = L_ECHO(tty) || (ldata->icanon & L_ECHONL(tty)); -+ if (lock) -+ mutex_lock(&ldata->output_lock); - while (nr > 0) { - c = tty->ops->write(tty, b, nr); - if (c < 0) { - retval = c; -+ if (lock) -+ mutex_unlock(&ldata->output_lock); - goto break_out; - } - if (!c) -@@ -2367,6 +2375,8 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, - b += c; - nr -= c; - } -+ if (lock) -+ mutex_unlock(&ldata->output_lock); - } - if (!nr) - break; -@@ -2515,6 +2525,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) +@@ -2519,6 +2519,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) { *ops = tty_ldisc_N_TTY; ops->owner = NULL; @@ -51889,7 +52364,7 @@ index 2518c32..1c201bb 100644 wake_up(&usb_kill_urb_queue); usb_put_urb(urb); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index 64ea219..dbc1780 100644 +index d498d03..e26f959 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -27,6 +27,7 @@ @@ -52394,18 +52869,18 @@ index 28fafbf..ae91651 100644 } diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c -index 95ec042..e6affdd 100644 +index 0fe02e2..ab01b26 100644 --- a/drivers/video/aty/mach64_cursor.c +++ b/drivers/video/aty/mach64_cursor.c -@@ -7,6 +7,7 @@ - #include <linux/string.h> +@@ -8,6 +8,7 @@ + #include "../fb_draw.h" #include <asm/io.h> +#include <asm/pgtable.h> #ifdef __sparc__ #include <asm/fbio.h> -@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info) +@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info) info->sprite.buf_align = 16; /* and 64 lines tall. */ info->sprite.flags = FB_PIXMAP_IO; @@ -55850,6 +56325,19 @@ index 370b24c..ff0be7b 100644 ---help--- A.out (Assembler.OUTput) is a set of formats for libraries and executables used in the earliest versions of UNIX. Linux used +diff --git a/fs/affs/super.c b/fs/affs/super.c +index d098731..9a5b19d 100644 +--- a/fs/affs/super.c ++++ b/fs/affs/super.c +@@ -336,8 +336,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) + &blocksize,&sbi->s_prefix, + sbi->s_volume, &mount_flags)) { + printk(KERN_ERR "AFFS: Error parsing options\n"); +- kfree(sbi->s_prefix); +- kfree(sbi); + return -EINVAL; + } + /* N.B. 
after this point s_prefix must be released */ diff --git a/fs/afs/inode.c b/fs/afs/inode.c index ce25d75..dc09eeb 100644 --- a/fs/afs/inode.c @@ -55873,10 +56361,10 @@ index ce25d75..dc09eeb 100644 &data); if (!inode) { diff --git a/fs/aio.c b/fs/aio.c -index 062a5f6..e5618e0 100644 +index 12a3de0e..25949c1 100644 --- a/fs/aio.c +++ b/fs/aio.c -@@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx *ctx) +@@ -375,7 +375,7 @@ static int aio_setup_ring(struct kioctx *ctx) size += sizeof(struct io_event) * nr_events; nr_pages = PFN_UP(size); @@ -55885,6 +56373,19 @@ index 062a5f6..e5618e0 100644 return -EINVAL; file = aio_private_file(ctx, nr_pages); +@@ -1299,10 +1299,8 @@ rw_common: + &iovec, compat) + : aio_setup_single_vector(req, rw, buf, &nr_segs, + iovec); +- if (ret) +- return ret; +- +- ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); ++ if (!ret) ++ ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); + if (ret < 0) { + if (iovec != &inline_vec) + kfree(iovec); diff --git a/fs/attr.c b/fs/attr.c index 5d4e59d..fd02418 100644 --- a/fs/attr.c @@ -57017,42 +57518,6 @@ index 67be295..83e2f86 100644 static int __init init_elf_binfmt(void) { register_binfmt(&elf_format); -diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c -index d50bbe5..af3b649 100644 ---- a/fs/binfmt_flat.c -+++ b/fs/binfmt_flat.c -@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm, - realdatastart = (unsigned long) -ENOMEM; - printk("Unable to allocate RAM for process data, errno %d\n", - (int)-realdatastart); -+ down_write(¤t->mm->mmap_sem); - vm_munmap(textpos, text_len); -+ up_write(¤t->mm->mmap_sem); - ret = realdatastart; - goto err; - } -@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm, - } - if (IS_ERR_VALUE(result)) { - printk("Unable to read data+bss, errno %d\n", (int)-result); -+ down_write(¤t->mm->mmap_sem); - vm_munmap(textpos, text_len); - vm_munmap(realdatastart, len); -+ up_write(¤t->mm->mmap_sem); - ret = result; - goto err; - } -@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm, - } - if (IS_ERR_VALUE(result)) { - printk("Unable to read code+data+bss, errno %d\n",(int)-result); -+ down_write(¤t->mm->mmap_sem); - vm_munmap(textpos, text_len + data_len + extra + - MAX_SHARED_LIBS * sizeof(unsigned long)); -+ up_write(¤t->mm->mmap_sem); - ret = result; - goto err; - } diff --git a/fs/bio.c b/fs/bio.c index 8754e7b..0669094 100644 --- a/fs/bio.c @@ -58314,7 +58779,7 @@ index e4141f2..d8263e8 100644 i += packet_length_size; if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size)) diff --git a/fs/exec.c b/fs/exec.c -index 3d78fcc..5a38b6b 100644 +index 3d78fcc..460e2a0 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,8 +55,20 @@ @@ -58797,7 +59262,7 @@ index 3d78fcc..5a38b6b 100644 out: if (bprm->mm) { acct_arg_size(bprm, 0); -@@ -1626,3 +1800,295 @@ asmlinkage long compat_sys_execve(const char __user * filename, +@@ -1626,3 +1800,296 @@ asmlinkage long compat_sys_execve(const char __user * filename, return compat_do_execve(getname(filename), argv, envp); } #endif @@ -59014,6 +59479,7 @@ index 3d78fcc..5a38b6b 100644 +#endif + +#ifdef CONFIG_PAX_USERCOPY ++ +static inline bool check_kernel_text_object(unsigned long low, unsigned long high) +{ +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) @@ -59193,7 +59659,7 @@ index 6ea7b14..8fa16d9 100644 if (free_clusters >= (nclusters + dirty_clusters + resv_clusters)) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h -index d3a534f..242c50a 100644 +index 
3a603a8..9b868ba 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1269,19 +1269,19 @@ struct ext4_sb_info { @@ -59379,7 +59845,7 @@ index 710fed2..a82e4e8 100644 static int parse_strtoull(const char *buf, unsigned long long max, unsigned long long *value) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c -index e175e94..3ea69bf 100644 +index 55e611c..cfad16d 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -381,7 +381,7 @@ static int @@ -61299,10 +61765,10 @@ index acd3947..1f896e2 100644 memcpy(c->data, &cookie, 4); c->len=4; diff --git a/fs/locks.c b/fs/locks.c -index 92a0f0a..45a48f0 100644 +index 4dd39b9..12d6aaf 100644 --- a/fs/locks.c +++ b/fs/locks.c -@@ -2219,16 +2219,16 @@ void locks_remove_flock(struct file *filp) +@@ -2218,16 +2218,16 @@ void locks_remove_flock(struct file *filp) return; if (filp->f_op->flock) { @@ -61906,7 +62372,7 @@ index 4b491b4..a0166f9 100644 out: return len; diff --git a/fs/namespace.c b/fs/namespace.c -index 2ffc5a2..6737083 100644 +index 65233a5..82ac953 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1339,6 +1339,9 @@ static int do_umount(struct mount *mnt, int flags) @@ -61947,7 +62413,7 @@ index 2ffc5a2..6737083 100644 { return sys_umount(name, 0); } -@@ -2426,6 +2432,16 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2431,6 +2437,16 @@ long do_mount(const char *dev_name, const char *dir_name, MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); @@ -61964,7 +62430,7 @@ index 2ffc5a2..6737083 100644 if (flags & MS_REMOUNT) retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, data_page); -@@ -2440,6 +2456,9 @@ long do_mount(const char *dev_name, const char *dir_name, +@@ -2445,6 +2461,9 @@ long do_mount(const char *dev_name, const char *dir_name, dev_name, data_page); dput_out: path_put(&path); @@ -61974,7 +62440,7 @@ index 2ffc5a2..6737083 100644 return retval; } -@@ -2457,7 +2476,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) +@@ -2462,7 +2481,7 @@ static void free_mnt_ns(struct mnt_namespace *ns) * number incrementing at 10Ghz will take 12,427 years to wrap which * is effectively never, so we can ignore the possibility. 
*/ @@ -61983,7 +62449,7 @@ index 2ffc5a2..6737083 100644 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) { -@@ -2472,7 +2491,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2477,7 +2496,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) kfree(new_ns); return ERR_PTR(ret); } @@ -61992,7 +62458,7 @@ index 2ffc5a2..6737083 100644 atomic_set(&new_ns->count, 1); new_ns->root = NULL; INIT_LIST_HEAD(&new_ns->list); -@@ -2482,7 +2501,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) +@@ -2487,7 +2506,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns) return new_ns; } @@ -62001,7 +62467,7 @@ index 2ffc5a2..6737083 100644 struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; -@@ -2603,8 +2622,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) +@@ -2608,8 +2627,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) } EXPORT_SYMBOL(mount_subtree); @@ -62012,7 +62478,7 @@ index 2ffc5a2..6737083 100644 { int ret; char *kernel_type; -@@ -2717,6 +2736,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, +@@ -2722,6 +2741,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, if (error) goto out2; @@ -62024,7 +62490,7 @@ index 2ffc5a2..6737083 100644 get_fs_root(current->fs, &root); old_mp = lock_mount(&old); error = PTR_ERR(old_mp); -@@ -2985,7 +3009,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) +@@ -2990,7 +3014,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns) !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; @@ -62070,60 +62536,8 @@ index 360114a..ac6e265 100644 } void nfs_fattr_init(struct nfs_fattr *fattr) -diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c -index 450bfed..d5d06e8 100644 ---- a/fs/nfs/nfs4proc.c -+++ b/fs/nfs/nfs4proc.c -@@ -1068,6 +1068,7 @@ static void nfs4_opendata_free(struct kref *kref) - dput(p->dentry); - nfs_sb_deactive(sb); - nfs_fattr_free_names(&p->f_attr); -+ kfree(p->f_attr.mdsthreshold); - kfree(p); - } - -@@ -2244,10 +2245,12 @@ static int _nfs4_do_open(struct inode *dir, - } - } - -- if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { -- opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); -- if (!opendata->f_attr.mdsthreshold) -- goto err_free_label; -+ if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { -+ if (!opendata->f_attr.mdsthreshold) { -+ opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); -+ if (!opendata->f_attr.mdsthreshold) -+ goto err_free_label; -+ } - opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; - } - if (dentry->d_inode != NULL) -@@ -2275,11 +2278,10 @@ static int _nfs4_do_open(struct inode *dir, - if (opendata->file_created) - *opened |= FILE_CREATED; - -- if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) -+ if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { - *ctx_th = opendata->f_attr.mdsthreshold; -- else -- kfree(opendata->f_attr.mdsthreshold); -- opendata->f_attr.mdsthreshold = NULL; -+ opendata->f_attr.mdsthreshold = NULL; -+ } - - nfs4_label_free(olabel); - -@@ -2289,7 +2291,6 @@ static int _nfs4_do_open(struct inode *dir, - err_free_label: - nfs4_label_free(olabel); - err_opendata_put: -- kfree(opendata->f_attr.mdsthreshold); - nfs4_opendata_put(opendata); - err_put_state_owner: - nfs4_put_state_owner(sp); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c 
-index 82189b2..e43a39f 100644 +index 9a914e8..e89c0ea 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1178,7 +1178,7 @@ struct nfsd4_operation { @@ -62136,7 +62550,7 @@ index 82189b2..e43a39f 100644 static struct nfsd4_operation nfsd4_ops[]; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c -index 63f2395..7c47f4d 100644 +index 16e8fa7..b0803f6 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1531,7 +1531,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p) @@ -62174,10 +62588,10 @@ index f8f060f..c4ba09a 100644 /* Don't cache excessive amounts of data and XDR failures */ if (!statp || len > (256 >> 2)) { diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c -index 6d7be3f..ef02c86 100644 +index eea5ad1..5a84ac7 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c -@@ -834,7 +834,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, +@@ -843,7 +843,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, } else { oldfs = get_fs(); set_fs(KERNEL_DS); @@ -62186,7 +62600,7 @@ index 6d7be3f..ef02c86 100644 set_fs(oldfs); } -@@ -925,7 +925,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, +@@ -934,7 +934,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, /* Write the data. */ oldfs = get_fs(); set_fs(KERNEL_DS); @@ -62195,7 +62609,7 @@ index 6d7be3f..ef02c86 100644 set_fs(oldfs); if (host_err < 0) goto out_nfserr; -@@ -1470,7 +1470,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) +@@ -1479,7 +1479,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) */ oldfs = get_fs(); set_fs(KERNEL_DS); @@ -62292,7 +62706,7 @@ index a80a741..7b96e1b 100644 } diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c -index 287a22c..a2a043a 100644 +index 287a22c..4e56e4e 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, @@ -62306,6 +62720,15 @@ index 287a22c..a2a043a 100644 goto out_close_fd; ret = prepare_for_access_response(group, event, fd); +@@ -742,6 +742,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) + oevent->path.mnt = NULL; + oevent->path.dentry = NULL; + ++ if (force_o_largefile()) ++ event_f_flags |= O_LARGEFILE; + group->fanotify_data.f_flags = event_f_flags; + #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + oevent->response = 0; diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 1e58402..bb2d6f4 100644 --- a/fs/notify/notification.c @@ -64177,7 +64600,7 @@ index 87dbcbe..55e1b4d 100644 } diff --git a/fs/proc/stat.c b/fs/proc/stat.c -index 6f599c6..8f4644f 100644 +index 6f599c6..bd00271 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -11,6 +11,7 @@ @@ -64207,34 +64630,63 @@ index 6f599c6..8f4644f 100644 user = nice = system = idle = iowait = irq = softirq = steal = 0; -@@ -94,6 +107,7 @@ static int show_stat(struct seq_file *p, void *v) - getboottime(&boottime); - jif = boottime.tv_sec; - -+ if (unrestricted) { - for_each_possible_cpu(i) { - user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; +@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v) nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; -@@ -116,6 +130,7 @@ static int show_stat(struct seq_file *p, void *v) + system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(i); +- iowait += get_iowait_time(i); +- 
irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; +- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; +- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; +- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; +- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; +- sum += kstat_cpu_irqs_sum(i); +- sum += arch_irq_stat_cpu(i); ++ if (unrestricted) { ++ iowait += get_iowait_time(i); ++ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; ++ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; ++ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; ++ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; ++ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; ++ sum += kstat_cpu_irqs_sum(i); ++ sum += arch_irq_stat_cpu(i); ++ for (j = 0; j < NR_SOFTIRQS; j++) { ++ unsigned int softirq_stat = kstat_softirqs_cpu(j, i); + +- for (j = 0; j < NR_SOFTIRQS; j++) { +- unsigned int softirq_stat = kstat_softirqs_cpu(j, i); +- +- per_softirq_sums[j] += softirq_stat; +- sum_softirq += softirq_stat; ++ per_softirq_sums[j] += softirq_stat; ++ sum_softirq += softirq_stat; ++ } } } - sum += arch_irq_stat(); -+ } +- sum += arch_irq_stat(); ++ if (unrestricted) ++ sum += arch_irq_stat(); seq_puts(p, "cpu "); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); -@@ -131,6 +146,7 @@ static int show_stat(struct seq_file *p, void *v) - seq_putc(p, '\n'); - - for_each_online_cpu(i) { -+ if (unrestricted) { - /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; +@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v) nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; -@@ -142,6 +158,7 @@ static int show_stat(struct seq_file *p, void *v) - steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; - guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; - guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; + system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(i); +- iowait = get_iowait_time(i); +- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; +- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; +- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; +- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; +- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; ++ if (unrestricted) { ++ iowait = get_iowait_time(i); ++ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; ++ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; ++ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; ++ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; ++ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; + } seq_printf(p, "cpu%d", i); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); @@ -80033,10 +80485,10 @@ index b8e9a43..632678d 100644 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); diff --git a/include/linux/libata.h b/include/linux/libata.h -index bec6dbe..2873d64 100644 +index 3fee55e..42565b7 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h -@@ -975,7 +975,7 @@ struct ata_port_operations { +@@ -976,7 +976,7 @@ struct ata_port_operations { * fields must be pointers. 
*/ const struct ata_port_operations *inherits; @@ -80751,10 +81203,10 @@ index c3eb102..073c4a6 100644 .ops = ¶m_ops_##type, \ .elemsize = sizeof(array[0]), .elem = array }; \ diff --git a/include/linux/mount.h b/include/linux/mount.h -index 371d346..fba2819 100644 +index 839bac2..a96b37c 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h -@@ -56,7 +56,7 @@ struct vfsmount { +@@ -59,7 +59,7 @@ struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; @@ -82438,16 +82890,26 @@ index 387fa7d..3fcde6b 100644 #ifdef CONFIG_MAGIC_SYSRQ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h -index fddbe20..0312de8 100644 +index fddbe20..e4cce53 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h -@@ -161,6 +161,15 @@ static inline bool test_and_clear_restore_sigmask(void) +@@ -161,6 +161,25 @@ static inline bool test_and_clear_restore_sigmask(void) #error "no set_restore_sigmask() provided and default one won't work" #endif +extern void __check_object_size(const void *ptr, unsigned long n, bool to_user); ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_USERCOPY) ++extern void pax_check_alloca(unsigned long size); ++#endif ++ +static inline void check_object_size(const void *ptr, unsigned long n, bool to_user) +{ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_USERCOPY) ++ /* always check if we've overflowed the stack in a copy*user */ ++ pax_check_alloca(sizeof(unsigned long)); ++#endif ++ +#ifndef CONFIG_PAX_USERCOPY_DEBUG + if (!__builtin_constant_p(n)) +#endif @@ -84162,7 +84624,7 @@ index 6d67213..552fdd9 100644 enum { diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h -index 6ae7bbe..1e487fe 100644 +index fe94bb9..c9e51c2 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -1227,7 +1227,7 @@ struct v4l2_ext_control { @@ -84175,10 +84637,10 @@ index 6ae7bbe..1e487fe 100644 } __attribute__ ((packed)); diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h -index 40bbc04..e30d9a2 100644 +index c38355c..17a57bc 100644 --- a/include/uapi/linux/xattr.h +++ b/include/uapi/linux/xattr.h -@@ -66,5 +66,9 @@ +@@ -73,5 +73,9 @@ #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT @@ -89848,7 +90310,7 @@ index c0a58be..784c618 100644 if (!retval) { if (old_rlim) diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 49e13e1..8dbc052 100644 +index aae21e8..58d8c9a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -94,7 +94,6 @@ @@ -89888,7 +90350,7 @@ index 49e13e1..8dbc052 100644 #endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ -@@ -177,10 +175,8 @@ static int proc_taint(struct ctl_table *table, int write, +@@ -182,10 +180,8 @@ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif @@ -89899,7 +90361,7 @@ index 49e13e1..8dbc052 100644 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -@@ -211,6 +207,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, +@@ -216,6 +212,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, #endif @@ -89908,7 +90370,7 @@ index 49e13e1..8dbc052 100644 static struct ctl_table kern_table[]; static struct ctl_table vm_table[]; static struct ctl_table fs_table[]; -@@ -225,6 +223,20 @@ extern struct 
ctl_table epoll_table[]; +@@ -230,6 +228,20 @@ extern struct ctl_table epoll_table[]; int sysctl_legacy_va_layout; #endif @@ -89929,7 +90391,7 @@ index 49e13e1..8dbc052 100644 /* The default sysctl tables: */ static struct ctl_table sysctl_base_table[] = { -@@ -273,6 +285,22 @@ static int max_extfrag_threshold = 1000; +@@ -278,6 +290,22 @@ static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] = { @@ -89952,7 +90414,7 @@ index 49e13e1..8dbc052 100644 { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, -@@ -635,7 +663,7 @@ static struct ctl_table kern_table[] = { +@@ -640,7 +668,7 @@ static struct ctl_table kern_table[] = { .data = &modprobe_path, .maxlen = KMOD_PATH_LEN, .mode = 0644, @@ -89961,7 +90423,7 @@ index 49e13e1..8dbc052 100644 }, { .procname = "modules_disabled", -@@ -802,16 +830,20 @@ static struct ctl_table kern_table[] = { +@@ -807,16 +835,20 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, @@ -89983,7 +90445,7 @@ index 49e13e1..8dbc052 100644 { .procname = "ngroups_max", .data = &ngroups_max, -@@ -1055,10 +1087,17 @@ static struct ctl_table kern_table[] = { +@@ -1061,10 +1093,17 @@ static struct ctl_table kern_table[] = { */ { .procname = "perf_event_paranoid", @@ -90004,7 +90466,7 @@ index 49e13e1..8dbc052 100644 }, { .procname = "perf_event_mlock_kb", -@@ -1329,6 +1368,13 @@ static struct ctl_table vm_table[] = { +@@ -1335,6 +1374,13 @@ static struct ctl_table vm_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, @@ -90018,7 +90480,7 @@ index 49e13e1..8dbc052 100644 #else { .procname = "nr_trim_pages", -@@ -1793,6 +1839,16 @@ int proc_dostring(struct ctl_table *table, int write, +@@ -1799,6 +1845,16 @@ int proc_dostring(struct ctl_table *table, int write, buffer, lenp, ppos); } @@ -90035,7 +90497,7 @@ index 49e13e1..8dbc052 100644 static size_t proc_skip_spaces(char **buf) { size_t ret; -@@ -1898,6 +1954,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, +@@ -1904,6 +1960,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, len = strlen(tmp); if (len > *size) len = *size; @@ -90044,7 +90506,7 @@ index 49e13e1..8dbc052 100644 if (copy_to_user(*buf, tmp, len)) return -EFAULT; *size -= len; -@@ -2062,7 +2120,7 @@ int proc_dointvec(struct ctl_table *table, int write, +@@ -2068,7 +2126,7 @@ int proc_dointvec(struct ctl_table *table, int write, static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -90053,7 +90515,7 @@ index 49e13e1..8dbc052 100644 unsigned long tmptaint = get_taint(); int err; -@@ -2090,7 +2148,6 @@ static int proc_taint(struct ctl_table *table, int write, +@@ -2096,7 +2154,6 @@ static int proc_taint(struct ctl_table *table, int write, return err; } @@ -90061,7 +90523,7 @@ index 49e13e1..8dbc052 100644 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { -@@ -2099,7 +2156,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, +@@ -2105,7 +2162,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } @@ -90069,7 +90531,7 @@ index 49e13e1..8dbc052 100644 struct do_proc_dointvec_minmax_conv_param { int *min; -@@ -2646,6 +2702,12 @@ int proc_dostring(struct ctl_table *table, int write, +@@ -2652,6 +2708,12 @@ int proc_dostring(struct ctl_table *table, int 
write, return -ENOSYS; } @@ -90082,7 +90544,7 @@ index 49e13e1..8dbc052 100644 int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { -@@ -2702,5 +2764,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax); +@@ -2708,5 +2770,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); EXPORT_SYMBOL(proc_dointvec_ms_jiffies); EXPORT_SYMBOL(proc_dostring); @@ -91814,10 +92276,10 @@ index b32b70c..e512eb0 100644 set_page_address(page, (void *)vaddr); diff --git a/mm/hugetlb.c b/mm/hugetlb.c -index c01cb9f..ac0f58e 100644 +index 2de3c84..4ecaf1b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c -@@ -2068,15 +2068,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, +@@ -2069,15 +2069,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct hstate *h = &default_hstate; unsigned long tmp; int ret; @@ -91838,7 +92300,7 @@ index c01cb9f..ac0f58e 100644 if (ret) goto out; -@@ -2121,15 +2123,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, +@@ -2122,15 +2124,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, struct hstate *h = &default_hstate; unsigned long tmp; int ret; @@ -91859,7 +92321,7 @@ index c01cb9f..ac0f58e 100644 if (ret) goto out; -@@ -2598,6 +2602,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2599,6 +2603,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, return 1; } @@ -91887,7 +92349,7 @@ index c01cb9f..ac0f58e 100644 /* * Hugetlb_cow() should be called with page lock of the original hugepage held. * Called with hugetlb_instantiation_mutex held and pte_page locked so we -@@ -2714,6 +2739,11 @@ retry_avoidcopy: +@@ -2715,6 +2740,11 @@ retry_avoidcopy: make_huge_pte(vma, new_page, 1)); page_remove_rmap(old_page); hugepage_add_new_anon_rmap(new_page, vma, address); @@ -91899,7 +92361,7 @@ index c01cb9f..ac0f58e 100644 /* Make the old page be freed below */ new_page = old_page; } -@@ -2878,6 +2908,10 @@ retry: +@@ -2879,6 +2909,10 @@ retry: && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); @@ -91910,7 +92372,7 @@ index c01cb9f..ac0f58e 100644 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { /* Optimization, do the COW without a second fault */ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); -@@ -2908,6 +2942,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2909,6 +2943,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, static DEFINE_MUTEX(hugetlb_instantiation_mutex); struct hstate *h = hstate_vma(vma); @@ -91921,7 +92383,7 @@ index c01cb9f..ac0f58e 100644 address &= huge_page_mask(h); ptep = huge_pte_offset(mm, address); -@@ -2921,6 +2959,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2922,6 +2960,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, VM_FAULT_SET_HINDEX(hstate_index(h)); } @@ -91949,7 +92411,7 @@ index c01cb9f..ac0f58e 100644 if (!ptep) return VM_FAULT_OOM; diff --git a/mm/internal.h b/mm/internal.h -index 29e1e76..fc3ff04 100644 +index 3e91000..4741a60 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -94,6 +94,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); @@ -93019,7 +93481,7 @@ index bed4880..a493f67 100644 err = -EPERM; goto out; diff --git a/mm/mlock.c b/mm/mlock.c -index 4e1a6816..9683079 100644 +index b1eb536..091d154 100644 --- a/mm/mlock.c +++ b/mm/mlock.c 
@@ -14,6 +14,7 @@ @@ -93030,7 +93492,7 @@ index 4e1a6816..9683079 100644 #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> -@@ -604,7 +605,7 @@ static int do_mlock(unsigned long start, size_t len, int on) +@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on) { unsigned long nstart, end, tmp; struct vm_area_struct * vma, * prev; @@ -93039,7 +93501,7 @@ index 4e1a6816..9683079 100644 VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(len != PAGE_ALIGN(len)); -@@ -613,6 +614,9 @@ static int do_mlock(unsigned long start, size_t len, int on) +@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on) return -EINVAL; if (end == start) return 0; @@ -93049,7 +93511,7 @@ index 4e1a6816..9683079 100644 vma = find_vma(current->mm, start); if (!vma || vma->vm_start > start) return -ENOMEM; -@@ -624,6 +628,11 @@ static int do_mlock(unsigned long start, size_t len, int on) +@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on) for (nstart = start ; ; ) { vm_flags_t newflags; @@ -93061,7 +93523,7 @@ index 4e1a6816..9683079 100644 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ newflags = vma->vm_flags & ~VM_LOCKED; -@@ -737,6 +746,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) +@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) locked += current->mm->locked_vm; /* check against resource limits */ @@ -93069,7 +93531,7 @@ index 4e1a6816..9683079 100644 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); -@@ -774,6 +784,11 @@ static int do_mlockall(int flags) +@@ -776,6 +786,11 @@ static int do_mlockall(int flags) for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { vm_flags_t newflags; @@ -93081,7 +93543,7 @@ index 4e1a6816..9683079 100644 newflags = vma->vm_flags & ~VM_LOCKED; if (flags & MCL_CURRENT) newflags |= VM_LOCKED; -@@ -805,8 +820,10 @@ SYSCALL_DEFINE1(mlockall, int, flags) +@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags) lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; @@ -94573,7 +95035,7 @@ index 769a67a..414d24f 100644 if (nstart < prev->vm_end) diff --git a/mm/mremap.c b/mm/mremap.c -index 0843feb..4f5b2e6 100644 +index 0843feb..c3cde48 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, @@ -94589,7 +95051,26 @@ index 0843feb..4f5b2e6 100644 pte = move_soft_dirty_pte(pte); set_pte_at(mm, new_addr, new_pte, pte); } -@@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, +@@ -194,10 +200,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma, + break; + if (pmd_trans_huge(*old_pmd)) { + int err = 0; +- if (extent == HPAGE_PMD_SIZE) ++ if (extent == HPAGE_PMD_SIZE) { ++ VM_BUG_ON(vma->vm_file || !vma->anon_vma); ++ /* See comment in move_ptes() */ ++ if (need_rmap_locks) ++ anon_vma_lock_write(vma->anon_vma); + err = move_huge_pmd(vma, new_vma, old_addr, + new_addr, old_end, + old_pmd, new_pmd); ++ if (need_rmap_locks) ++ anon_vma_unlock_write(vma->anon_vma); ++ } + if (err > 0) { + need_flush = true; + continue; +@@ -337,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (is_vm_hugetlb_page(vma)) goto Einval; @@ -94601,7 +95082,7 @@ index 0843feb..4f5b2e6 100644 /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto Efault; -@@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, +@@ 
-392,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long ret = -EINVAL; unsigned long charged = 0; unsigned long map_flags; @@ -94632,7 +95113,7 @@ index 0843feb..4f5b2e6 100644 goto out; ret = do_munmap(mm, new_addr, new_len); -@@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, +@@ -474,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long ret = -EINVAL; unsigned long charged = 0; bool locked = false; @@ -94640,7 +95121,7 @@ index 0843feb..4f5b2e6 100644 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) return ret; -@@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, +@@ -495,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, if (!new_len) return ret; @@ -94658,7 +95139,7 @@ index 0843feb..4f5b2e6 100644 down_write(¤t->mm->mmap_sem); if (flags & MREMAP_FIXED) { -@@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, +@@ -545,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, new_addr = addr; } ret = addr; @@ -94666,7 +95147,7 @@ index 0843feb..4f5b2e6 100644 goto out; } } -@@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, +@@ -568,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, goto out; } @@ -94760,7 +95241,7 @@ index 7106cb1..0805f48 100644 unsigned long bg_thresh, unsigned long dirty, diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 3bac76a..bf9f9ae 100644 +index 7387a67..67105e4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ @@ -94856,6 +95337,21 @@ index 3bac76a..bf9f9ae 100644 if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); +@@ -2401,7 +2441,7 @@ static void reset_alloc_batches(struct zonelist *zonelist, + continue; + mod_zone_page_state(zone, NR_ALLOC_BATCH, + high_wmark_pages(zone) - low_wmark_pages(zone) - +- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); ++ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH])); + } + } + +@@ -6565,4 +6605,4 @@ void dump_page(struct page *page, char *reason) + { + dump_page_badflags(page, reason, 0); + } +-EXPORT_SYMBOL_GPL(dump_page); ++EXPORT_SYMBOL(dump_page); diff --git a/mm/page_io.c b/mm/page_io.c index 7c59ef6..1358905 100644 --- a/mm/page_io.c @@ -94934,7 +95430,7 @@ index fd26d04..0cea1b0 100644 if (!mm || IS_ERR(mm)) { rc = IS_ERR(mm) ? 
PTR_ERR(mm) : -ESRCH; diff --git a/mm/rmap.c b/mm/rmap.c -index 8fc049f..1b21e12 100644 +index d3cbac5..0788da4 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma) @@ -100396,7 +100892,7 @@ index 453e974..b3a43a5 100644 if (local->use_chanctx) *chandef = local->monitor_chandef; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h -index 5e44e317..3d404a6 100644 +index 6bd4984..d8805c5 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -28,6 +28,7 @@ @@ -100493,10 +100989,10 @@ index ce1c443..6cd39e1 100644 } diff --git a/net/mac80211/main.c b/net/mac80211/main.c -index d767cfb..b4cd07d 100644 +index c7a7a86..a74f57b 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c -@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) +@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER); @@ -100506,7 +101002,7 @@ index d767cfb..b4cd07d 100644 /* * Goal: diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c -index af64fb8..366e371 100644 +index d478b88..8c8d157 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) @@ -100527,7 +101023,7 @@ index af64fb8..366e371 100644 if (local->wowlan) { int err = drv_suspend(local, wowlan); if (err < 0) { -@@ -115,7 +115,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) WARN_ON(!list_empty(&local->chanctx_list)); /* stop hardware - this must stop RX */ @@ -102271,7 +102767,7 @@ index ae333c1..18521f0 100644 goto out_nomem; cd->u.procfs.channel_ent = NULL; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c -index 0edada9..9247ea0 100644 +index 3ea5cda..bfb3e08 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1415,7 +1415,9 @@ call_start(struct rpc_task *task) @@ -103474,10 +103970,10 @@ index 8fac3fd..32ff38d 100644 unsigned int secindex_strings; diff --git a/security/Kconfig b/security/Kconfig -index beb86b5..1ea5a01 100644 +index beb86b5..55198cd 100644 --- a/security/Kconfig +++ b/security/Kconfig -@@ -4,6 +4,960 @@ +@@ -4,6 +4,961 @@ menu "Security options" @@ -103514,6 +104010,7 @@ index beb86b5..1ea5a01 100644 + select TTY + select DEBUG_KERNEL + select DEBUG_LIST ++ select DEBUG_STACKOVERFLOW if HAVE_DEBUG_STACKOVERFLOW + help + If you say Y here, you will be able to configure many features + that will enhance the security of your system. 
It is highly @@ -104438,7 +104935,7 @@ index beb86b5..1ea5a01 100644 source security/keys/Kconfig config SECURITY_DMESG_RESTRICT -@@ -103,7 +1057,7 @@ config INTEL_TXT +@@ -103,7 +1058,7 @@ config INTEL_TXT config LSM_MMAP_MIN_ADDR int "Low address space for LSM to protect from user allocation" depends on SECURITY && SECURITY_SELINUX @@ -105819,19 +106316,17 @@ index 7778b8e..3d619fc 100644 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore new file mode 100644 -index 0000000..1f0214f +index 0000000..de92ed9 --- /dev/null +++ b/tools/gcc/.gitignore -@@ -0,0 +1,3 @@ +@@ -0,0 +1 @@ +randomize_layout_seed.h -+size_overflow_hash.h -+size_overflow_hash_aux.h diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile new file mode 100644 -index 0000000..5ca9688 +index 0000000..7b8921f --- /dev/null +++ b/tools/gcc/Makefile -@@ -0,0 +1,62 @@ +@@ -0,0 +1,52 @@ +#CC := gcc +#PLUGIN_SOURCE_FILES := pax_plugin.c +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES)) @@ -105840,23 +106335,29 @@ index 0000000..5ca9688 + +ifeq ($(PLUGINCC),$(HOSTCC)) +HOSTLIBS := hostlibs -+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb ++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb ++export HOST_EXTRACFLAGS +else +HOSTLIBS := hostcxxlibs -+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing ++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing -Wno-unused-variable ++export HOST_EXTRACXXFLAGS +endif + ++export GCCPLUGINS_DIR HOSTLIBS ++ +$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so +$(HOSTLIBS)-y += colorize_plugin.so -+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so +$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so + ++subdir-$(CONFIG_PAX_SIZE_OVERFLOW) := size_overflow_plugin ++subdir- += size_overflow_plugin ++ +always := $($(HOSTLIBS)-y) + +constify_plugin-objs := constify_plugin.o @@ -105865,35 +106366,19 @@ index 0000000..5ca9688 +kernexec_plugin-objs := kernexec_plugin.o +checker_plugin-objs := checker_plugin.o +colorize_plugin-objs := colorize_plugin.o -+size_overflow_plugin-objs := size_overflow_plugin.o +latent_entropy_plugin-objs := latent_entropy_plugin.o +structleak_plugin-objs := structleak_plugin.o +randomize_layout_plugin-objs := randomize_layout_plugin.o + -+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h +$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h + -+quiet_cmd_build_size_overflow_hash = GENHASH $@ -+ cmd_build_size_overflow_hash = \ -+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@ -+$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE -+ $(call if_changed,build_size_overflow_hash) -+ -+quiet_cmd_build_size_overflow_hash_aux = GENHASH $@ -+ cmd_build_size_overflow_hash_aux = \ -+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@ 
-+$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE -+ $(call if_changed,build_size_overflow_hash_aux) -+ -+targets += size_overflow_hash.h size_overflow_hash_aux.h -+ +quiet_cmd_create_randomize_layout_seed = GENSEED $@ + cmd_create_randomize_layout_seed = \ + $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h +$(objtree)/$(obj)/randomize_layout_seed.h: FORCE + $(call if_changed,create_randomize_layout_seed) + -+targets += size_overflow_hash.h randomize_layout_seed.h randomize_layout_hash.h ++targets += randomize_layout_seed.h randomize_layout_hash.h diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c new file mode 100644 index 0000000..5452feea @@ -107136,109 +107621,6 @@ index 0000000..7514850 + HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'` + echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2" +fi -diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh -new file mode 100644 -index 0000000..791ca76 ---- /dev/null -+++ b/tools/gcc/generate_size_overflow_hash.sh -@@ -0,0 +1,97 @@ -+#!/bin/bash -+ -+# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c). -+ -+header1="size_overflow_hash.h" -+database="size_overflow_hash.data" -+n=65536 -+hashtable_name="size_overflow_hash" -+ -+usage() { -+cat <<EOF -+usage: $0 options -+OPTIONS: -+ -h|--help help -+ -o header file -+ -d database file -+ -n hash array size -+ -s name of the hash table -+EOF -+ return 0 -+} -+ -+while true -+do -+ case "$1" in -+ -h|--help) usage && exit 0;; -+ -n) n=$2; shift 2;; -+ -o) header1="$2"; shift 2;; -+ -d) database="$2"; shift 2;; -+ -s) hashtable_name="$2"; shift 2;; -+ --) shift 1; break ;; -+ *) break ;; -+ esac -+done -+ -+create_defines() { -+ for i in `seq 0 31` -+ do -+ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1" -+ done -+ echo >> "$header1" -+} -+ -+create_structs() { -+ rm -f "$header1" -+ -+ create_defines -+ -+ cat "$database" | while read data -+ do -+ data_array=($data) -+ struct_hash_name="${data_array[0]}" -+ funcn="${data_array[1]}" -+ params="${data_array[2]}" -+ next="${data_array[4]}" -+ -+ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1" -+ -+ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1" -+ echo -en "\t.param\t= " >> "$header1" -+ line= -+ for param_num in ${params//-/ }; -+ do -+ line="${line}PARAM"$param_num"|" -+ done -+ -+ echo -e "${line%?},\n};\n" >> "$header1" -+ done -+} -+ -+create_headers() { -+ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1" -+} -+ -+create_array_elements() { -+ index=0 -+ grep -v "nohasharray" $database | sort -n -k 4 | while read data -+ do -+ data_array=($data) -+ i="${data_array[3]}" -+ hash="${data_array[0]}" -+ while [[ $index -lt $i ]] -+ do -+ echo -e "\t["$index"]\t= NULL," >> "$header1" -+ index=$(($index + 1)) -+ done -+ index=$(($index + 1)) -+ echo -e "\t["$i"]\t= &"$hash"," >> "$header1" -+ done -+ echo '};' >> $header1 -+} -+ -+create_structs -+create_headers -+create_array_elements -+ -+exit 0 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c new file mode 100644 index 0000000..d81c094 @@ -109340,12 +109722,4148 @@ index 0000000..8dafb22 + + return 0; +} -diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data +diff --git a/tools/gcc/size_overflow_plugin/.gitignore 
b/tools/gcc/size_overflow_plugin/.gitignore +new file mode 100644 +index 0000000..92d3b0c +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/.gitignore +@@ -0,0 +1,2 @@ ++size_overflow_hash.h ++size_overflow_hash_aux.h +diff --git a/tools/gcc/size_overflow_plugin/Makefile b/tools/gcc/size_overflow_plugin/Makefile +new file mode 100644 +index 0000000..1ae2ed5 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/Makefile +@@ -0,0 +1,20 @@ ++$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so ++always := $($(HOSTLIBS)-y) ++ ++size_overflow_plugin-objs := $(patsubst $(srctree)/$(src)/%.c,%.o,$(wildcard $(srctree)/$(src)/*.c)) ++ ++$(patsubst $(srctree)/$(src)/%.c,$(obj)/%.o,$(wildcard $(srctree)/$(src)/*.c)): $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h ++ ++quiet_cmd_build_size_overflow_hash = GENHASH $@ ++ cmd_build_size_overflow_hash = \ ++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@ ++$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE ++ $(call if_changed,build_size_overflow_hash) ++ ++quiet_cmd_build_size_overflow_hash_aux = GENHASH $@ ++ cmd_build_size_overflow_hash_aux = \ ++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@ ++$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE ++ $(call if_changed,build_size_overflow_hash_aux) ++ ++targets += size_overflow_hash.h size_overflow_hash_aux.h +diff --git a/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh +new file mode 100644 +index 0000000..12b1e3b +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh +@@ -0,0 +1,102 @@ ++#!/bin/bash ++ ++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c). 
++ ++header1="size_overflow_hash.h" ++database="size_overflow_hash.data" ++n=65536 ++hashtable_name="size_overflow_hash" ++ ++usage() { ++cat <<EOF ++usage: $0 options ++OPTIONS: ++ -h|--help help ++ -o header file ++ -d database file ++ -n hash array size ++ -s name of the hash table ++EOF ++ return 0 ++} ++ ++while true ++do ++ case "$1" in ++ -h|--help) usage && exit 0;; ++ -n) n=$2; shift 2;; ++ -o) header1="$2"; shift 2;; ++ -d) database="$2"; shift 2;; ++ -s) hashtable_name="$2"; shift 2;; ++ --) shift 1; break ;; ++ *) break ;; ++ esac ++done ++ ++create_defines() { ++ for i in `seq 0 31` ++ do ++ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1" ++ done ++ echo >> "$header1" ++} ++ ++create_structs() { ++ rm -f "$header1" ++ ++ create_defines ++ ++ cat "$database" | while read data ++ do ++ data_array=($data) ++ struct_hash_name="${data_array[0]}" ++ funcn="${data_array[1]}" ++ params="${data_array[2]}" ++ next="${data_array[4]}" ++ ++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1" ++ ++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1" ++ echo -en "\t.param\t= " >> "$header1" ++ line= ++ for param_num in ${params//-/ }; ++ do ++ line="${line}PARAM"$param_num"|" ++ done ++ ++ echo -e "${line%?},\n};\n" >> "$header1" ++ done ++} ++ ++create_headers() { ++ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1" ++} ++ ++create_array_elements() { ++ index=0 ++ grep -v "nohasharray" $database | sort -n -k 4 | while read data ++ do ++ data_array=($data) ++ i="${data_array[3]}" ++ hash="${data_array[0]}" ++ while [[ $index -lt $i ]] ++ do ++ echo -e "\t["$index"]\t= NULL," >> "$header1" ++ index=$(($index + 1)) ++ done ++ index=$(($index + 1)) ++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1" ++ done ++ echo '};' >> $header1 ++} ++ ++size_overflow_plugin_dir=`dirname $header1` ++if [ "$size_overflow_plugin_dir" != '.' ]; then ++ mkdir -p "$size_overflow_plugin_dir" 2> /dev/null ++fi ++ ++create_structs ++create_headers ++create_array_elements ++ ++exit 0 +diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c +new file mode 100644 +index 0000000..f8ac5c6 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c +@@ -0,0 +1,790 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs); ++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs); ++ ++// data for the size_overflow asm stmt ++struct asm_data { ++ gimple def_stmt; ++ tree input; ++ tree output; ++}; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++static VEC(tree, gc) *create_asm_io_list(tree string, tree io) ++#else ++static vec<tree, va_gc> *create_asm_io_list(tree string, tree io) ++#endif ++{ ++ tree list; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *vec_list = NULL; ++#else ++ vec<tree, va_gc> *vec_list = NULL; ++#endif ++ ++ list = build_tree_list(NULL_TREE, string); ++ list = chainon(NULL_TREE, build_tree_list(list, io)); ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_safe_push(tree, gc, vec_list, list); ++#else ++ vec_safe_push(vec_list, list); ++#endif ++ return vec_list; ++} ++ ++static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data) ++{ ++ gimple asm_stmt; ++ gimple_stmt_iterator gsi; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *input, *output = NULL; ++#else ++ vec<tree, va_gc> *input, *output = NULL; ++#endif ++ ++ input = create_asm_io_list(str_input, asm_data->input); ++ ++ if (asm_data->output) ++ output = create_asm_io_list(str_output, asm_data->output); ++ ++ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL); ++ gsi = gsi_for_stmt(asm_data->def_stmt); ++ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT); ++ ++ if (asm_data->output) ++ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt; ++} ++ ++static void replace_call_lhs(const struct asm_data *asm_data) ++{ ++ gimple_set_lhs(asm_data->def_stmt, asm_data->input); ++ update_stmt(asm_data->def_stmt); ++ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt; ++} ++ ++static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result) ++{ ++ enum mark cur_fndecl_attr; ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(phi, i); ++ ++ cur_fndecl_attr = search_intentional(visited, arg); ++ if (cur_fndecl_attr != MARK_NO) ++ return cur_fndecl_attr; ++ } ++ return MARK_NO; ++} ++ ++static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs) ++{ ++ enum mark cur_fndecl_attr; ++ const_tree rhs1, rhs2; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ cur_fndecl_attr = search_intentional(visited, rhs1); ++ if (cur_fndecl_attr != MARK_NO) ++ return cur_fndecl_attr; ++ return search_intentional(visited, rhs2); ++} ++ ++// Look up the intentional_overflow attribute on the caller and the callee functions. 
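For orientation, the attributes this walker keys off are spelled on declarations roughly as in the sketch below. The function names are invented for illustration; only the attribute spelling follows the plugin, and GCC accepts it only while the plugin is loaded. The lookup routine itself follows right after this aside.

/* Illustrative declarations only -- not part of the patch.
 * size_overflow(1) asks the plugin to track the first argument;
 * intentional_overflow(-1) turns instrumentation off for the function. */
void *my_alloc(unsigned long size) __attribute__((size_overflow(1)));
unsigned long my_counter(unsigned long a) __attribute__((intentional_overflow(-1)));
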
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(lhs) != SSA_NAME) ++ return get_intentional_attr_type(lhs); ++ ++ def_stmt = get_def_stmt(lhs); ++ if (!def_stmt) ++ return MARK_NO; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return MARK_NO; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ return search_intentional(visited, SSA_NAME_VAR(lhs)); ++ case GIMPLE_ASM: ++ if (is_size_overflow_intentional_asm_turn_off(def_stmt)) ++ return MARK_TURN_OFF; ++ return MARK_NO; ++ case GIMPLE_CALL: ++ return MARK_NO; ++ case GIMPLE_PHI: ++ return search_intentional_phi(visited, lhs); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return search_intentional(visited, gimple_assign_rhs1(def_stmt)); ++ case 3: ++ return search_intentional_binary(visited, lhs); ++ } ++ case GIMPLE_RETURN: ++ return MARK_NO; ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt. ++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum) ++{ ++ const_tree fndecl; ++ struct pointer_set_t *visited; ++ enum mark cur_fndecl_attr, decl_attr = MARK_NO; ++ ++ fndecl = get_interesting_orig_fndecl(stmt, argnum); ++ if (is_end_intentional_intentional_attr(fndecl, argnum)) ++ decl_attr = MARK_NOT_INTENTIONAL; ++ else if (is_yes_intentional_attr(fndecl, argnum)) ++ decl_attr = MARK_YES; ++ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) { ++ return MARK_TURN_OFF; ++ } ++ ++ visited = pointer_set_create(); ++ cur_fndecl_attr = search_intentional(visited, arg); ++ pointer_set_destroy(visited); ++ ++ switch (cur_fndecl_attr) { ++ case MARK_NO: ++ case MARK_TURN_OFF: ++ return cur_fndecl_attr; ++ default: ++ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum); ++ return MARK_YES; ++ } ++} ++ ++static void check_missing_size_overflow_attribute(tree var) ++{ ++ tree orig_fndecl; ++ unsigned int num; ++ ++ if (is_a_return_check(var)) ++ orig_fndecl = DECL_ORIGIN(var); ++ else ++ orig_fndecl = DECL_ORIGIN(current_function_decl); ++ ++ num = get_function_num(var, orig_fndecl); ++ if (num == CANNOT_FIND_ARG) ++ return; ++ ++ is_missing_function(orig_fndecl, num); ++} ++ ++static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(phi, i); ++ ++ search_size_overflow_attribute(visited, arg); ++ } ++} ++ ++static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs) ++{ ++ const_gimple def_stmt = get_def_stmt(lhs); ++ tree rhs1, rhs2; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ search_size_overflow_attribute(visited, rhs1); ++ search_size_overflow_attribute(visited, rhs2); ++} ++ ++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(lhs) == PARM_DECL) { ++ check_missing_size_overflow_attribute(lhs); ++ return; ++ } ++ ++ def_stmt = get_def_stmt(lhs); ++ if (!def_stmt) ++ return; ++ ++ if 
(pointer_set_insert(visited, def_stmt)) ++ return; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs)); ++ case GIMPLE_ASM: ++ return; ++ case GIMPLE_CALL: { ++ tree fndecl = gimple_call_fndecl(def_stmt); ++ ++ if (fndecl == NULL_TREE) ++ return; ++ check_missing_size_overflow_attribute(fndecl); ++ return; ++ } ++ case GIMPLE_PHI: ++ return search_size_overflow_attribute_phi(visited, lhs); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt)); ++ case 3: ++ return search_size_overflow_attribute_binary(visited, lhs); ++ } ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++// Search missing entries in the hash table (invoked from the gimple pass) ++static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num) ++{ ++ tree fndecl = NULL_TREE; ++ tree lhs; ++ struct pointer_set_t *visited; ++ ++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) ++ return; ++ ++ if (num == 0) { ++ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN); ++ lhs = gimple_return_retval(stmt); ++ } else { ++ gcc_assert(is_gimple_call(stmt)); ++ lhs = gimple_call_arg(stmt, num - 1); ++ fndecl = gimple_call_fndecl(stmt); ++ } ++ ++ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl))) ++ return; ++ ++ visited = pointer_set_create(); ++ search_size_overflow_attribute(visited, lhs); ++ pointer_set_destroy(visited); ++} ++ ++static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ ++ assign = gimple_build_assign(asm_data->input, asm_data->output); ++ gsi = gsi_for_stmt(stmt); ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ asm_data->def_stmt = assign; ++ ++ asm_data->output = create_new_var(TREE_TYPE(asm_data->output)); ++ asm_data->output = make_ssa_name(asm_data->output, stmt); ++ if (gimple_code(stmt) == GIMPLE_RETURN) ++ gimple_return_set_retval(stmt, asm_data->output); ++ else ++ gimple_call_set_arg(stmt, argnum - 1, asm_data->output); ++ update_stmt(stmt); ++} ++ ++static char *create_asm_comment(unsigned int argnum, const_gimple stmt , const char *mark_str) ++{ ++ const char *fn_name; ++ char *asm_comment; ++ unsigned int len; ++ ++ if (argnum == 0) ++ fn_name = DECL_NAME_POINTER(current_function_decl); ++ else ++ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt)); ++ ++ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum); ++ gcc_assert(len > 0); ++ ++ return asm_comment; ++} ++ ++static const char *convert_mark_to_str(enum mark mark) ++{ ++ switch (mark) { ++ case MARK_NO: ++ return OK_ASM_STR; ++ case MARK_YES: ++ case MARK_NOT_INTENTIONAL: ++ return YES_ASM_STR; ++ case MARK_TURN_OFF: ++ return TURN_OFF_ASM_STR; ++ } ++ ++ gcc_unreachable(); ++} ++ ++/* Create the input of the size_overflow asm stmt. ++ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt: ++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D)); ++ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion. ++ * otherwise create the input (for a phi stmt the output too) of the asm stmt. 
++ */ ++static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data) ++{ ++ if (!asm_data->def_stmt) { ++ asm_data->input = NULL_TREE; ++ return; ++ } ++ ++ asm_data->input = create_new_var(TREE_TYPE(asm_data->output)); ++ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt); ++ ++ switch (gimple_code(asm_data->def_stmt)) { ++ case GIMPLE_ASSIGN: ++ case GIMPLE_CALL: ++ replace_call_lhs(asm_data); ++ break; ++ case GIMPLE_PHI: ++ create_output_from_phi(stmt, argnum, asm_data); ++ break; ++ case GIMPLE_NOP: { ++ enum mark mark; ++ const char *mark_str; ++ char *asm_comment; ++ ++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum); ++ ++ asm_data->input = asm_data->output; ++ asm_data->output = NULL; ++ asm_data->def_stmt = stmt; ++ ++ mark_str = convert_mark_to_str(mark); ++ asm_comment = create_asm_comment(argnum, stmt, mark_str); ++ ++ create_asm_stmt(asm_comment, build_string(2, "rm"), NULL, asm_data); ++ free(asm_comment); ++ asm_data->input = NULL_TREE; ++ break; ++ } ++ case GIMPLE_ASM: ++ if (is_size_overflow_asm(asm_data->def_stmt)) { ++ asm_data->input = NULL_TREE; ++ break; ++ } ++ default: ++ debug_gimple_stmt(asm_data->def_stmt); ++ gcc_unreachable(); ++ } ++} ++ ++/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type ++ * is of the right kind create the appropriate size_overflow asm stmts: ++ * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16); ++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D)); ++ */ ++static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum) ++{ ++ struct asm_data asm_data; ++ const char *mark_str; ++ char *asm_comment; ++ enum mark mark; ++ ++ if (is_gimple_constant(output_node)) ++ return; ++ ++ asm_data.output = output_node; ++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum); ++ if (mark != MARK_TURN_OFF) ++ search_missing_size_overflow_attribute_gimple(stmt, argnum); ++ ++ asm_data.def_stmt = get_def_stmt(asm_data.output); ++ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt)) ++ return; ++ ++ create_asm_input(stmt, argnum, &asm_data); ++ if (asm_data.input == NULL_TREE) ++ return; ++ ++ mark_str = convert_mark_to_str(mark); ++ asm_comment = create_asm_comment(argnum, stmt, mark_str); ++ create_asm_stmt(asm_comment, build_string(1, "0"), build_string(3, "=rm"), &asm_data); ++ free(asm_comment); ++} ++ ++// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL". 
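create_mark_asm() below builds such markers. In source terms, a hand-written equivalent would look like the sketch here (the helper name and variable are invented); because the asm has no outputs it is implicitly volatile, so the mark survives optimization until the ipa pass consumes it.

/* Hand-written equivalent of the marker asm (illustrative sketch only). */
static inline void mark_yes(unsigned long val)
{
	asm("# size_overflow MARK_YES" : : "rm" (val));
}
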
++static bool create_mark_asm(gimple stmt, enum mark mark) ++{ ++ struct asm_data asm_data; ++ const char *asm_str; ++ ++ switch (mark) { ++ case MARK_TURN_OFF: ++ asm_str = TURN_OFF_ASM_STR; ++ break; ++ case MARK_NOT_INTENTIONAL: ++ case MARK_YES: ++ asm_str = YES_ASM_STR; ++ break; ++ default: ++ gcc_unreachable(); ++ } ++ ++ asm_data.def_stmt = stmt; ++ asm_data.output = gimple_call_lhs(stmt); ++ ++ if (asm_data.output == NULL_TREE) { ++ asm_data.input = gimple_call_arg(stmt, 0); ++ if (is_gimple_constant(asm_data.input)) ++ return false; ++ asm_data.output = NULL; ++ create_asm_stmt(asm_str, build_string(2, "rm"), NULL, &asm_data); ++ return true; ++ } ++ ++ create_asm_input(stmt, 0, &asm_data); ++ gcc_assert(asm_data.input != NULL_TREE); ++ ++ create_asm_stmt(asm_str, build_string(1, "0"), build_string(3, "=rm"), &asm_data); ++ return true; ++} ++ ++static bool is_from_cast(const_tree node) ++{ ++ gimple def_stmt = get_def_stmt(node); ++ ++ if (!def_stmt) ++ return false; ++ ++ if (gimple_assign_cast_p(def_stmt)) ++ return true; ++ ++ return false; ++} ++ ++// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type. ++static bool skip_ptr_minus(gimple stmt) ++{ ++ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs; ++ ++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ if (!is_from_cast(rhs1)) ++ return false; ++ ++ rhs2 = gimple_assign_rhs2(stmt); ++ if (!is_from_cast(rhs2)) ++ return false; ++ ++ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1)); ++ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2)); ++ ++ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE) ++ return false; ++ ++ create_mark_asm(stmt, MARK_YES); ++ return true; ++} ++ ++static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs) ++{ ++ gimple def_stmt; ++ ++ def_stmt = get_def_stmt(lhs); ++ if (!def_stmt) ++ return; ++ ++ if (pointer_set_insert(visited, def_stmt)) ++ return; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ case GIMPLE_ASM: ++ case GIMPLE_CALL: ++ break; ++ case GIMPLE_PHI: { ++ unsigned int i, n = gimple_phi_num_args(def_stmt); ++ ++ pointer_set_insert(visited, def_stmt); ++ ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(def_stmt, i); ++ ++ walk_use_def_ptr(visited, arg); ++ } ++ } ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt)); ++ return; ++ case 3: ++ if (skip_ptr_minus(def_stmt)) ++ return; ++ ++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt)); ++ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt)); ++ return; ++ default: ++ return; ++ } ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page) ++static void insert_mark_not_intentional_asm_at_ptr(const_tree arg) ++{ ++ struct pointer_set_t *visited; ++ ++ visited = pointer_set_create(); ++ walk_use_def_ptr(visited, arg); ++ pointer_set_destroy(visited); ++} ++ ++// Determine the return value and insert the asm stmt to mark the return stmt. ++static void insert_asm_ret(gimple stmt) ++{ ++ tree ret; ++ ++ ret = gimple_return_retval(stmt); ++ create_size_overflow_asm(stmt, ret, 0); ++} ++ ++// Determine the correct arg index and arg and insert the asm stmt to mark the stmt. 
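As a concrete reference for the hash lookups used below (get_function_hash(), the hash->param bit tests), a hypothetical database line "my_hash my_alloc 1 4660 NULL" would be rendered by generate_size_overflow_hash.sh above into the following header fragment; slot 4660 of the size_overflow_hash[] array then points at &my_hash.

/* What the generator script emits for the hypothetical entry
 * "my_hash my_alloc 1 4660 NULL" (hash name, function, tracked
 * argument, table slot, chain pointer): */
const struct size_overflow_hash my_hash = {
	.next	= NULL,
	.name	= "my_alloc",
	.param	= PARAM1,
};
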
++static void insert_asm_arg(gimple stmt, unsigned int orig_argnum) ++{ ++ tree arg; ++ unsigned int argnum; ++ ++ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt)); ++ gcc_assert(argnum != 0); ++ if (argnum == CANNOT_FIND_ARG) ++ return; ++ ++ arg = gimple_call_arg(stmt, argnum - 1); ++ gcc_assert(arg != NULL_TREE); ++ ++ // skip all ptr - ptr expressions ++ insert_mark_not_intentional_asm_at_ptr(arg); ++ ++ create_size_overflow_asm(stmt, arg, argnum); ++} ++ ++// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array. ++static void set_argnum_attribute(const_tree attr, bool *argnums) ++{ ++ unsigned int argnum; ++ tree attr_value; ++ ++ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) { ++ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value)); ++ argnums[argnum] = true; ++ } ++} ++ ++// If a function arg or the return value is in the hash table then set its index in the array. ++static void set_argnum_hash(tree fndecl, bool *argnums) ++{ ++ unsigned int num; ++ const struct size_overflow_hash *hash; ++ ++ hash = get_function_hash(DECL_ORIGIN(fndecl)); ++ if (!hash) ++ return; ++ ++ for (num = 0; num <= MAX_PARAM; num++) { ++ if (!(hash->param & (1U << num))) ++ continue; ++ ++ argnums[num] = true; ++ } ++} ++ ++static bool is_all_the_argnums_empty(bool *argnums) ++{ ++ unsigned int i; ++ ++ for (i = 0; i <= MAX_PARAM; i++) ++ if (argnums[i]) ++ return false; ++ return true; ++} ++ ++// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute. ++static void search_interesting_args(tree fndecl, bool *argnums) ++{ ++ const_tree attr; ++ ++ set_argnum_hash(fndecl, argnums); ++ if (!is_all_the_argnums_empty(argnums)) ++ return; ++ ++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl)); ++ if (attr && TREE_VALUE(attr)) ++ set_argnum_attribute(attr, argnums); ++} ++ ++/* ++ * Look up the intentional_overflow attribute that turns off ipa based duplication ++ * on the callee function. 
++ */ ++static bool is_mark_turn_off_attribute(gimple stmt) ++{ ++ enum mark mark; ++ const_tree fndecl = gimple_call_fndecl(stmt); ++ ++ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl)); ++ if (mark == MARK_TURN_OFF) ++ return true; ++ return false; ++} ++ ++// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt ++static void handle_interesting_function(gimple stmt) ++{ ++ unsigned int argnum; ++ tree fndecl; ++ bool orig_argnums[MAX_PARAM + 1] = {false}; ++ ++ if (gimple_call_num_args(stmt) == 0) ++ return; ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ return; ++ fndecl = DECL_ORIGIN(fndecl); ++ ++ if (is_mark_turn_off_attribute(stmt)) { ++ create_mark_asm(stmt, MARK_TURN_OFF); ++ return; ++ } ++ ++ search_interesting_args(fndecl, orig_argnums); ++ ++ for (argnum = 1; argnum < MAX_PARAM; argnum++) ++ if (orig_argnums[argnum]) ++ insert_asm_arg(stmt, argnum); ++} ++ ++// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt ++static void handle_interesting_ret(gimple stmt) ++{ ++ bool orig_argnums[MAX_PARAM + 1] = {false}; ++ ++ search_interesting_args(current_function_decl, orig_argnums); ++ ++ if (orig_argnums[0]) ++ insert_asm_ret(stmt); ++} ++ ++// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table ++static unsigned int search_interesting_functions(void) ++{ ++ basic_block bb; ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt = gsi_stmt(gsi); ++ ++ if (is_size_overflow_asm(stmt)) ++ continue; ++ ++ if (is_gimple_call(stmt)) ++ handle_interesting_function(stmt); ++ else if (gimple_code(stmt) == GIMPLE_RETURN) ++ handle_interesting_ret(stmt); ++ } ++ } ++ return 0; ++} ++ ++/* ++ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass ++ * this pass inserts asm stmts to mark the interesting args ++ * that the ipa pass will detect and insert the size overflow checks for. 
++ */ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data insert_size_overflow_asm_pass_data = { ++#else ++static struct gimple_opt_pass insert_size_overflow_asm_pass = { ++ .pass = { ++#endif ++ .type = GIMPLE_PASS, ++ .name = "insert_size_overflow_asm", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = search_interesting_functions, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow ++#if BUILDING_GCC_VERSION < 4009 ++ } ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class insert_size_overflow_asm_pass : public gimple_opt_pass { ++public: ++ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {} ++ unsigned int execute() { return search_interesting_functions(); } ++}; ++} ++#endif ++ ++struct opt_pass *make_insert_size_overflow_asm_pass(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return new insert_size_overflow_asm_pass(); ++#else ++ return &insert_size_overflow_asm_pass.pass; ++#endif ++} +diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c +new file mode 100644 +index 0000000..0e36bd3 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c +@@ -0,0 +1,889 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++#define MIN_CHECK true ++#define MAX_CHECK false ++ ++static tree get_size_overflow_type(struct visited *visited, const_gimple stmt, const_tree node) ++{ ++ const_tree type; ++ tree new_type; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ type = TREE_TYPE(node); ++ ++ if (pointer_set_contains(visited->my_stmts, stmt)) ++ return TREE_TYPE(node); ++ ++ switch (TYPE_MODE(type)) { ++ case QImode: ++ new_type = size_overflow_type_HI; ++ break; ++ case HImode: ++ new_type = size_overflow_type_SI; ++ break; ++ case SImode: ++ new_type = size_overflow_type_DI; ++ break; ++ case DImode: ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) ++ new_type = TYPE_UNSIGNED(type) ? 
unsigned_intDI_type_node : size_overflow_type_DI; ++ else ++ new_type = size_overflow_type_TI; ++ break; ++ case TImode: ++ gcc_assert(!TYPE_UNSIGNED(type)); ++ new_type = size_overflow_type_TI; ++ break; ++ default: ++ debug_tree((tree)node); ++ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl); ++ gcc_unreachable(); ++ } ++ ++ if (TYPE_QUALS(type) != 0) ++ return build_qualified_type(new_type, TYPE_QUALS(type)); ++ return new_type; ++} ++ ++static tree get_lhs(const_gimple stmt) ++{ ++ switch (gimple_code(stmt)) { ++ case GIMPLE_ASSIGN: ++ case GIMPLE_CALL: ++ return gimple_get_lhs(stmt); ++ case GIMPLE_PHI: ++ return gimple_phi_result(stmt); ++ default: ++ return NULL_TREE; ++ } ++} ++ ++static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before) ++{ ++ gimple_stmt_iterator gsi; ++ tree lhs; ++ gimple new_stmt; ++ ++ if (rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ gsi = gsi_for_stmt(stmt); ++ new_stmt = build_cast_stmt(visited, size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false); ++ pointer_set_insert(visited->my_stmts, new_stmt); ++ ++ lhs = get_lhs(new_stmt); ++ gcc_assert(lhs != NULL_TREE); ++ return lhs; ++} ++ ++tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before) ++{ ++ tree lhs, dst_type; ++ gimple_stmt_iterator gsi; ++ ++ if (rhs1 == NULL_TREE) { ++ debug_gimple_stmt(oldstmt); ++ error("%s: rhs1 is NULL_TREE", __func__); ++ gcc_unreachable(); ++ } ++ ++ switch (gimple_code(oldstmt)) { ++ case GIMPLE_ASM: ++ lhs = rhs1; ++ break; ++ case GIMPLE_CALL: ++ case GIMPLE_ASSIGN: ++ lhs = gimple_get_lhs(oldstmt); ++ break; ++ default: ++ debug_gimple_stmt(oldstmt); ++ gcc_unreachable(); ++ } ++ ++ gsi = gsi_for_stmt(oldstmt); ++ pointer_set_insert(visited->stmts, oldstmt); ++ if (lookup_stmt_eh_lp(oldstmt) != 0) { ++ basic_block next_bb, cur_bb; ++ const_edge e; ++ ++ gcc_assert(before == false); ++ gcc_assert(stmt_can_throw_internal(oldstmt)); ++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ cur_bb = gimple_bb(oldstmt); ++ next_bb = cur_bb->next_bb; ++ e = find_edge(cur_bb, next_bb); ++ gcc_assert(e != NULL); ++ gcc_assert(e->flags & EDGE_FALLTHRU); ++ ++ gsi = gsi_after_labels(next_bb); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ before = true; ++ oldstmt = gsi_stmt(gsi); ++ } ++ ++ dst_type = get_size_overflow_type(visited, oldstmt, lhs); ++ ++ if (is_gimple_constant(rhs1)) ++ return cast_a_tree(dst_type, rhs1); ++ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before); ++} ++ ++tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3) ++{ ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt); ++ ++ if (pointer_set_contains(visited->my_stmts, oldstmt)) ++ return lhs; ++ ++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { ++ rhs1 = gimple_assign_rhs1(oldstmt); ++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT); ++ } ++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { ++ rhs2 = gimple_assign_rhs2(oldstmt); ++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT); ++ } ++ ++ stmt = gimple_copy(oldstmt); ++ gimple_set_location(stmt, gimple_location(oldstmt)); ++ pointer_set_insert(visited->my_stmts, stmt); ++ ++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) ++ gimple_assign_set_rhs_code(stmt, MULT_EXPR); ++ ++ size_overflow_type = 
get_size_overflow_type(visited, oldstmt, node); ++ ++ new_var = create_new_var(size_overflow_type); ++ new_var = make_ssa_name(new_var, stmt); ++ gimple_assign_set_lhs(stmt, new_var); ++ ++ if (rhs1 != NULL_TREE) ++ gimple_assign_set_rhs1(stmt, rhs1); ++ ++ if (rhs2 != NULL_TREE) ++ gimple_assign_set_rhs2(stmt, rhs2); ++#if BUILDING_GCC_VERSION >= 4006 ++ if (rhs3 != NULL_TREE) ++ gimple_assign_set_rhs3(stmt, rhs3); ++#endif ++ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); ++ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited->stmts, oldstmt); ++ return gimple_assign_lhs(stmt); ++} ++ ++static tree cast_parm_decl(struct visited *visited, tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ basic_block first_bb; ++ ++ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg)); ++ ++ if (bb->index == 0) { ++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest; ++ gcc_assert(dom_info_available_p(CDI_DOMINATORS)); ++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun)); ++ bb = first_bb; ++ } ++ ++ gsi = gsi_after_labels(bb); ++ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ ++ return gimple_assign_lhs(assign); ++} ++ ++static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign, def_stmt = get_def_stmt(new_arg); ++ ++ if (gimple_code(def_stmt) == GIMPLE_PHI) { ++ gsi = gsi_after_labels(gimple_bb(def_stmt)); ++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true); ++ } else { ++ gsi = gsi_for_stmt(def_stmt); ++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true); ++ } ++ ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++} ++ ++static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type) ++{ ++ basic_block bb; ++ gimple_stmt_iterator gsi; ++ const_gimple def_stmt; ++ gimple assign; ++ ++ def_stmt = get_def_stmt(arg); ++ bb = gimple_bb(def_stmt); ++ gcc_assert(bb->index != 0); ++ gsi = gsi_after_labels(bb); ++ ++ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++} ++ ++static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i) ++{ ++ tree size_overflow_type; ++ tree arg; ++ const_gimple def_stmt; ++ ++ if (new_arg != NULL_TREE && is_gimple_constant(new_arg)) ++ return new_arg; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ def_stmt = get_def_stmt(arg); ++ gcc_assert(def_stmt != NULL); ++ size_overflow_type = get_size_overflow_type(visited, oldstmt, arg); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_PHI: ++ return cast_visited_phi_arg(visited, ssa_name_var, arg, size_overflow_type); ++ case GIMPLE_NOP: { ++ basic_block bb; ++ ++ bb = gimple_phi_arg_edge(oldstmt, i)->src; ++ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb); ++ } ++ case GIMPLE_ASM: { ++ gimple_stmt_iterator gsi; ++ gimple assign, stmt = get_def_stmt(arg); ++ ++ gsi = gsi_for_stmt(stmt); ++ assign = 
build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++ } ++ default: ++ gcc_assert(new_arg != NULL_TREE); ++ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type)); ++ return use_phi_ssa_name(visited, ssa_name_var, new_arg); ++ } ++} ++ ++static gimple overflow_create_phi_node(struct visited *visited, gimple oldstmt, tree result) ++{ ++ basic_block bb; ++ gimple phi; ++ gimple_seq seq; ++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); ++ ++ bb = gsi_bb(gsi); ++ ++ if (result == NULL_TREE) { ++ tree old_result = gimple_phi_result(oldstmt); ++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, old_result); ++ ++ result = create_new_var(size_overflow_type); ++ } ++ ++ phi = create_phi_node(result, bb); ++ gimple_phi_set_result(phi, make_ssa_name(result, phi)); ++ seq = phi_nodes(bb); ++ gsi = gsi_last(seq); ++ gsi_remove(&gsi, false); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT); ++ gimple_set_bb(phi, bb); ++ return phi; ++} ++ ++#if BUILDING_GCC_VERSION <= 4007 ++static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt) ++#else ++static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt) ++#endif ++{ ++ gimple new_phi; ++ unsigned int i; ++ tree arg, result; ++ location_t loc = gimple_location(oldstmt); ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ gcc_assert(!VEC_empty(tree, *args)); ++#else ++ gcc_assert(!args->is_empty()); ++#endif ++ ++ new_phi = overflow_create_phi_node(visited, oldstmt, ssa_name_var); ++ result = gimple_phi_result(new_phi); ++ ssa_name_var = SSA_NAME_VAR(result); ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, *args, i, arg) { ++#else ++ FOR_EACH_VEC_SAFE_ELT(args, i, arg) { ++#endif ++ arg = create_new_phi_arg(visited, ssa_name_var, arg, oldstmt, i); ++ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc); ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_free(tree, heap, *args); ++#else ++ vec_free(args); ++#endif ++ update_stmt(new_phi); ++ pointer_set_insert(visited->my_stmts, new_phi); ++ return result; ++} ++ ++static tree handle_phi(struct visited *visited, struct cgraph_node *caller_node, tree orig_result) ++{ ++ tree ssa_name_var = NULL_TREE; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, heap) *args = NULL; ++#else ++ vec<tree, va_heap, vl_embed> *args = NULL; ++#endif ++ gimple oldstmt = get_def_stmt(orig_result); ++ unsigned int i, len = gimple_phi_num_args(oldstmt); ++ ++ pointer_set_insert(visited->stmts, oldstmt); ++ for (i = 0; i < len; i++) { ++ tree arg, new_arg; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ new_arg = expand(visited, caller_node, arg); ++ ++ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE) ++ ssa_name_var = SSA_NAME_VAR(new_arg); ++ ++ if (is_gimple_constant(arg)) { ++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, arg); ++ ++ new_arg = cast_a_tree(size_overflow_type, arg); ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_safe_push(tree, heap, args, new_arg); ++#else ++ vec_safe_push(args, new_arg); ++#endif ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ return create_new_phi_node(visited, &args, ssa_name_var, oldstmt); ++#else ++ return create_new_phi_node(visited, args, ssa_name_var, oldstmt); ++#endif ++} ++ ++static tree create_cast_assign(struct visited *visited, 
gimple stmt) ++{ ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree lhs = gimple_assign_lhs(stmt); ++ const_tree rhs1_type = TREE_TYPE(rhs1); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ ++ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type)) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ return create_assign(visited, stmt, rhs1, AFTER_STMT); ++} ++ ++static bool skip_lhs_cast_check(const_gimple stmt) ++{ ++ const_tree rhs = gimple_assign_rhs1(stmt); ++ const_gimple def_stmt = get_def_stmt(rhs); ++ ++ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max ++ if (gimple_code(def_stmt) == GIMPLE_ASM) ++ return true; ++ ++ if (is_const_plus_unsigned_signed_truncation(rhs)) ++ return true; ++ ++ return false; ++} ++ ++static tree create_string_param(tree string) ++{ ++ tree i_type, a_type; ++ const int length = TREE_STRING_LENGTH(string); ++ ++ gcc_assert(length > 0); ++ ++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1)); ++ a_type = build_array_type(char_type_node, i_type); ++ ++ TREE_TYPE(string) = a_type; ++ TREE_CONSTANT(string) = 1; ++ TREE_READONLY(string) = 1; ++ ++ return build1(ADDR_EXPR, ptr_type_node, string); ++} ++ ++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value) ++{ ++ gimple cond_stmt; ++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); ++ ++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE); ++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(cond_stmt); ++} ++ ++static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min) ++{ ++ gimple func_stmt; ++ const_gimple def_stmt; ++ const_tree loc_line; ++ tree loc_file, ssa_name, current_func; ++ expanded_location xloc; ++ char *ssa_name_buf; ++ int len; ++ struct cgraph_edge *edge; ++ struct cgraph_node *callee_node; ++ int frequency; ++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); ++ ++ def_stmt = get_def_stmt(arg); ++ xloc = expand_location(gimple_location(def_stmt)); ++ ++ if (!gimple_has_location(def_stmt)) { ++ xloc = expand_location(gimple_location(stmt)); ++ if (!gimple_has_location(stmt)) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ } ++ ++ loc_line = build_int_cstu(unsigned_type_node, xloc.line); ++ ++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file); ++ loc_file = create_string_param(loc_file); ++ ++ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl)); ++ current_func = create_string_param(current_func); ++ ++ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL); ++ call_count++; ++ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? 
"min" : "max", call_count); ++ gcc_assert(len > 0); ++ ssa_name = build_string(len + 1, ssa_name_buf); ++ free(ssa_name_buf); ++ ssa_name = create_string_param(ssa_name); ++ ++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name) ++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name); ++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); ++ ++ callee_node = cgraph_get_create_node(report_size_overflow_decl); ++ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true); ++ ++ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth); ++ gcc_assert(edge != NULL); ++} ++ ++static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min) ++{ ++ basic_block cond_bb, join_bb, bb_true; ++ edge e; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ cond_bb = gimple_bb(stmt); ++ if (before) ++ gsi_prev(&gsi); ++ if (gsi_end_p(gsi)) ++ e = split_block_after_labels(cond_bb); ++ else ++ e = split_block(cond_bb, gsi_stmt(gsi)); ++ cond_bb = e->src; ++ join_bb = e->dest; ++ e->flags = EDGE_FALSE_VALUE; ++ e->probability = REG_BR_PROB_BASE; ++ ++ bb_true = create_empty_bb(cond_bb); ++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); ++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE); ++ make_edge(bb_true, join_bb, EDGE_FALLTHRU); ++ ++ gcc_assert(dom_info_available_p(CDI_DOMINATORS)); ++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); ++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); ++ ++ if (current_loops != NULL) { ++ gcc_assert(cond_bb->loop_father == join_bb->loop_father); ++ add_bb_to_loop(bb_true, cond_bb->loop_father); ++ } ++ ++ insert_cond(cond_bb, arg, cond_code, type_value); ++ insert_cond_result(caller_node, bb_true, stmt, arg, min); ++ ++// print_the_code_insertions(stmt); ++} ++ ++void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before) ++{ ++ const_tree rhs_type = TREE_TYPE(rhs); ++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min; ++ ++ gcc_assert(rhs_type != NULL_TREE); ++ if (TREE_CODE(rhs_type) == POINTER_TYPE) ++ return; ++ ++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE); ++ ++ if (is_const_plus_unsigned_signed_truncation(rhs)) ++ return; ++ ++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type)); ++ // typemax (-1) < typemin (0) ++ if (TREE_OVERFLOW(type_max)) ++ return; ++ ++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type)); ++ ++ cast_rhs_type = TREE_TYPE(cast_rhs); ++ type_max_type = TREE_TYPE(type_max); ++ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type)); ++ ++ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK); ++ ++ // special case: get_size_overflow_type(), 32, u64->s ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type)) ++ return; ++ ++ type_min_type = TREE_TYPE(type_min); ++ gcc_assert(types_compatible_p(type_max_type, type_min_type)); ++ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK); ++} ++ ++static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt) ++{ ++ bool cast_lhs, cast_rhs; 
++ tree lhs = gimple_assign_lhs(stmt); ++ tree rhs = gimple_assign_rhs1(stmt); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ const_tree rhs_type = TREE_TYPE(rhs); ++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type); ++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type); ++ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode); ++ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode); ++ ++ static bool check_lhs[3][4] = { ++ // ss su us uu ++ { false, true, true, false }, // lhs > rhs ++ { false, false, false, false }, // lhs = rhs ++ { true, true, true, true }, // lhs < rhs ++ }; ++ ++ static bool check_rhs[3][4] = { ++ // ss su us uu ++ { true, false, true, true }, // lhs > rhs ++ { true, false, true, true }, // lhs = rhs ++ { true, false, true, true }, // lhs < rhs ++ }; ++ ++ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!! ++ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode)) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (lhs_size > rhs_size) { ++ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ } else if (lhs_size == rhs_size) { ++ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ } else { ++ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; ++ } ++ ++ if (!cast_lhs && !cast_rhs) ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ if (cast_lhs && !skip_lhs_cast_check(stmt)) ++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT); ++ ++ if (cast_rhs) ++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT); ++ ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++} ++ ++static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gimple stmt) ++{ ++ enum tree_code rhs_code; ++ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt); ++ ++ if (pointer_set_contains(visited->my_stmts, stmt)) ++ return lhs; ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ new_rhs1 = expand(visited, caller_node, rhs1); ++ ++ if (new_rhs1 == NULL_TREE) ++ return create_cast_assign(visited, stmt); ++ ++ if (pointer_set_contains(visited->no_cast_check, stmt)) ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ rhs_code = gimple_assign_rhs_code(stmt); ++ if (rhs_code == BIT_NOT_EXPR || rhs_code == NEGATE_EXPR) { ++ tree size_overflow_type = get_size_overflow_type(visited, stmt, rhs1); ++ ++ new_rhs1 = cast_to_new_size_overflow_type(visited, stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ } ++ ++ if (!gimple_assign_cast_p(stmt)) ++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt); ++} ++ ++static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gimple stmt) ++{ ++ tree rhs1, lhs = gimple_assign_lhs(stmt); ++ gimple def_stmt = get_def_stmt(lhs); ++ 
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP); ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ if (is_gimple_constant(rhs1)) ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ switch (TREE_CODE(rhs1)) { ++ case SSA_NAME: ++ return handle_unary_rhs(visited, caller_node, def_stmt); ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case ADDR_EXPR: ++ case COMPONENT_REF: ++ case INDIRECT_REF: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case TARGET_MEM_REF: ++ case VIEW_CONVERT_EXPR: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ case PARM_DECL: ++ case VAR_DECL: ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ default: ++ debug_gimple_stmt(def_stmt); ++ debug_tree(rhs1); ++ gcc_unreachable(); ++ } ++} ++ ++static void __unused print_the_code_insertions(const_gimple stmt) ++{ ++ location_t loc = gimple_location(stmt); ++ ++ inform(loc, "Integer size_overflow check applied here."); ++} ++ ++static tree handle_binary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs) ++{ ++ tree rhs1, rhs2, new_lhs; ++ gimple def_stmt = get_def_stmt(lhs); ++ tree new_rhs1 = NULL_TREE; ++ tree new_rhs2 = NULL_TREE; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ /* no DImode/TImode division in the 32/64 bit kernel */ ++ switch (gimple_assign_rhs_code(def_stmt)) { ++ case RDIV_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ case POINTER_PLUS_EXPR: ++ case BIT_AND_EXPR: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ default: ++ break; ++ } ++ ++ new_lhs = handle_integer_truncation(visited, caller_node, lhs); ++ if (new_lhs != NULL_TREE) ++ return new_lhs; ++ ++ if (TREE_CODE(rhs1) == SSA_NAME) ++ new_rhs1 = expand(visited, caller_node, rhs1); ++ if (TREE_CODE(rhs2) == SSA_NAME) ++ new_rhs2 = expand(visited, caller_node, rhs2); ++ ++ if (skip_expr_on_double_type(def_stmt)) { ++ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++ insert_cast_expr(visited, get_def_stmt(new_lhs)); ++ return new_lhs; ++ } ++ ++ if (is_a_neg_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE); ++ if (is_a_neg_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2); ++ ++ ++ if (is_a_constant_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE); ++ if (is_a_constant_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2); ++ ++ // the const is between 0 and (signed) MAX ++ if (is_gimple_constant(rhs1)) ++ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT); ++ if (is_gimple_constant(rhs2)) ++ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT); ++ ++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++#if BUILDING_GCC_VERSION >= 4006 ++static tree get_new_rhs(struct visited *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs) ++{ ++ if (is_gimple_constant(rhs)) ++ return cast_a_tree(size_overflow_type, rhs); ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return NULL_TREE; ++ return expand(visited, 
caller_node, rhs); ++} ++ ++static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs) ++{ ++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs3 = gimple_assign_rhs3(def_stmt); ++ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1); ++ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2); ++ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3); ++ ++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3); ++} ++#endif ++ ++static tree get_my_stmt_lhs(struct visited *visited, gimple stmt) ++{ ++ gimple_stmt_iterator gsi; ++ gimple next_stmt = NULL; ++ ++ gsi = gsi_for_stmt(stmt); ++ ++ do { ++ gsi_next(&gsi); ++ next_stmt = gsi_stmt(gsi); ++ ++ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt)) ++ return NULL_TREE; ++ ++ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt)) ++ break; ++ ++ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt)); ++ } while (!gsi_end_p(gsi)); ++ ++ gcc_assert(next_stmt); ++ return get_lhs(next_stmt); ++} ++ ++static tree expand_visited(struct visited *visited, gimple def_stmt) ++{ ++ gimple_stmt_iterator gsi; ++ enum gimple_code code = gimple_code(def_stmt); ++ ++ if (code == GIMPLE_ASM) ++ return NULL_TREE; ++ ++ gsi = gsi_for_stmt(def_stmt); ++ gsi_next(&gsi); ++ ++ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi)) ++ return NULL_TREE; ++ return get_my_stmt_lhs(visited, def_stmt); ++} ++ ++tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs) ++{ ++ gimple def_stmt; ++ ++ def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP) ++ return NULL_TREE; ++ ++ if (pointer_set_contains(visited->my_stmts, def_stmt)) ++ return lhs; ++ ++ if (pointer_set_contains(visited->stmts, def_stmt)) ++ return expand_visited(visited, def_stmt); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_PHI: ++ return handle_phi(visited, caller_node, lhs); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return handle_unary_ops(visited, caller_node, def_stmt); ++ case 3: ++ return handle_binary_ops(visited, caller_node, lhs); ++#if BUILDING_GCC_VERSION >= 4006 ++ case 4: ++ return handle_ternary_ops(visited, caller_node, lhs); ++#endif ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c +new file mode 100644 +index 0000000..f8f5dd5 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c +@@ -0,0 +1,1133 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double 
integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++#define VEC_LEN 128 ++#define RET_CHECK NULL_TREE ++#define WRONG_NODE 32 ++#define NOT_INTENTIONAL_ASM NULL ++ ++unsigned int call_count; ++ ++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs); ++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs); ++ ++struct visited_fns { ++ struct visited_fns *next; ++ const_tree fndecl; ++ unsigned int num; ++ const_gimple first_stmt; ++}; ++ ++struct next_cgraph_node { ++ struct next_cgraph_node *next; ++ struct cgraph_node *current_function; ++ tree callee_fndecl; ++ unsigned int num; ++}; ++ ++// Don't want to duplicate entries in next_cgraph_node ++static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num) ++{ ++ const_tree new_callee_fndecl; ++ struct next_cgraph_node *cur_node; ++ ++ if (fndecl == RET_CHECK) ++ new_callee_fndecl = NODE_DECL(node); ++ else ++ new_callee_fndecl = fndecl; ++ ++ for (cur_node = head; cur_node; cur_node = cur_node->next) { ++ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0)) ++ continue; ++ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0)) ++ continue; ++ if (num == cur_node->num) ++ return true; ++ } ++ return false; ++} ++ ++/* Add a next_cgraph_node into the list for handle_function(). ++ * handle_function() iterates over all the next cgraph nodes and ++ * starts the overflow check insertion process. 
++ */ ++static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num) ++{ ++ struct next_cgraph_node *new_node; ++ ++ if (is_in_next_cgraph_node(head, node, fndecl, num)) ++ return head; ++ ++ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node)); ++ new_node->current_function = node; ++ new_node->next = NULL; ++ new_node->num = num; ++ if (fndecl == RET_CHECK) ++ new_node->callee_fndecl = NODE_DECL(node); ++ else ++ new_node->callee_fndecl = fndecl; ++ ++ if (!head) ++ return new_node; ++ ++ new_node->next = head; ++ return new_node; ++} ++ ++static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num) ++{ ++ struct cgraph_edge *e; ++ ++ if (num == 0) ++ return create_new_next_cgraph_node(head, node, RET_CHECK, num); ++ ++ for (e = node->callers; e; e = e->next_caller) { ++ tree fndecl = gimple_call_fndecl(e->call_stmt); ++ ++ gcc_assert(fndecl != NULL_TREE); ++ head = create_new_next_cgraph_node(head, e->caller, fndecl, num); ++ } ++ ++ return head; ++} ++ ++struct missing_functions { ++ struct missing_functions *next; ++ const_tree node; ++ tree fndecl; ++}; ++ ++static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node) ++{ ++ struct missing_functions *new_function; ++ ++ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function)); ++ new_function->node = node; ++ new_function->next = NULL; ++ ++ if (TREE_CODE(node) == FUNCTION_DECL) ++ new_function->fndecl = node; ++ else ++ new_function->fndecl = current_function_decl; ++ gcc_assert(new_function->fndecl); ++ ++ if (!missing_fn_head) ++ return new_function; ++ ++ new_function->next = missing_fn_head; ++ return new_function; ++} ++ ++/* If the function is missing from the hash table and it is a static function ++ * then create a next_cgraph_node from it for handle_function() ++ */ ++static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head) ++{ ++ unsigned int num; ++ const_tree orig_fndecl; ++ struct cgraph_node *next_node = NULL; ++ ++ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl); ++ ++ num = get_function_num(missing_fn_head->node, orig_fndecl); ++ if (num == CANNOT_FIND_ARG) ++ return cnodes; ++ ++ if (!is_missing_function(orig_fndecl, num)) ++ return cnodes; ++ ++ next_node = cgraph_get_node(missing_fn_head->fndecl); ++ if (next_node && next_node->local.local) ++ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num); ++ return cnodes; ++} ++ ++/* Search for missing size_overflow attributes on the last nodes in ipa and collect them ++ * into the next_cgraph_node list. They will be the next interesting returns or callees. 
++ */ ++static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node) ++{ ++ unsigned int i; ++ tree node; ++ struct missing_functions *cur, *missing_fn_head = NULL; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) { ++#else ++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) { ++#endif ++ switch (TREE_CODE(node)) { ++ case PARM_DECL: ++ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE) ++ break; ++ case FUNCTION_DECL: ++ missing_fn_head = create_new_missing_function(missing_fn_head, node); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ while (missing_fn_head) { ++ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head); ++ ++ cur = missing_fn_head->next; ++ free(missing_fn_head); ++ missing_fn_head = cur; ++ } ++ ++ return cnodes; ++} ++ ++static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ const_tree arg = gimple_phi_arg_def(phi, i); ++ ++ set_conditions(visited, interesting_conditions, arg); ++ } ++} ++ ++enum conditions { ++ FROM_CONST, NOT_UNARY, CAST ++}; ++ ++// Search for constants, cast assignments and binary/ternary assignments ++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ if (is_gimple_constant(lhs)) { ++ interesting_conditions[FROM_CONST] = true; ++ return; ++ } ++ ++ if (!def_stmt) ++ return; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return; ++ case GIMPLE_PHI: ++ return walk_phi_set_conditions(visited, interesting_conditions, lhs); ++ case GIMPLE_ASSIGN: ++ if (gimple_num_ops(def_stmt) == 2) { ++ const_tree rhs = gimple_assign_rhs1(def_stmt); ++ ++ if (gimple_assign_cast_p(def_stmt)) ++ interesting_conditions[CAST] = true; ++ ++ return set_conditions(visited, interesting_conditions, rhs); ++ } else { ++ interesting_conditions[NOT_UNARY] = true; ++ return; ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ gcc_unreachable(); ++ } ++} ++ ++// determine whether duplication will be necessary or not. 
++static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions) ++{ ++ struct pointer_set_t *visited; ++ ++ if (gimple_assign_cast_p(cur_node->first_stmt)) ++ interesting_conditions[CAST] = true; ++ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2) ++ interesting_conditions[NOT_UNARY] = true; ++ ++ visited = pointer_set_create(); ++ set_conditions(visited, interesting_conditions, cur_node->node); ++ pointer_set_destroy(visited); ++} ++ ++// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm ++static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ ++ // already removed ++ if (gimple_bb(asm_stmt) == NULL) ++ return; ++ gsi = gsi_for_stmt(asm_stmt); ++ ++ assign = gimple_build_assign(lhs, rhs); ++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT); ++ SSA_NAME_DEF_STMT(lhs) = assign; ++ ++ gsi_remove(&gsi, true); ++} ++ ++/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting ++ * stmt is a return otherwise it is the callee function. ++ */ ++const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum) ++{ ++ const_tree fndecl; ++ ++ if (argnum == 0) ++ fndecl = current_function_decl; ++ else ++ fndecl = gimple_call_fndecl(stmt); ++ ++ if (fndecl == NULL_TREE) ++ return NULL_TREE; ++ ++ return DECL_ORIGIN(fndecl); ++} ++ ++// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max ++static bool skip_asm(const_tree arg) ++{ ++ gimple def_stmt = get_def_stmt(arg); ++ ++ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt)); ++ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM; ++} ++ ++static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ tree arg = gimple_phi_arg_def(phi, i); ++ ++ walk_use_def(visited, cur_node, arg); ++ } ++} ++ ++static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ tree rhs1, rhs2; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ walk_use_def(visited, cur_node, rhs1); ++ walk_use_def(visited, cur_node, rhs2); ++} ++ ++static void insert_last_node(struct interesting_node *cur_node, tree node) ++{ ++ unsigned int i; ++ tree element; ++ enum tree_code code; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ if (is_gimple_constant(node)) ++ return; ++ ++ code = TREE_CODE(node); ++ if (code == VAR_DECL) { ++ node = DECL_ORIGIN(node); ++ code = TREE_CODE(node); ++ } ++ ++ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF) ++ return; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) { ++#else ++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) { ++#endif ++ if (operand_equal_p(node, element, 0)) ++ return; ++ } ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN); ++ VEC_safe_push(tree, gc, cur_node->last_nodes, node); ++#else ++ gcc_assert(cur_node->last_nodes->length() < VEC_LEN); ++ vec_safe_push(cur_node->last_nodes, node); ++#endif 
++}
++
++// a size_overflow asm stmt in the control flow doesn't stop the recursion
++static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++{
++	if (!is_size_overflow_asm(stmt))
++		walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++}
++
++/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
++ * and component refs (for checking the intentional_overflow attribute).
++ */
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++	const_gimple def_stmt;
++
++	if (TREE_CODE(lhs) != SSA_NAME) {
++		insert_last_node(cur_node, lhs);
++		return;
++	}
++
++	def_stmt = get_def_stmt(lhs);
++	if (!def_stmt)
++		return;
++
++	if (pointer_set_insert(visited, def_stmt))
++		return;
++
++	switch (gimple_code(def_stmt)) {
++	case GIMPLE_NOP:
++		return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++	case GIMPLE_ASM:
++		return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++	case GIMPLE_CALL: {
++		tree fndecl = gimple_call_fndecl(def_stmt);
++
++		if (fndecl == NULL_TREE)
++			return;
++		insert_last_node(cur_node, fndecl);
++		return;
++	}
++	case GIMPLE_PHI:
++		return walk_use_def_phi(visited, cur_node, lhs);
++	case GIMPLE_ASSIGN:
++		switch (gimple_num_ops(def_stmt)) {
++		case 2:
++			return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++		case 3:
++			return walk_use_def_binary(visited, cur_node, lhs);
++		}
++	default:
++		debug_gimple_stmt((gimple)def_stmt);
++		error("%s: unknown gimple code", __func__);
++		gcc_unreachable();
++	}
++}
++
++// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
++static void set_last_nodes(struct interesting_node *cur_node)
++{
++	struct pointer_set_t *visited;
++
++	visited = pointer_set_create();
++	walk_use_def(visited, cur_node, cur_node->node);
++	pointer_set_destroy(visited);
++}
++
++enum precond {
++	NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
++ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
++ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
++ * Possible error codes are skipped too: if the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume it is some kind of error code.
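The decision this comment describes can be read as a small truth table over the three flags set by set_conditions(). A standalone restatement of it in plain C, under the assumption that only these three flags matter (it ignores the intentional_overflow attribute handling that the function below also performs):

#include <stdio.h>
#include <stdbool.h>

enum conditions { FROM_CONST, NOT_UNARY, CAST };
enum precond { NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE };

/* The truth table from check_preconditions(), minus the attribute checks. */
static enum precond decide(const bool cond[3])
{
	/* a constant reaching a lone cast: most likely an error code */
	if (cond[CAST] && cond[FROM_CONST] && !cond[NOT_UNARY])
		return NO_ATTRIBUTE_SEARCH;
	/* no cast and no binary/ternary op: the value's range never changes */
	if (!cond[CAST] && !cond[NOT_UNARY])
		return NO_CHECK_INSERT;
	return NONE;
}

int main(void)
{
	/* flags are indexed FROM_CONST, NOT_UNARY, CAST */
	bool error_code[3] = { true, false, true };
	bool plain_add[3] = { false, true, false };

	printf("%d %d\n", decide(error_code), decide(plain_add));	/* 0 2 */
	return 0;
}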
++ */ ++static enum precond check_preconditions(struct interesting_node *cur_node) ++{ ++ bool interesting_conditions[3] = {false, false, false}; ++ ++ set_last_nodes(cur_node); ++ ++ check_intentional_attribute_ipa(cur_node); ++ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF) ++ return NO_ATTRIBUTE_SEARCH; ++ ++ search_interesting_conditions(cur_node, interesting_conditions); ++ ++ // error code ++ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY]) ++ return NO_ATTRIBUTE_SEARCH; ++ ++ // unnecessary overflow check ++ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY]) ++ return NO_CHECK_INSERT; ++ ++ if (cur_node->intentional_attr_cur_fndecl != MARK_NO) ++ return NO_CHECK_INSERT; ++ ++ return NONE; ++} ++ ++static tree cast_to_orig_type(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node) ++{ ++ const_gimple assign; ++ tree orig_type = TREE_TYPE(orig_node); ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ return gimple_assign_lhs(assign); ++} ++ ++static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node) ++{ ++ void (*set_rhs)(gimple, tree); ++ gimple stmt = cur_node->first_stmt; ++ const_tree orig_node = cur_node->node; ++ ++ switch (gimple_code(stmt)) { ++ case GIMPLE_RETURN: ++ gimple_return_set_retval(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node)); ++ break; ++ case GIMPLE_CALL: ++ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node)); ++ break; ++ case GIMPLE_ASSIGN: ++ switch (cur_node->num) { ++ case 1: ++ set_rhs = &gimple_assign_set_rhs1; ++ break; ++ case 2: ++ set_rhs = &gimple_assign_set_rhs2; ++ break; ++#if BUILDING_GCC_VERSION >= 4006 ++ case 3: ++ set_rhs = &gimple_assign_set_rhs3; ++ break; ++#endif ++ default: ++ gcc_unreachable(); ++ } ++ ++ set_rhs(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node)); ++ break; ++ default: ++ debug_gimple_stmt(stmt); ++ gcc_unreachable(); ++ } ++ ++ update_stmt(stmt); ++} ++ ++static struct visited *create_visited(void) ++{ ++ struct visited *new_node; ++ ++ new_node = (struct visited *)xmalloc(sizeof(*new_node)); ++ new_node->stmts = pointer_set_create(); ++ new_node->my_stmts = pointer_set_create(); ++ new_node->skip_expr_casts = pointer_set_create(); ++ new_node->no_cast_check = pointer_set_create(); ++ return new_node; ++} ++ ++static void free_visited(struct visited *visited) ++{ ++ pointer_set_destroy(visited->stmts); ++ pointer_set_destroy(visited->my_stmts); ++ pointer_set_destroy(visited->skip_expr_casts); ++ pointer_set_destroy(visited->no_cast_check); ++ ++ free(visited); ++} ++ ++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts, ++ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node ++ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value. 
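The check that check_size_overflow() inserts is the transformation the file headers describe: recompute in double integer precision, compare against the original type's TYPE_MAX, and report on overflow. A freestanding C illustration of the idea; checked_add_u32() and report_size_overflow() are invented stand-ins, not the code the plugin actually emits:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for the reporting path; the real instrumentation
 * calls a kernel-side handler that logs the event and kills the process. */
static void report_size_overflow(const char *where)
{
	fprintf(stderr, "size overflow detected in %s\n", where);
	exit(1);
}

/* Recompute a 32-bit addition in 64 bits (the "double integer precision"
 * of the header comments) and compare against the 32-bit TYPE_MAX. */
static uint32_t checked_add_u32(uint32_t a, uint32_t b)
{
	uint64_t wide = (uint64_t)a + (uint64_t)b;

	if (wide > UINT32_MAX)
		report_size_overflow(__func__);
	return (uint32_t)wide;
}

int main(void)
{
	printf("%u\n", checked_add_u32(1, 2));
	checked_add_u32(UINT32_MAX, 1);	/* triggers the report */
	return 0;
}

Doing the comparison in the wider type is what makes the check reliable: the overflowed low bits are never consulted, only the exact wide result.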
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct visited *visited, struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++	enum precond ret;
++	tree new_node, orig_node = cur_node->node;
++
++	ret = check_preconditions(cur_node);
++	if (ret == NO_ATTRIBUTE_SEARCH)
++		return cnodes;
++
++	cnodes = search_overflow_attribute(cnodes, cur_node);
++
++	if (ret == NO_CHECK_INSERT)
++		return cnodes;
++
++	new_node = expand(visited, caller_node, orig_node);
++	if (new_node == NULL_TREE)
++		return cnodes;
++
++	change_orig_node(visited, cur_node, new_node);
++	check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
++
++	return cnodes;
++}
++
++// Check whether the (first_stmt, node, num) triple is already in the interesting_node list
++static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
++{
++	struct interesting_node *cur;
++
++	for (cur = head; cur; cur = cur->next) {
++		if (!operand_equal_p(node, cur->node, 0))
++			continue;
++		if (num != cur->num)
++			continue;
++		if (first_stmt == cur->first_stmt)
++			return true;
++	}
++	return false;
++}
++
++/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
++   first_stmt: the call, assignment or return stmt; change_orig_node() will change the original node (retval or function arg) in this stmt
++   last_nodes: the last stmts in the recursion (they have no def_stmt). They are useful in the missing size_overflow attribute check and
++	       the intentional_overflow attribute check. They are collected by set_last_nodes().
++   num: the arg number of a call stmt, or 0 when it is a return value
++   node: the recursion starts from here, it is a call arg or a return value
++   fndecl: the fndecl of the callee function when the node is a call arg, otherwise the fndecl of the caller (current_function_decl).
++ intentional_attr_decl: intentional_overflow attribute of the callee function ++ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function ++ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists ++ */ ++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt) ++{ ++ struct interesting_node *new_node; ++ tree fndecl; ++ enum gimple_code code; ++ ++ gcc_assert(node != NULL_TREE); ++ code = gimple_code(first_stmt); ++ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN); ++ ++ if (num == CANNOT_FIND_ARG) ++ return head; ++ ++ if (skip_types(node)) ++ return head; ++ ++ if (skip_asm(node)) ++ return head; ++ ++ if (is_gimple_call(first_stmt)) ++ fndecl = gimple_call_fndecl(first_stmt); ++ else ++ fndecl = current_function_decl; ++ ++ if (fndecl == NULL_TREE) ++ return head; ++ ++ if (is_in_interesting_node(head, first_stmt, node, num)) ++ return head; ++ ++ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node)); ++ ++ new_node->next = NULL; ++ new_node->first_stmt = first_stmt; ++#if BUILDING_GCC_VERSION <= 4007 ++ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN); ++#else ++ vec_alloc(new_node->last_nodes, VEC_LEN); ++#endif ++ new_node->num = num; ++ new_node->node = node; ++ new_node->fndecl = fndecl; ++ new_node->intentional_attr_decl = MARK_NO; ++ new_node->intentional_attr_cur_fndecl = MARK_NO; ++ new_node->intentional_mark_from_gimple = asm_stmt; ++ ++ if (!head) ++ return new_node; ++ ++ new_node->next = head; ++ return new_node; ++} ++ ++/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa). ++ * If the ret stmt is in the next cgraph node list then it's an interesting ret. ++ */ ++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node) ++{ ++ struct next_cgraph_node *cur_node; ++ tree ret = gimple_return_retval(stmt); ++ ++ if (ret == NULL_TREE) ++ return head; ++ ++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) { ++ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0)) ++ continue; ++ if (cur_node->num == 0) ++ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM); ++ } ++ ++ return head; ++} ++ ++/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa). ++ * If the call stmt is in the next cgraph node list then it's an interesting call. 
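Both handlers above rely on the numbering convention from the struct interesting_node comment: num == 0 names the return value, while num >= 1 names the (num - 1)-th call argument, which is why the code indexes gimple_call_arg(stmt, argnum - 1). A toy dispatcher making the convention explicit (struct call_record is made up for the example):

#include <stdio.h>

/* Invented call record; in the plugin, num - 1 indexes gimple_call_arg()
 * and num == 0 selects the return value instead. */
struct call_record {
	int retval;
	int args[3];
};

static int *select_node(struct call_record *c, unsigned int num)
{
	if (num == 0)
		return &c->retval;	/* a return is tracked as num 0 */
	return &c->args[num - 1];	/* call args are numbered from 1 */
}

int main(void)
{
	struct call_record c = { 42, { 10, 20, 30 } };

	printf("num 0 -> %d, num 2 -> %d\n", *select_node(&c, 0), *select_node(&c, 2));
	return 0;
}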
++ */ ++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node) ++{ ++ unsigned int argnum; ++ tree arg; ++ const_tree fndecl; ++ struct next_cgraph_node *cur_node; ++ ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ return head; ++ ++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) { ++ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0)) ++ continue; ++ argnum = get_correct_arg_count(cur_node->num, fndecl); ++ gcc_assert(argnum != CANNOT_FIND_ARG); ++ if (argnum == 0) ++ continue; ++ ++ arg = gimple_call_arg(stmt, argnum - 1); ++ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM); ++ } ++ ++ return head; ++} ++ ++static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count) ++{ ++ if (!operand_equal_p(orig_node, node, 0)) ++ return WRONG_NODE; ++ if (skip_types(node)) ++ return WRONG_NODE; ++ return ret_count; ++} ++ ++// Get the index of the rhs node in an assignment ++static unsigned int get_assign_ops_count(const_gimple stmt, tree node) ++{ ++ const_tree rhs1, rhs2; ++ unsigned int ret; ++ ++ gcc_assert(stmt); ++ gcc_assert(is_gimple_assign(stmt)); ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ gcc_assert(rhs1 != NULL_TREE); ++ ++ switch (gimple_num_ops(stmt)) { ++ case 2: ++ return check_ops(node, rhs1, 1); ++ case 3: ++ ret = check_ops(node, rhs1, 1); ++ if (ret != WRONG_NODE) ++ return ret; ++ ++ rhs2 = gimple_assign_rhs2(stmt); ++ gcc_assert(rhs2 != NULL_TREE); ++ return check_ops(node, rhs2, 2); ++ default: ++ gcc_unreachable(); ++ } ++} ++ ++// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function. ++static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt) ++{ ++ unsigned int i; ++ ++ if (gimple_call_fndecl(stmt) == NULL_TREE) ++ return CANNOT_FIND_ARG; ++ ++ for (i = 0; i < gimple_call_num_args(stmt); i++) { ++ tree node; ++ ++ node = gimple_call_arg(stmt, i); ++ if (!operand_equal_p(arg, node, 0)) ++ continue; ++ if (!skip_types(node)) ++ return i + 1; ++ } ++ ++ return CANNOT_FIND_ARG; ++} ++ ++/* starting from the size_overflow asm stmt collect interesting stmts. They can be ++ * any of return, call or assignment stmts (because of inlining). 
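find_arg_number_gimple() above answers with a 1-based index and reserves CANNOT_FIND_ARG (32, per size_overflow.h later in this patch) as its not-found sentinel. The same search reduced to plain integers, assuming nothing beyond that sentinel convention:

#include <stdio.h>

#define CANNOT_FIND_ARG 32	/* the sentinel value size_overflow.h defines */

static unsigned int find_arg_number(int needle, const int *args, unsigned int nargs)
{
	unsigned int i;

	for (i = 0; i < nargs; i++)
		if (args[i] == needle)
			return i + 1;	/* 1-based, like the plugin's argnums */
	return CANNOT_FIND_ARG;
}

int main(void)
{
	int args[3] = { 7, 13, 42 };

	printf("%u %u\n", find_arg_number(13, args, 3), find_arg_number(99, args, 3));	/* 2 32 */
	return 0;
}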
++ */ ++static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm) ++{ ++ use_operand_p use_p; ++ imm_use_iterator imm_iter; ++ unsigned int argnum; ++ ++ gcc_assert(TREE_CODE(node) == SSA_NAME); ++ ++ if (pointer_set_insert(visited, node)) ++ return head; ++ ++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) { ++ gimple stmt = USE_STMT(use_p); ++ ++ if (stmt == NULL) ++ return head; ++ if (is_gimple_debug(stmt)) ++ continue; ++ ++ switch (gimple_code(stmt)) { ++ case GIMPLE_CALL: ++ argnum = find_arg_number_gimple(node, stmt); ++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm); ++ break; ++ case GIMPLE_RETURN: ++ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm); ++ break; ++ case GIMPLE_ASSIGN: ++ argnum = get_assign_ops_count(stmt, node); ++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm); ++ break; ++ case GIMPLE_PHI: { ++ tree result = gimple_phi_result(stmt); ++ head = get_interesting_ret_or_call(visited, head, result, intentional_asm); ++ break; ++ } ++ case GIMPLE_ASM: ++ if (gimple_asm_noutputs(stmt) != 0) ++ break; ++ if (!is_size_overflow_asm(stmt)) ++ break; ++ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm); ++ break; ++ case GIMPLE_COND: ++ case GIMPLE_SWITCH: ++ break; ++ default: ++ debug_gimple_stmt(stmt); ++ gcc_unreachable(); ++ break; ++ } ++ } ++ return head; ++} ++ ++static void remove_size_overflow_asm(gimple stmt) ++{ ++ gimple_stmt_iterator gsi; ++ tree input, output; ++ ++ if (!is_size_overflow_asm(stmt)) ++ return; ++ ++ if (gimple_asm_noutputs(stmt) == 0) { ++ gsi = gsi_for_stmt(stmt); ++ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt); ++ gsi_remove(&gsi, true); ++ return; ++ } ++ ++ input = gimple_asm_input_op(stmt, 0); ++ output = gimple_asm_output_op(stmt, 0); ++ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input)); ++} ++ ++/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts. ++ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it. ++ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output. 
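The is_size_overflow_asm() and is_size_overflow_intentional_asm_*() predicates used here live in another file of the plugin, but size_overflow.h (added further down in this patch) defines the marker strings they have to tell apart. A plausible sketch of such a classifier; the function name and enum are invented, the three string literals are the header's own, and the longest prefix must be tested first because every marker begins with OK_ASM_STR:

#include <stdio.h>
#include <string.h>

/* Marker strings as defined in size_overflow.h. */
#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
#define YES_ASM_STR      "# size_overflow MARK_YES "
#define OK_ASM_STR       "# size_overflow "

enum asm_mark { ASM_NONE, ASM_OK, ASM_YES, ASM_TURN_OFF };

static enum asm_mark classify_asm_string(const char *s)
{
	if (!s)
		return ASM_NONE;
	if (!strncmp(s, TURN_OFF_ASM_STR, strlen(TURN_OFF_ASM_STR)))
		return ASM_TURN_OFF;
	if (!strncmp(s, YES_ASM_STR, strlen(YES_ASM_STR)))
		return ASM_YES;
	if (!strncmp(s, OK_ASM_STR, strlen(OK_ASM_STR)))
		return ASM_OK;
	return ASM_NONE;
}

int main(void)
{
	printf("%d\n", classify_asm_string("# size_overflow MARK_TURN_OFF DATA"));	/* 3 */
	printf("%d\n", classify_asm_string("# size_overflow MARK_YES DATA"));	/* 2 */
	printf("%d\n", classify_asm_string("# size_overflow DATA"));	/* 1 */
	return 0;
}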
++ */ ++static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head) ++{ ++ const_tree output; ++ struct pointer_set_t *visited; ++ gimple intentional_asm = NOT_INTENTIONAL_ASM; ++ ++ if (!is_size_overflow_asm(stmt)) ++ return head; ++ ++ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt)) ++ intentional_asm = stmt; ++ ++ gcc_assert(gimple_asm_ninputs(stmt) == 1); ++ ++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt)) ++ return head; ++ ++ if (gimple_asm_noutputs(stmt) == 0) { ++ const_tree input; ++ ++ if (!is_size_overflow_intentional_asm_turn_off(stmt)) ++ return head; ++ ++ input = gimple_asm_input_op(stmt, 0); ++ remove_size_overflow_asm(stmt); ++ if (is_gimple_constant(TREE_VALUE(input))) ++ return head; ++ visited = pointer_set_create(); ++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm); ++ pointer_set_destroy(visited); ++ return head; ++ } ++ ++ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt)) ++ remove_size_overflow_asm(stmt); ++ ++ visited = pointer_set_create(); ++ output = gimple_asm_output_op(stmt, 0); ++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm); ++ pointer_set_destroy(visited); ++ return head; ++} ++ ++/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass) ++ * or a call stmt or a return stmt and store them in the interesting_node list ++ */ ++static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node) ++{ ++ basic_block bb; ++ struct interesting_node *head = NULL; ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ enum gimple_code code; ++ gimple stmt = gsi_stmt(gsi); ++ ++ code = gimple_code(stmt); ++ ++ if (code == GIMPLE_ASM) ++ head = handle_stmt_by_size_overflow_asm(stmt, head); ++ ++ if (!next_node) ++ continue; ++ if (code == GIMPLE_CALL) ++ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node); ++ if (code == GIMPLE_RETURN) ++ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node); ++ } ++ } ++ return head; ++} ++ ++static void free_interesting_node(struct interesting_node *head) ++{ ++ struct interesting_node *cur; ++ ++ while (head) { ++ cur = head->next; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC_free(tree, gc, head->last_nodes); ++#else ++ vec_free(head->last_nodes); ++#endif ++ free(head); ++ head = cur; ++ } ++} ++ ++static struct visited_fns *insert_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node) ++{ ++ struct visited_fns *new_visited_fns; ++ ++ new_visited_fns = (struct visited_fns *)xmalloc(sizeof(*new_visited_fns)); ++ new_visited_fns->fndecl = cur_node->fndecl; ++ new_visited_fns->num = cur_node->num; ++ new_visited_fns->first_stmt = cur_node->first_stmt; ++ new_visited_fns->next = NULL; ++ ++ if (!head) ++ return new_visited_fns; ++ ++ new_visited_fns->next = head; ++ return new_visited_fns; ++} ++ ++/* Check whether the function was already visited_fns. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are same then ++ * it is a visited_fns function. 
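is_visited_fns_function() keys its memo on the (fndecl, num, first_stmt) triple; that memo is what lets handle_function() below recurse through callers without reprocessing, or looping on, a site it has already handled. The same scan-then-push shape in miniature, with a string standing in for the fndecl tree and an arbitrary address for the stmt (all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy memo keyed on the same triple as visited_fns. */
struct memo {
	struct memo *next;
	const char *fndecl;
	unsigned int num;
	const void *first_stmt;
};

static int is_visited(const struct memo *m, const char *fn, unsigned int num, const void *stmt)
{
	for (; m; m = m->next)
		if (m->first_stmt == stmt && m->num == num && !strcmp(m->fndecl, fn))
			return 1;
	return 0;
}

static struct memo *mark_visited(struct memo *head, const char *fn, unsigned int num, const void *stmt)
{
	struct memo *m = malloc(sizeof(*m));

	if (!m)
		abort();
	m->fndecl = fn;
	m->num = num;
	m->first_stmt = stmt;
	m->next = head;
	return m;
}

int main(void)
{
	struct memo *head = NULL;
	int site;	/* any distinct address serves as a stmt identity */

	if (!is_visited(head, "copy_from_user", 2, &site))
		head = mark_visited(head, "copy_from_user", 2, &site);
	printf("seen again? %d\n", is_visited(head, "copy_from_user", 2, &site));	/* 1 */
	return 0;
}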
++ */ ++static bool is_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node) ++{ ++ struct visited_fns *cur; ++ ++ if (!head) ++ return false; ++ ++ for (cur = head; cur; cur = cur->next) { ++ if (cur_node->first_stmt != cur->first_stmt) ++ continue; ++ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0)) ++ continue; ++ if (cur_node->num == cur->num) ++ return true; ++ } ++ return false; ++} ++ ++static void free_next_cgraph_node(struct next_cgraph_node *head) ++{ ++ struct next_cgraph_node *cur; ++ ++ while (head) { ++ cur = head->next; ++ free(head); ++ head = cur; ++ } ++} ++ ++static void remove_all_size_overflow_asm(void) ++{ ++ basic_block bb; ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator si; ++ ++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) ++ remove_size_overflow_asm(gsi_stmt(si)); ++ } ++} ++ ++/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function ++ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk ++ * the newly collected interesting functions (they are interesting if there is control flow between ++ * the interesting stmts and them). ++ */ ++static struct visited_fns *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited_fns *visited_fns) ++{ ++ struct visited *visited; ++ struct interesting_node *head, *cur_node; ++ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL; ++ ++ set_current_function_decl(NODE_DECL(node)); ++ call_count = 0; ++ ++ head = collect_interesting_stmts(next_node); ++ ++ visited = create_visited(); ++ for (cur_node = head; cur_node; cur_node = cur_node->next) { ++ if (is_visited_fns_function(visited_fns, cur_node)) ++ continue; ++ cnodes_head = handle_interesting_stmt(visited, cnodes_head, cur_node, node); ++ visited_fns = insert_visited_fns_function(visited_fns, cur_node); ++ } ++ ++ free_visited(visited); ++ free_interesting_node(head); ++ remove_all_size_overflow_asm(); ++ unset_current_function_decl(); ++ ++ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next) ++ visited_fns = handle_function(cur_cnodes->current_function, cur_cnodes, visited_fns); ++ ++ free_next_cgraph_node(cnodes_head); ++ return visited_fns; ++} ++ ++static void free_visited_fns(struct visited_fns *head) ++{ ++ struct visited_fns *cur; ++ ++ while (head) { ++ cur = head->next; ++ free(head); ++ head = cur; ++ } ++} ++ ++// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions ++unsigned int search_function(void) ++{ ++ struct cgraph_node *node; ++ struct visited_fns *visited_fns = NULL; ++ ++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { ++ gcc_assert(cgraph_function_flags_ready); ++#if BUILDING_GCC_VERSION <= 4007 ++ gcc_assert(node->reachable); ++#endif ++ ++ visited_fns = handle_function(node, NULL, visited_fns); ++ } ++ ++ free_visited_fns(visited_fns); ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data insert_size_overflow_check_data = { ++#else ++static struct ipa_opt_pass_d insert_size_overflow_check = { ++ .pass = { ++#endif ++ .type = SIMPLE_IPA_PASS, ++ .name = "size_overflow", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = search_function, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ 
.properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi, ++#if BUILDING_GCC_VERSION < 4009 ++ }, ++ .generate_summary = NULL, ++ .write_summary = NULL, ++ .read_summary = NULL, ++#if BUILDING_GCC_VERSION >= 4006 ++ .write_optimization_summary = NULL, ++ .read_optimization_summary = NULL, ++#endif ++ .stmt_fixup = NULL, ++ .function_transform_todo_flags_start = 0, ++ .function_transform = NULL, ++ .variable_transform = NULL, ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class insert_size_overflow_check : public ipa_opt_pass_d { ++public: ++ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {} ++ unsigned int execute() { return search_function(); } ++}; ++} ++#endif ++ ++struct opt_pass *make_insert_size_overflow_check(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return new insert_size_overflow_check(); ++#else ++ return &insert_size_overflow_check.pass; ++#endif ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c +new file mode 100644 +index 0000000..742cd52 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c +@@ -0,0 +1,568 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++/* Get the param of the intentional_overflow attribute. 
++ * * 0: MARK_NOT_INTENTIONAL ++ * * 1..MAX_PARAM: MARK_YES ++ * * -1: MARK_TURN_OFF ++ */ ++static tree get_attribute_param(const_tree decl) ++{ ++ const_tree attr; ++ ++ if (decl == NULL_TREE) ++ return NULL_TREE; ++ ++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl)); ++ if (!attr || !TREE_VALUE(attr)) ++ return NULL_TREE; ++ ++ return TREE_VALUE(attr); ++} ++ ++// MARK_TURN_OFF ++bool is_turn_off_intentional_attr(const_tree decl) ++{ ++ const_tree param_head; ++ ++ param_head = get_attribute_param(decl); ++ if (param_head == NULL_TREE) ++ return false; ++ ++ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1) ++ return true; ++ return false; ++} ++ ++// MARK_NOT_INTENTIONAL ++bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum) ++{ ++ const_tree param_head; ++ ++ if (argnum == 0) ++ return false; ++ ++ param_head = get_attribute_param(decl); ++ if (param_head == NULL_TREE) ++ return false; ++ ++ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head))) ++ return true; ++ return false; ++} ++ ++// MARK_YES ++bool is_yes_intentional_attr(const_tree decl, unsigned int argnum) ++{ ++ tree param, param_head; ++ ++ if (argnum == 0) ++ return false; ++ ++ param_head = get_attribute_param(decl); ++ for (param = param_head; param; param = TREE_CHAIN(param)) ++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param))) ++ return true; ++ return false; ++} ++ ++void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum) ++{ ++ location_t loc; ++ ++ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF) ++ return; ++ ++ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES) ++ return; ++ ++ loc = DECL_SOURCE_LOCATION(decl); ++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum); ++} ++ ++// Get the field decl of a component ref for intentional_overflow checking ++static const_tree search_field_decl(const_tree comp_ref) ++{ ++ const_tree field = NULL_TREE; ++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref); ++ ++ for (i = 0; i < len; i++) { ++ field = TREE_OPERAND(comp_ref, i); ++ if (TREE_CODE(field) == FIELD_DECL) ++ break; ++ } ++ gcc_assert(TREE_CODE(field) == FIELD_DECL); ++ return field; ++} ++ ++/* Get the type of the intentional_overflow attribute of a node ++ * * MARK_TURN_OFF ++ * * MARK_YES ++ * * MARK_NO ++ * * MARK_NOT_INTENTIONAL ++ */ ++enum mark get_intentional_attr_type(const_tree node) ++{ ++ const_tree cur_decl; ++ ++ if (node == NULL_TREE) ++ return MARK_NO; ++ ++ switch (TREE_CODE(node)) { ++ case COMPONENT_REF: ++ cur_decl = search_field_decl(node); ++ if (is_turn_off_intentional_attr(cur_decl)) ++ return MARK_TURN_OFF; ++ if (is_end_intentional_intentional_attr(cur_decl, 1)) ++ return MARK_YES; ++ break; ++ case PARM_DECL: { ++ unsigned int argnum; ++ ++ cur_decl = DECL_ORIGIN(current_function_decl); ++ argnum = find_arg_number_tree(node, cur_decl); ++ if (argnum == CANNOT_FIND_ARG) ++ return MARK_NO; ++ if (is_yes_intentional_attr(cur_decl, argnum)) ++ return MARK_YES; ++ if (is_end_intentional_intentional_attr(cur_decl, argnum)) ++ return MARK_NOT_INTENTIONAL; ++ break; ++ } ++ case FUNCTION_DECL: ++ if (is_turn_off_intentional_attr(DECL_ORIGIN(node))) ++ return MARK_TURN_OFF; ++ break; ++ default: ++ break; ++ } ++ return MARK_NO; ++} ++ ++// Search for the intentional_overflow attribute on the last nodes ++static enum mark search_last_nodes_intentional(struct 
interesting_node *cur_node) ++{ ++ unsigned int i; ++ tree last_node; ++ enum mark mark = MARK_NO; ++ ++#if BUILDING_GCC_VERSION <= 4007 ++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) { ++#else ++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) { ++#endif ++ mark = get_intentional_attr_type(last_node); ++ if (mark != MARK_NO) ++ break; ++ } ++ return mark; ++} ++ ++/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and ++ * set the appropriate intentional_overflow type. Delete the asm stmt in the end. ++ */ ++static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node) ++{ ++ if (!cur_node->intentional_mark_from_gimple) ++ return false; ++ ++ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple)) ++ cur_node->intentional_attr_cur_fndecl = MARK_YES; ++ else ++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF; ++ ++ // skip param decls ++ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0) ++ return true; ++ return true; ++} ++ ++/* Search intentional_overflow attribute on caller and on callee too. ++ * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes ++ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int) ++ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup ++*/ ++void check_intentional_attribute_ipa(struct interesting_node *cur_node) ++{ ++ const_tree fndecl; ++ ++ if (is_intentional_attribute_from_gimple(cur_node)) ++ return; ++ ++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) { ++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF; ++ return; ++ } ++ ++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) { ++ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL; ++ return; ++ } ++ ++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN) ++ return; ++ ++ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num); ++ if (is_turn_off_intentional_attr(fndecl)) { ++ cur_node->intentional_attr_decl = MARK_TURN_OFF; ++ return; ++ } ++ ++ if (is_end_intentional_intentional_attr(fndecl, cur_node->num)) ++ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL; ++ else if (is_yes_intentional_attr(fndecl, cur_node->num)) ++ cur_node->intentional_attr_decl = MARK_YES; ++ ++ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node); ++ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num); ++} ++ ++bool is_a_cast_and_const_overflow(const_tree no_const_rhs) ++{ ++ const_tree rhs1, lhs, rhs1_type, lhs_type; ++ enum machine_mode lhs_mode, rhs_mode; ++ gimple def_stmt = get_def_stmt(no_const_rhs); ++ ++ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ lhs = gimple_assign_lhs(def_stmt); ++ rhs1_type = TREE_TYPE(rhs1); ++ lhs_type = TREE_TYPE(lhs); ++ rhs_mode = TYPE_MODE(rhs1_type); ++ lhs_mode = TYPE_MODE(lhs_type); ++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode) ++ return false; ++ ++ return true; ++} ++ ++static bool no_uses(tree node) ++{ ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ ++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) { ++ const_gimple use_stmt = USE_STMT(use_p); ++ ++ if (use_stmt == NULL) ++ return true; ++ if (is_gimple_debug(use_stmt)) ++ continue; ++ return false; ++ } ++ return true; ++} ++ ++// 3.8.5 mm/page-writeback.c 
__ilog2_u64(): ret, uint + uintmax; uint -> int; int max ++bool is_const_plus_unsigned_signed_truncation(const_tree lhs) ++{ ++ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs_type = TREE_TYPE(rhs1); ++ lhs_type = TREE_TYPE(lhs); ++ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type)) ++ return false; ++ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type)) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs1); ++ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3) ++ return false; ++ ++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2)) ++ return false; ++ ++ if (is_gimple_constant(rhs2)) ++ not_const_rhs = rhs1; ++ else ++ not_const_rhs = rhs2; ++ ++ return no_uses(not_const_rhs); ++} ++ ++static bool is_lt_signed_type_max(const_tree rhs) ++{ ++ const_tree new_type, type_max, type = TREE_TYPE(rhs); ++ ++ if (!TYPE_UNSIGNED(type)) ++ return true; ++ ++ switch (TYPE_MODE(type)) { ++ case QImode: ++ new_type = intQI_type_node; ++ break; ++ case HImode: ++ new_type = intHI_type_node; ++ break; ++ case SImode: ++ new_type = intSI_type_node; ++ break; ++ case DImode: ++ new_type = intDI_type_node; ++ break; ++ default: ++ debug_tree((tree)type); ++ gcc_unreachable(); ++ } ++ ++ type_max = TYPE_MAX_VALUE(new_type); ++ if (!tree_int_cst_lt(type_max, rhs)) ++ return true; ++ ++ return false; ++} ++ ++static bool is_gt_zero(const_tree rhs) ++{ ++ const_tree type = TREE_TYPE(rhs); ++ ++ if (TYPE_UNSIGNED(type)) ++ return true; ++ ++ if (!tree_int_cst_lt(rhs, integer_zero_node)) ++ return true; ++ ++ return false; ++} ++ ++bool is_a_constant_overflow(const_gimple stmt, const_tree rhs) ++{ ++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR) ++ return false; ++ if (!is_gimple_constant(rhs)) ++ return false; ++ ++ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow ++ if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs)) ++ return false; ++ ++ return true; ++} ++ ++static tree change_assign_rhs(struct visited *visited, gimple stmt, const_tree orig_rhs, tree new_rhs) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(orig_rhs); ++ ++ gcc_assert(is_gimple_assign(stmt)); ++ ++ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, assign); ++ return gimple_assign_lhs(assign); ++} ++ ++tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2) ++{ ++ tree new_rhs, orig_rhs; ++ void (*gimple_assign_set_rhs)(gimple, tree); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ tree lhs = gimple_assign_lhs(stmt); ++ ++ if (!check_overflow) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (change_rhs == NULL_TREE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (new_rhs2 == NULL_TREE) { ++ orig_rhs = rhs1; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs1; ++ } else { ++ orig_rhs = rhs2; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs2; ++ } ++ ++ check_size_overflow(caller_node, stmt, 
TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT); ++ ++ new_rhs = change_assign_rhs(visited, stmt, orig_rhs, change_rhs); ++ gimple_assign_set_rhs(stmt, new_rhs); ++ update_stmt(stmt); ++ ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++} ++ ++static bool is_subtraction_special(struct visited *visited, const_gimple stmt) ++{ ++ gimple rhs1_def_stmt, rhs2_def_stmt; ++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs; ++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode; ++ const_tree rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs2 = gimple_assign_rhs2(stmt); ++ ++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2)) ++ return false; ++ ++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME); ++ ++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR) ++ return false; ++ ++ rhs1_def_stmt = get_def_stmt(rhs1); ++ rhs2_def_stmt = get_def_stmt(rhs2); ++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt)) ++ return false; ++ ++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); ++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt); ++ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt); ++ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt); ++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1)); ++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1)); ++ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs)); ++ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs)); ++ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode)) ++ return false; ++ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode)) ++ return false; ++ ++ pointer_set_insert(visited->no_cast_check, rhs1_def_stmt); ++ pointer_set_insert(visited->no_cast_check, rhs2_def_stmt); ++ return true; ++} ++ ++static gimple create_binary_assign(struct visited *visited, enum tree_code code, gimple stmt, tree rhs1, tree rhs2) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree type = TREE_TYPE(rhs1); ++ tree lhs = create_new_var(type); ++ ++ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2))); ++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2); ++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ pointer_set_insert(visited->my_stmts, assign); ++ return assign; ++} ++ ++static tree cast_to_TI_type(struct visited *visited, gimple stmt, tree node) ++{ ++ gimple_stmt_iterator gsi; ++ gimple cast_stmt; ++ tree type = TREE_TYPE(node); ++ ++ if (types_compatible_p(type, intTI_type_node)) ++ return node; ++ ++ gsi = gsi_for_stmt(stmt); ++ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ pointer_set_insert(visited->my_stmts, cast_stmt); ++ return gimple_assign_lhs(cast_stmt); ++} ++ ++static tree get_def_stmt_rhs(struct visited *visited, const_tree var) ++{ ++ tree rhs1, def_stmt_rhs1; ++ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt; ++ ++ def_stmt = get_def_stmt(var); ++ if (!gimple_assign_cast_p(def_stmt)) ++ return NULL_TREE; ++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && pointer_set_contains(visited->my_stmts, def_stmt) && gimple_assign_cast_p(def_stmt)); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs1_def_stmt = get_def_stmt(rhs1); ++ if 
(!gimple_assign_cast_p(rhs1_def_stmt)) ++ return rhs1; ++ ++ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); ++ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1); ++ ++ switch (gimple_code(def_stmt_rhs1_def_stmt)) { ++ case GIMPLE_CALL: ++ case GIMPLE_NOP: ++ case GIMPLE_ASM: ++ case GIMPLE_PHI: ++ return def_stmt_rhs1; ++ case GIMPLE_ASSIGN: ++ return rhs1; ++ default: ++ debug_gimple_stmt(def_stmt_rhs1_def_stmt); ++ gcc_unreachable(); ++ } ++} ++ ++tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs) ++{ ++ tree new_rhs1, new_rhs2; ++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs; ++ gimple assign, stmt = get_def_stmt(lhs); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ ++ if (!is_subtraction_special(visited, stmt)) ++ return NULL_TREE; ++ ++ new_rhs1 = expand(visited, caller_node, rhs1); ++ new_rhs2 = expand(visited, caller_node, rhs2); ++ ++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs1); ++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs2); ++ ++ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE) ++ return NULL_TREE; ++ ++ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) { ++ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs1_def_stmt_rhs1); ++ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs2_def_stmt_rhs1); ++ } ++ ++ assign = create_binary_assign(visited, MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1); ++ new_lhs = gimple_assign_lhs(assign); ++ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT); ++ ++ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++bool is_a_neg_overflow(const_gimple stmt, const_tree rhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return false; ++ ++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs); ++ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR) ++ return false; ++ ++ return true; ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c +new file mode 100644 +index 0000000..ca4def3 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/misc.c +@@ -0,0 +1,180 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++void set_current_function_decl(tree fndecl) ++{ ++ gcc_assert(fndecl != NULL_TREE); ++ ++ push_cfun(DECL_STRUCT_FUNCTION(fndecl)); ++ calculate_dominance_info(CDI_DOMINATORS); ++ current_function_decl = fndecl; ++} ++ ++void unset_current_function_decl(void) ++{ ++ free_dominance_info(CDI_DOMINATORS); ++ pop_cfun(); ++ current_function_decl = NULL_TREE; ++} ++ ++static bool is_bool(const_tree node) ++{ ++ const_tree type; ++ ++ if (node == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(node); ++ if (!INTEGRAL_TYPE_P(type)) ++ return false; ++ if (TREE_CODE(type) == BOOLEAN_TYPE) ++ return true; ++ if (TYPE_PRECISION(type) == 1) ++ return true; ++ return false; ++} ++ ++bool skip_types(const_tree var) ++{ ++ tree type; ++ enum tree_code code; ++ ++ if (is_gimple_constant(var)) ++ return true; ++ ++ switch (TREE_CODE(var)) { ++ case ADDR_EXPR: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case INDIRECT_REF: ++ case TARGET_MEM_REF: ++ case COMPONENT_REF: ++ case VAR_DECL: ++ case VIEW_CONVERT_EXPR: ++ return true; ++ default: ++ break; ++ } ++ ++ code = TREE_CODE(var); ++ gcc_assert(code == SSA_NAME || code == PARM_DECL); ++ ++ type = TREE_TYPE(var); ++ switch (TREE_CODE(type)) { ++ case INTEGER_TYPE: ++ case ENUMERAL_TYPE: ++ return false; ++ case BOOLEAN_TYPE: ++ return is_bool(var); ++ default: ++ return true; ++ } ++} ++ ++gimple get_def_stmt(const_tree node) ++{ ++ gcc_assert(node != NULL_TREE); ++ ++ if (skip_types(node)) ++ return NULL; ++ ++ if (TREE_CODE(node) != SSA_NAME) ++ return NULL; ++ return SSA_NAME_DEF_STMT(node); ++} ++ ++tree create_new_var(tree type) ++{ ++ tree new_var = create_tmp_var(type, "cicus"); ++ ++ add_referenced_var(new_var); ++ return new_var; ++} ++ ++static bool skip_cast(tree dst_type, const_tree rhs, bool force) ++{ ++ const_gimple def_stmt = get_def_stmt(rhs); ++ ++ if (force) ++ return false; ++ ++ if (is_gimple_constant(rhs)) ++ return false; ++ ++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP) ++ return false; ++ ++ if (!types_compatible_p(dst_type, TREE_TYPE(rhs))) ++ return false; ++ ++ // DI type can be on 32 bit (from create_assign) but overflow type stays DI ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) ++ return false; ++ ++ return true; ++} ++ ++tree cast_a_tree(tree type, tree var) ++{ ++ gcc_assert(type != NULL_TREE); ++ gcc_assert(var != NULL_TREE); ++ gcc_assert(fold_convertible_p(type, var)); ++ ++ return fold_convert(type, var); ++} ++ ++gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force) ++{ ++ gimple assign, def_stmt; ++ ++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE); ++ gcc_assert(!is_gimple_constant(rhs)); ++ if (gsi_end_p(*gsi) && before == AFTER_STMT) ++ gcc_unreachable(); ++ ++ def_stmt = get_def_stmt(rhs); ++ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && pointer_set_contains(visited->my_stmts, def_stmt)) ++ return def_stmt; ++ ++ if (lhs == CREATE_NEW_VAR) ++ lhs = create_new_var(dst_type); ++ ++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs)); ++ ++ if (!gsi_end_p(*gsi)) { ++ location_t loc = gimple_location(gsi_stmt(*gsi)); ++ gimple_set_location(assign, loc); ++ } ++ ++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ if (before) ++ gsi_insert_before(gsi, assign, GSI_NEW_STMT); 
++ else ++ gsi_insert_after(gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ return assign; ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c +new file mode 100644 +index 0000000..10cb20e +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c +@@ -0,0 +1,151 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++#include "size_overflow.h" ++ ++bool skip_expr_on_double_type(const_gimple stmt) ++{ ++ enum tree_code code = gimple_assign_rhs_code(stmt); ++ ++ switch (code) { ++ case RSHIFT_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case EXACT_DIV_EXPR: ++ case RDIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++static bool is_size_overflow_type(const_tree var) ++{ ++ const char *name; ++ const_tree type_name, type; ++ ++ if (var == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(var); ++ type_name = TYPE_NAME(type); ++ if (type_name == NULL_TREE) ++ return false; ++ ++ if (DECL_P(type_name)) ++ name = DECL_NAME_POINTER(type_name); ++ else ++ name = IDENTIFIER_POINTER(type_name); ++ ++ if (!strncmp(name, "size_overflow_type", 18)) ++ return true; ++ return false; ++} ++ ++static void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs) ++{ ++ const_tree orig_rhs1; ++ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs); ++ gimple down_cast, up_cast; ++ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt); ++ ++ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ down_lhs = gimple_assign_lhs(down_cast); ++ ++ gsi = gsi_for_stmt(use_stmt); ++ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); ++ new_lhs = gimple_assign_lhs(up_cast); ++ ++ orig_rhs1 = gimple_assign_rhs1(use_stmt); ++ if (operand_equal_p(orig_rhs1, rhs, 0)) ++ gimple_assign_set_rhs1(use_stmt, new_lhs); ++ else ++ gimple_assign_set_rhs2(use_stmt, new_lhs); ++ update_stmt(use_stmt); ++ ++ pointer_set_insert(visited->my_stmts, up_cast); ++ pointer_set_insert(visited->my_stmts, down_cast); ++ pointer_set_insert(visited->skip_expr_casts, up_cast); ++ pointer_set_insert(visited->skip_expr_casts, down_cast); ++} ++ ++static tree get_proper_unsigned_half_type(const_tree node) ++{ ++ tree new_type, type; ++ ++ gcc_assert(is_size_overflow_type(node)); ++ ++ type = TREE_TYPE(node); ++ switch (TYPE_MODE(type)) { ++ case HImode: ++ new_type = unsigned_intQI_type_node; ++ break; ++ case SImode: ++ new_type = unsigned_intHI_type_node; ++ break; ++ case DImode: ++ new_type = unsigned_intSI_type_node; ++ break; ++ case TImode: ++ new_type = unsigned_intDI_type_node; ++ break; ++ default: ++ gcc_unreachable(); ++ } ++ ++ if 
(TYPE_QUALS(type) != 0) ++ return build_qualified_type(new_type, TYPE_QUALS(type)); ++ return new_type; ++} ++ ++static void insert_cast_rhs(struct visited *visited, gimple stmt, tree rhs) ++{ ++ tree type; ++ ++ if (rhs == NULL_TREE) ++ return; ++ if (!is_size_overflow_type(rhs)) ++ return; ++ ++ type = get_proper_unsigned_half_type(rhs); ++ if (is_gimple_constant(rhs)) ++ return; ++ create_up_and_down_cast(visited, stmt, type, rhs); ++} ++ ++void insert_cast_expr(struct visited *visited, gimple stmt) ++{ ++ tree rhs1, rhs2; ++ ++ gcc_assert(skip_expr_on_double_type(stmt)); ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ insert_cast_rhs(visited, stmt, rhs1); ++ ++ rhs2 = gimple_assign_rhs2(stmt); ++ insert_cast_rhs(visited, stmt, rhs2); ++} ++ +diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h +new file mode 100644 +index 0000000..040f0f6 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow.h +@@ -0,0 +1,119 @@ ++#ifndef SIZE_OVERFLOW_H ++#define SIZE_OVERFLOW_H ++ ++#define CREATE_NEW_VAR NULL_TREE ++#define CANNOT_FIND_ARG 32 ++#define MAX_PARAM 31 ++#define BEFORE_STMT true ++#define AFTER_STMT false ++ ++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF " ++#define YES_ASM_STR "# size_overflow MARK_YES " ++#define OK_ASM_STR "# size_overflow " ++ ++enum mark { ++ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF ++}; ++ ++struct visited { ++ struct pointer_set_t *stmts; ++ struct pointer_set_t *my_stmts; ++ struct pointer_set_t *skip_expr_casts; ++ struct pointer_set_t *no_cast_check; ++}; ++ ++// size_overflow_plugin.c ++extern tree report_size_overflow_decl; ++extern tree size_overflow_type_HI; ++extern tree size_overflow_type_SI; ++extern tree size_overflow_type_DI; ++extern tree size_overflow_type_TI; ++ ++ ++// size_overflow_plugin_hash.c ++struct size_overflow_hash { ++ const struct size_overflow_hash * const next; ++ const char * const name; ++ const unsigned int param; ++}; ++ ++struct interesting_node { ++ struct interesting_node *next; ++ gimple first_stmt; ++ const_tree fndecl; ++ tree node; ++#if BUILDING_GCC_VERSION <= 4007 ++ VEC(tree, gc) *last_nodes; ++#else ++ vec<tree, va_gc> *last_nodes; ++#endif ++ unsigned int num; ++ enum mark intentional_attr_decl; ++ enum mark intentional_attr_cur_fndecl; ++ gimple intentional_mark_from_gimple; ++}; ++ ++extern bool is_size_overflow_asm(const_gimple stmt); ++extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl); ++extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl); ++extern bool is_missing_function(const_tree orig_fndecl, unsigned int num); ++extern bool is_a_return_check(const_tree node); ++extern const struct size_overflow_hash *get_function_hash(const_tree fndecl); ++extern unsigned int find_arg_number_tree(const_tree arg, const_tree func); ++ ++ ++// size_overflow_debug.c ++extern struct opt_pass *make_dump_pass(void); ++ ++ ++// intentional_overflow.c ++extern enum mark get_intentional_attr_type(const_tree node); ++extern bool is_size_overflow_intentional_asm_yes(const_gimple stmt); ++extern bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt); ++extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum); ++extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum); ++extern bool is_turn_off_intentional_attr(const_tree decl); ++extern void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned 
int argnum); ++extern void check_intentional_attribute_ipa(struct interesting_node *cur_node); ++extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs); ++extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs); ++extern bool is_a_constant_overflow(const_gimple stmt, const_tree rhs); ++extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2); ++extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs); ++extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs); ++ ++ ++// insert_size_overflow_check_ipa.c ++extern unsigned int search_function(void); ++extern unsigned int call_count; ++extern struct opt_pass *make_insert_size_overflow_check(void); ++extern const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum); ++ ++ ++// insert_size_overflow_asm.c ++extern struct opt_pass *make_insert_size_overflow_asm_pass(void); ++ ++ ++// misc.c ++extern void set_current_function_decl(tree fndecl); ++extern void unset_current_function_decl(void); ++extern gimple get_def_stmt(const_tree node); ++extern tree create_new_var(tree type); ++extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force); ++extern bool skip_types(const_tree var); ++extern tree cast_a_tree(tree type, tree var); ++ ++ ++// insert_size_overflow_check_core.c ++extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs); ++extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before); ++extern tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3); ++extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before); ++ ++ ++// remove_unnecessary_dup.c ++extern struct opt_pass *make_remove_unnecessary_dup_pass(void); ++extern void insert_cast_expr(struct visited *visited, gimple stmt); ++extern bool skip_expr_on_double_type(const_gimple stmt); ++ ++#endif +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c +new file mode 100644 +index 0000000..4378111 +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c +@@ -0,0 +1,116 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
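Illustration (not part of the patch): the size_overflow attribute that drives the machinery declared in this header is applied per argument at the C level. A minimal sketch, assuming the usual GCC attribute spelling; the wrapper name is invented, while the report_size_overflow() prototype matches the fntype built in size_overflow_start_unit() later in this diff:

    /* argument 1 of this hypothetical function is recomputed in double
       precision and range-checked by the plugin */
    void *alloc_buf(unsigned long size, int flags) __attribute__((size_overflow(1)));

    /* runtime hook supplied by the kernel; it logs the event and kills
       the triggering process */
    void report_size_overflow(const char *loc_file, unsigned int loc_line,
                              const char *current_func, const char *ssa_var);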
++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ ++ ++#include "gcc-common.h" ++ ++static unsigned int dump_functions(void) ++{ ++ struct cgraph_node *node; ++ ++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { ++ basic_block bb; ++ ++ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node))); ++ current_function_decl = NODE_DECL(node); ++ ++ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl)); ++ ++ FOR_ALL_BB_FN(bb, cfun) { ++ gimple_stmt_iterator si; ++ ++ fprintf(stderr, "<bb %u>:\n", bb->index); ++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si)) ++ debug_gimple_stmt(gsi_stmt(si)); ++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) ++ debug_gimple_stmt(gsi_stmt(si)); ++ fprintf(stderr, "\n"); ++ } ++ ++ fprintf(stderr, "-------------------------------------------------------------------------\n"); ++ ++ pop_cfun(); ++ current_function_decl = NULL_TREE; ++ } ++ ++ fprintf(stderr, "###############################################################################\n"); ++ ++ return 0; ++} ++ ++#if BUILDING_GCC_VERSION >= 4009 ++static const struct pass_data dump_pass_data = { ++#else ++static struct ipa_opt_pass_d dump_pass = { ++ .pass = { ++#endif ++ .type = SIMPLE_IPA_PASS, ++ .name = "dump", ++#if BUILDING_GCC_VERSION >= 4008 ++ .optinfo_flags = OPTGROUP_NONE, ++#endif ++#if BUILDING_GCC_VERSION >= 4009 ++ .has_gate = false, ++ .has_execute = true, ++#else ++ .gate = NULL, ++ .execute = dump_functions, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++#endif ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0, ++#if BUILDING_GCC_VERSION < 4009 ++ }, ++ .generate_summary = NULL, ++ .write_summary = NULL, ++ .read_summary = NULL, ++#if BUILDING_GCC_VERSION >= 4006 ++ .write_optimization_summary = NULL, ++ .read_optimization_summary = NULL, ++#endif ++ .stmt_fixup = NULL, ++ .function_transform_todo_flags_start = 0, ++ .function_transform = NULL, ++ .variable_transform = NULL, ++#endif ++}; ++ ++#if BUILDING_GCC_VERSION >= 4009 ++namespace { ++class dump_pass : public ipa_opt_pass_d { ++public: ++ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {} ++ unsigned int execute() { return dump_functions(); } ++}; ++} ++#endif ++ ++struct opt_pass *make_dump_pass(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return new dump_pass(); ++#else ++ return &dump_pass.pass; ++#endif ++} +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data new file mode 100644 -index 0000000..ebbd9a3 +index 0000000..41777a8 --- /dev/null -+++ b/tools/gcc/size_overflow_hash.data -@@ -0,0 +1,5933 @@ ++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data +@@ -0,0 +1,5934 @@ +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL +ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL @@ -110296,6 +114814,7 @@ index 0000000..ebbd9a3 +apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737 +SyS_io_getevents_10756 SyS_io_getevents 3 10756 NULL +vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL ++rd_build_prot_space_10761 rd_build_prot_space 2-3 10761 NULL +kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL +__qp_memcpy_to_queue_10779 __qp_memcpy_to_queue 
2-4 10779 NULL +diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL @@ -115279,11 +119798,11 @@ index 0000000..ebbd9a3 +lookup_inline_extent_backref_65493 lookup_inline_extent_backref 9 65493 NULL +nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL +tree_mod_log_eb_copy_65535 tree_mod_log_eb_copy 6 65535 NULL -diff --git a/tools/gcc/size_overflow_hash_aux.data b/tools/gcc/size_overflow_hash_aux.data +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data new file mode 100644 index 0000000..560cd7b --- /dev/null -+++ b/tools/gcc/size_overflow_hash_aux.data ++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data @@ -0,0 +1,92 @@ +spa_set_aux_vdevs_746 spa_set_aux_vdevs 3 746 NULL +zfs_lookup_2144 zfs_lookup 0 2144 NULL @@ -115377,12 +119896,12 @@ index 0000000..560cd7b +proc_copyin_string_62019 proc_copyin_string 4 62019 NULL +random_get_pseudo_bytes_64611 random_get_pseudo_bytes 2 64611 NULL +zpios_read_64734 zpios_read 3 64734 NULL -diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c new file mode 100644 -index 0000000..948ec25 +index 0000000..900661b --- /dev/null -+++ b/tools/gcc/size_overflow_plugin.c -@@ -0,0 +1,4169 @@ ++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c +@@ -0,0 +1,259 @@ +/* + * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> + * Licensed under the GPL v2, or (at your option) v3 @@ -115398,100 +119917,27 @@ index 0000000..948ec25 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. 
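Illustration (not part of the patch): at the source level, the recompute-and-check scheme this header comment describes behaves roughly like the hand-written sketch below. The plugin actually rewrites GIMPLE, so this is only an approximation; the names are invented and report_size_overflow() is the external hook declared elsewhere in this diff:

    #include <stdint.h>

    extern void report_size_overflow(const char *file, unsigned int line,
                                     const char *func, const char *ssa_var);

    static uint32_t add_checked(uint32_t a, uint32_t b)
    {
        /* recompute in double integer precision (SImode -> DImode) */
        uint64_t wide = (uint64_t)a + (uint64_t)b;

        /* compare the wide result against TYPE_MAX of the original type */
        if (wide > UINT32_MAX)
            report_size_overflow(__FILE__, __LINE__, __func__, "a + b");

        return (uint32_t)wide; /* narrow back to the original type */
    }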
+ * + * Usage: -+ * $ # for 4.5/4.6/C based 4.7 -+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu99 -ggdb -o size_overflow_plugin.so size_overflow_plugin.c -+ * $ # for C++ based 4.7/4.8+ -+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu++98 -fno-rtti -ggdb -o size_overflow_plugin.so size_overflow_plugin.c -+ * -+ * $ gcc -fplugin=./size_overflow_plugin.so test.c -O2 ++ * $ make ++ * $ make run + */ + +#include "gcc-common.h" ++#include "size_overflow.h" + +int plugin_is_GPL_compatible; + -+static struct plugin_info size_overflow_plugin_info = { -+ .version = "20140407", -+ .help = "no-size-overflow\tturn off size overflow checking\n", -+}; ++tree report_size_overflow_decl; + -+#define BEFORE_STMT true -+#define AFTER_STMT false -+#define CREATE_NEW_VAR NULL_TREE -+#define CODES_LIMIT 32 -+#define MAX_PARAM 31 -+#define VEC_LEN 128 -+#define RET_CHECK NULL_TREE -+#define CANNOT_FIND_ARG 32 -+#define WRONG_NODE 32 -+#define NOT_INTENTIONAL_ASM NULL -+#define MIN_CHECK true -+#define MAX_CHECK false -+ -+#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF " -+#define YES_ASM_STR "# size_overflow MARK_YES " -+#define OK_ASM_STR "# size_overflow " -+ -+struct size_overflow_hash { -+ const struct size_overflow_hash * const next; -+ const char * const name; -+ const unsigned int param; -+}; -+ -+#include "size_overflow_hash.h" -+#include "size_overflow_hash_aux.h" ++tree size_overflow_type_HI; ++tree size_overflow_type_SI; ++tree size_overflow_type_DI; ++tree size_overflow_type_TI; + -+enum mark { -+ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF -+}; -+ -+static unsigned int call_count; -+ -+enum stmt_flags { -+ MY_STMT, NO_CAST_CHECK, VISITED_STMT, NO_FLAGS -+}; -+ -+struct visited { -+ struct visited *next; -+ const_tree fndecl; -+ unsigned int num; -+}; -+ -+struct next_cgraph_node { -+ struct next_cgraph_node *next; -+ struct cgraph_node *current_function; -+ tree callee_fndecl; -+ unsigned int num; -+}; -+ -+struct interesting_node { -+ struct interesting_node *next; -+ gimple first_stmt; -+ const_tree fndecl; -+ tree node; -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC(tree, gc) *last_nodes; -+#else -+ vec<tree, va_gc> *last_nodes; -+#endif -+ unsigned int num; -+ enum mark intentional_attr_decl; -+ enum mark intentional_attr_cur_fndecl; -+ gimple intentional_mark_from_gimple; ++static struct plugin_info size_overflow_plugin_info = { ++ .version = "20140430", ++ .help = "no-size-overflow\tturn off size overflow checking\n", +}; + -+static tree report_size_overflow_decl; -+ -+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs); -+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs); -+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs); -+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs); -+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs); -+ -+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before); -+static tree get_size_overflow_type(gimple stmt, const_tree node); -+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3); -+ +static tree 
handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) +{ + unsigned int arg_count; @@ -115587,117 +120033,166 @@ index 0000000..948ec25 + register_attribute(&intentional_overflow_attr); +} + -+static enum stmt_flags get_stmt_flag(gimple stmt) ++static tree create_typedef(tree type, const char* ident) +{ -+ bool bit_1, bit_2; ++ tree new_type, decl; + -+ bit_1 = gimple_plf(stmt, GF_PLF_1); -+ bit_2 = gimple_plf(stmt, GF_PLF_2); -+ -+ if (!bit_1 && !bit_2) -+ return NO_FLAGS; -+ if (bit_1 && bit_2) -+ return MY_STMT; -+ if (!bit_1 && bit_2) -+ return VISITED_STMT; -+ return NO_CAST_CHECK; ++ new_type = build_variant_type_copy(type); ++ decl = build_decl(BUILTINS_LOCATION, TYPE_DECL, get_identifier(ident), new_type); ++ DECL_ORIGINAL_TYPE(decl) = type; ++ TYPE_NAME(new_type) = decl; ++ return new_type; +} + -+static void set_stmt_flag(gimple stmt, enum stmt_flags new_flag) ++// Create the noreturn report_size_overflow() function decl. ++static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data) +{ -+ bool bit_1, bit_2; ++ tree const_char_ptr_type_node; ++ tree fntype; + -+ switch (new_flag) { -+ case NO_FLAGS: -+ bit_1 = bit_2 = false; -+ break; -+ case MY_STMT: -+ bit_1 = bit_2 = true; -+ break; -+ case VISITED_STMT: -+ bit_1 = false; -+ bit_2 = true; -+ break; -+ case NO_CAST_CHECK: -+ bit_1 = true; -+ bit_2 = false; -+ break; -+ default: -+ gcc_unreachable(); -+ } ++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0)); ++ ++ size_overflow_type_HI = create_typedef(intHI_type_node, "size_overflow_type_HI"); ++ size_overflow_type_SI = create_typedef(intSI_type_node, "size_overflow_type_SI"); ++ size_overflow_type_DI = create_typedef(intDI_type_node, "size_overflow_type_DI"); ++ size_overflow_type_TI = create_typedef(intTI_type_node, "size_overflow_type_TI"); ++ ++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var) ++ fntype = build_function_type_list(void_type_node, ++ const_char_ptr_type_node, ++ unsigned_type_node, ++ const_char_ptr_type_node, ++ const_char_ptr_type_node, ++ NULL_TREE); ++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype); + -+ gimple_set_plf(stmt, GF_PLF_1, bit_1); -+ gimple_set_plf(stmt, GF_PLF_2, bit_2); ++ DECL_ASSEMBLER_NAME(report_size_overflow_decl); ++ TREE_PUBLIC(report_size_overflow_decl) = 1; ++ DECL_EXTERNAL(report_size_overflow_decl) = 1; ++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1; ++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1; +} + -+static bool is_bool(const_tree node) -+{ -+ const_tree type; + -+ if (node == NULL_TREE) -+ return false; ++extern struct gimple_opt_pass pass_dce; + -+ type = TREE_TYPE(node); -+ if (!INTEGRAL_TYPE_P(type)) -+ return false; -+ if (TREE_CODE(type) == BOOLEAN_TYPE) -+ return true; -+ if (TYPE_PRECISION(type) == 1) -+ return true; -+ return false; ++static struct opt_pass *make_dce_pass(void) ++{ ++#if BUILDING_GCC_VERSION >= 4009 ++ return make_pass_dce(g); ++#else ++ return &pass_dce.pass; ++#endif +} + -+static bool skip_types(const_tree var) ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) +{ -+ tree type; -+ enum tree_code code; ++ int i; ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ bool enable = true; ++ struct register_pass_info 
insert_size_overflow_asm_pass_info; ++ struct register_pass_info __unused dump_before_pass_info; ++ struct register_pass_info __unused dump_after_pass_info; ++ struct register_pass_info insert_size_overflow_check_info; ++ struct register_pass_info dce_pass_info; ++ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = { ++ { ++ .base = &report_size_overflow_decl, ++ .nelt = 1, ++ .stride = sizeof(report_size_overflow_decl), ++ .cb = &gt_ggc_mx_tree_node, ++ .pchw = &gt_pch_nx_tree_node ++ }, ++ LAST_GGC_ROOT_TAB ++ }; + ++ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass(); ++ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa"; ++ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1; ++ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER; + ++ dump_before_pass_info.pass = make_dump_pass(); ++ dump_before_pass_info.reference_pass_name = "increase_alignment"; ++ dump_before_pass_info.ref_pass_instance_number = 1; ++ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ insert_size_overflow_check_info.pass = make_insert_size_overflow_check(); ++ insert_size_overflow_check_info.reference_pass_name = "increase_alignment"; ++ insert_size_overflow_check_info.ref_pass_instance_number = 1; ++ insert_size_overflow_check_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ dump_after_pass_info.pass = make_dump_pass(); ++ dump_after_pass_info.reference_pass_name = "increase_alignment"; ++ dump_after_pass_info.ref_pass_instance_number = 1; ++ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE; ++ ++ dce_pass_info.pass = make_dce_pass(); ++ dce_pass_info.reference_pass_name = "vrp"; ++ dce_pass_info.ref_pass_instance_number = 1; ++ dce_pass_info.pos_op = PASS_POS_INSERT_AFTER; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "no-size-overflow")) { ++ enable = false; ++ continue; ++ } ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info); ++ if (enable) { ++ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info); ++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_check_info); ++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES,
register_attributes, NULL); ++ ++ return 0; +} +diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c +new file mode 100644 +index 0000000..0888f6c +--- /dev/null ++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c +@@ -0,0 +1,364 @@ ++/* ++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com> ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * Documentation: ++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043 ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ make ++ * $ make run ++ */ + -+static inline gimple get_def_stmt(const_tree node) -+{ -+ gcc_assert(node != NULL_TREE); ++#include "gcc-common.h" ++#include "size_overflow.h" + -+ if (skip_types(node)) -+ return NULL; ++#include "size_overflow_hash.h" ++#include "size_overflow_hash_aux.h" + -+ if (TREE_CODE(node) != SSA_NAME) -+ return NULL; -+ return SSA_NAME_DEF_STMT(node); -+} ++#define CODES_LIMIT 32 + +static unsigned char get_tree_code(const_tree type) +{ @@ -115841,7 +120336,7 @@ index 0000000..948ec25 + return NULL; +} + -+static const struct size_overflow_hash *get_function_hash(const_tree fndecl) ++const struct size_overflow_hash *get_function_hash(const_tree fndecl) +{ + const struct size_overflow_hash *entry; + struct function_hash fn_hash_data; @@ -115886,7 +120381,7 @@ index 0000000..948ec25 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash); +} + -+static unsigned int find_arg_number_tree(const_tree arg, const_tree func) ++unsigned int find_arg_number_tree(const_tree arg, const_tree func) +{ + tree var; + unsigned int argnum = 1; @@ -115904,1296 +120399,65 @@ index 0000000..948ec25 + return CANNOT_FIND_ARG; +} + -+static tree create_new_var(tree type) -+{ -+ tree new_var = create_tmp_var(type, "cicus"); -+ -+ add_referenced_var(new_var); -+ return new_var; -+} -+ -+static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2) -+{ -+ gimple assign; -+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); -+ tree type = TREE_TYPE(rhs1); -+ tree lhs = create_new_var(type); -+ -+ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2))); -+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2); -+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign)); -+ -+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); -+ update_stmt(assign); -+ set_stmt_flag(assign, MY_STMT); -+ return assign; -+} -+ -+static tree cast_a_tree(tree type, tree var) -+{ -+ gcc_assert(type != NULL_TREE); -+ gcc_assert(var != NULL_TREE); -+ gcc_assert(fold_convertible_p(type, var)); -+ -+ return fold_convert(type, var); -+} -+ -+static tree get_lhs(const_gimple stmt) -+{ -+ switch (gimple_code(stmt)) { -+ case GIMPLE_ASSIGN: -+ case GIMPLE_CALL: -+ return gimple_get_lhs(stmt); -+ case GIMPLE_PHI: -+ return gimple_phi_result(stmt); -+ default: -+ return NULL_TREE; -+ } -+} -+ -+static bool skip_cast(tree dst_type, const_tree rhs, bool force) -+{ -+ const_gimple def_stmt = get_def_stmt(rhs); -+ -+ if (force) -+ return false; -+ -+ if (is_gimple_constant(rhs)) -+ return false; -+ -+ if 
(!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP) -+ return false; -+ -+ if (!types_compatible_p(dst_type, TREE_TYPE(rhs))) -+ return false; -+ -+ // DI type can be on 32 bit (from create_assign) but overflow type stays DI -+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) -+ return false; -+ -+ return true; -+} -+ -+static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force) -+{ -+ gimple assign, def_stmt; -+ -+ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE); -+ if (gsi_end_p(*gsi) && before == AFTER_STMT) -+ gcc_unreachable(); -+ -+ def_stmt = get_def_stmt(rhs); -+ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && get_stmt_flag(def_stmt) == MY_STMT) -+ return def_stmt; -+ -+ if (lhs == CREATE_NEW_VAR) -+ lhs = create_new_var(dst_type); -+ -+ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs)); -+ -+ if (!gsi_end_p(*gsi)) { -+ location_t loc = gimple_location(gsi_stmt(*gsi)); -+ gimple_set_location(assign, loc); -+ } -+ -+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign)); -+ -+ if (before) -+ gsi_insert_before(gsi, assign, GSI_NEW_STMT); -+ else -+ gsi_insert_after(gsi, assign, GSI_NEW_STMT); -+ update_stmt(assign); -+ return assign; -+} -+ -+static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before) -+{ -+ gimple_stmt_iterator gsi; -+ tree lhs; -+ gimple new_stmt; -+ -+ if (rhs == NULL_TREE) -+ return NULL_TREE; -+ -+ gsi = gsi_for_stmt(stmt); -+ new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false); -+ set_stmt_flag(new_stmt, MY_STMT); -+ -+ lhs = get_lhs(new_stmt); -+ gcc_assert(lhs != NULL_TREE); -+ return lhs; -+} -+ -+static tree cast_to_TI_type(gimple stmt, tree node) -+{ -+ gimple_stmt_iterator gsi; -+ gimple cast_stmt; -+ tree type = TREE_TYPE(node); -+ -+ if (types_compatible_p(type, intTI_type_node)) -+ return node; -+ -+ gsi = gsi_for_stmt(stmt); -+ cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); -+ set_stmt_flag(cast_stmt, MY_STMT); -+ return gimple_assign_lhs(cast_stmt); -+} -+ -+static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before) -+{ -+ tree lhs, new_lhs; -+ gimple_stmt_iterator gsi; -+ -+ if (rhs1 == NULL_TREE) { -+ debug_gimple_stmt(oldstmt); -+ error("%s: rhs1 is NULL_TREE", __func__); -+ gcc_unreachable(); -+ } -+ -+ switch (gimple_code(oldstmt)) { -+ case GIMPLE_ASM: -+ lhs = rhs1; -+ break; -+ case GIMPLE_CALL: -+ case GIMPLE_ASSIGN: -+ lhs = gimple_get_lhs(oldstmt); -+ break; -+ default: -+ debug_gimple_stmt(oldstmt); -+ gcc_unreachable(); -+ } -+ -+ gsi = gsi_for_stmt(oldstmt); -+ pointer_set_insert(visited, oldstmt); -+ if (lookup_stmt_eh_lp(oldstmt) != 0) { -+ basic_block next_bb, cur_bb; -+ const_edge e; -+ -+ gcc_assert(before == false); -+ gcc_assert(stmt_can_throw_internal(oldstmt)); -+ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL); -+ gcc_assert(!gsi_end_p(gsi)); -+ -+ cur_bb = gimple_bb(oldstmt); -+ next_bb = cur_bb->next_bb; -+ e = find_edge(cur_bb, next_bb); -+ gcc_assert(e != NULL); -+ gcc_assert(e->flags & EDGE_FALLTHRU); -+ -+ gsi = gsi_after_labels(next_bb); -+ gcc_assert(!gsi_end_p(gsi)); -+ -+ before = true; -+ oldstmt = gsi_stmt(gsi); -+ } -+ -+ new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before); -+ return new_lhs; -+} -+ -+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree 
node, tree rhs1, tree rhs2, tree __unused rhs3) -+{ -+ gimple stmt; -+ gimple_stmt_iterator gsi; -+ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt); -+ -+ if (get_stmt_flag(oldstmt) == MY_STMT) -+ return lhs; -+ -+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { -+ rhs1 = gimple_assign_rhs1(oldstmt); -+ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT); -+ } -+ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { -+ rhs2 = gimple_assign_rhs2(oldstmt); -+ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT); -+ } -+ -+ stmt = gimple_copy(oldstmt); -+ gimple_set_location(stmt, gimple_location(oldstmt)); -+ set_stmt_flag(stmt, MY_STMT); -+ -+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) -+ gimple_assign_set_rhs_code(stmt, MULT_EXPR); -+ -+ size_overflow_type = get_size_overflow_type(oldstmt, node); -+ -+ new_var = create_new_var(size_overflow_type); -+ new_var = make_ssa_name(new_var, stmt); -+ gimple_assign_set_lhs(stmt, new_var); -+ -+ if (rhs1 != NULL_TREE) -+ gimple_assign_set_rhs1(stmt, rhs1); -+ -+ if (rhs2 != NULL_TREE) -+ gimple_assign_set_rhs2(stmt, rhs2); -+#if BUILDING_GCC_VERSION >= 4006 -+ if (rhs3 != NULL_TREE) -+ gimple_assign_set_rhs3(stmt, rhs3); -+#endif -+ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); -+ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); -+ -+ gsi = gsi_for_stmt(oldstmt); -+ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); -+ update_stmt(stmt); -+ pointer_set_insert(visited, oldstmt); -+ return gimple_assign_lhs(stmt); -+} -+ -+static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb) -+{ -+ gimple assign; -+ gimple_stmt_iterator gsi; -+ basic_block first_bb; -+ -+ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg)); -+ -+ if (bb->index == 0) { -+ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest; -+ gcc_assert(dom_info_available_p(CDI_DOMINATORS)); -+ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun)); -+ bb = first_bb; -+ } -+ -+ gsi = gsi_after_labels(bb); -+ assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false); -+ set_stmt_flag(assign, MY_STMT); -+ -+ return gimple_assign_lhs(assign); -+} -+ -+static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg) -+{ -+ gimple_stmt_iterator gsi; -+ gimple assign, def_stmt = get_def_stmt(new_arg); -+ -+ if (gimple_code(def_stmt) == GIMPLE_PHI) { -+ gsi = gsi_after_labels(gimple_bb(def_stmt)); -+ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true); -+ } else { -+ gsi = gsi_for_stmt(def_stmt); -+ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true); -+ } -+ -+ set_stmt_flag(assign, MY_STMT); -+ return gimple_assign_lhs(assign); -+} -+ -+static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type) -+{ -+ basic_block bb; -+ gimple_stmt_iterator gsi; -+ const_gimple def_stmt; -+ gimple assign; -+ -+ def_stmt = get_def_stmt(arg); -+ bb = gimple_bb(def_stmt); -+ gcc_assert(bb->index != 0); -+ gsi = gsi_after_labels(bb); -+ -+ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false); -+ set_stmt_flag(assign, MY_STMT); -+ return gimple_assign_lhs(assign); -+} -+ -+static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i) -+{ -+ tree size_overflow_type; -+ tree arg; -+ const_gimple def_stmt; -+ -+ if (new_arg != NULL_TREE && is_gimple_constant(new_arg)) -+ return new_arg; -+ -+ arg = 
gimple_phi_arg_def(oldstmt, i); -+ def_stmt = get_def_stmt(arg); -+ gcc_assert(def_stmt != NULL); -+ size_overflow_type = get_size_overflow_type(oldstmt, arg); -+ -+ switch (gimple_code(def_stmt)) { -+ case GIMPLE_PHI: -+ return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type); -+ case GIMPLE_NOP: { -+ basic_block bb; -+ -+ bb = gimple_phi_arg_edge(oldstmt, i)->src; -+ return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb); -+ } -+ case GIMPLE_ASM: { -+ gimple_stmt_iterator gsi; -+ gimple assign, stmt = get_def_stmt(arg); -+ -+ gsi = gsi_for_stmt(stmt); -+ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false); -+ set_stmt_flag(assign, MY_STMT); -+ return gimple_assign_lhs(assign); -+ } -+ default: -+ gcc_assert(new_arg != NULL_TREE); -+ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type)); -+ return use_phi_ssa_name(ssa_name_var, new_arg); -+ } -+} -+ -+static gimple overflow_create_phi_node(gimple oldstmt, tree result) -+{ -+ basic_block bb; -+ gimple phi; -+ gimple_seq seq; -+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); -+ -+ bb = gsi_bb(gsi); -+ -+ if (result == NULL_TREE) { -+ tree old_result = gimple_phi_result(oldstmt); -+ tree size_overflow_type = get_size_overflow_type(oldstmt, old_result); -+ -+ result = create_new_var(size_overflow_type); -+ } -+ -+ phi = create_phi_node(result, bb); -+ gimple_phi_set_result(phi, make_ssa_name(result, phi)); -+ seq = phi_nodes(bb); -+ gsi = gsi_last(seq); -+ gsi_remove(&gsi, false); -+ -+ gsi = gsi_for_stmt(oldstmt); -+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT); -+ gimple_set_bb(phi, bb); -+ set_stmt_flag(phi, MY_STMT); -+ return phi; -+} -+ -+#if BUILDING_GCC_VERSION <= 4007 -+static tree create_new_phi_node(VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt) -+#else -+static tree create_new_phi_node(vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt) -+#endif -+{ -+ gimple new_phi; -+ unsigned int i; -+ tree arg, result; -+ location_t loc = gimple_location(oldstmt); -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ gcc_assert(!VEC_empty(tree, *args)); -+#else -+ gcc_assert(!args->is_empty()); -+#endif -+ -+ new_phi = overflow_create_phi_node(oldstmt, ssa_name_var); -+ result = gimple_phi_result(new_phi); -+ ssa_name_var = SSA_NAME_VAR(result); -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ FOR_EACH_VEC_ELT(tree, *args, i, arg) { -+#else -+ FOR_EACH_VEC_SAFE_ELT(args, i, arg) { -+#endif -+ arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i); -+ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc); -+ } -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC_free(tree, heap, *args); -+#else -+ vec_free(args); -+#endif -+ update_stmt(new_phi); -+ return result; -+} -+ -+static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result) -+{ -+ tree ssa_name_var = NULL_TREE; -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC(tree, heap) *args = NULL; -+#else -+ vec<tree, va_heap, vl_embed> *args = NULL; -+#endif -+ gimple oldstmt = get_def_stmt(orig_result); -+ unsigned int i, len = gimple_phi_num_args(oldstmt); -+ -+ pointer_set_insert(visited, oldstmt); -+ for (i = 0; i < len; i++) { -+ tree arg, new_arg; -+ -+ arg = gimple_phi_arg_def(oldstmt, i); -+ new_arg = expand(visited, caller_node, arg); -+ -+ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE) -+ ssa_name_var = SSA_NAME_VAR(new_arg); -+ -+ if (is_gimple_constant(arg)) { -+ tree size_overflow_type = get_size_overflow_type(oldstmt, arg); -+ -+ new_arg = 
cast_a_tree(size_overflow_type, arg); -+ } -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC_safe_push(tree, heap, args, new_arg); -+#else -+ vec_safe_push(args, new_arg); -+#endif -+ } -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ return create_new_phi_node(&args, ssa_name_var, oldstmt); -+#else -+ return create_new_phi_node(args, ssa_name_var, oldstmt); -+#endif -+} -+ -+static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs) ++static const char *get_asm_string(const_gimple stmt) +{ -+ gimple assign; -+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); -+ tree origtype = TREE_TYPE(orig_rhs); -+ -+ gcc_assert(is_gimple_assign(stmt)); ++ if (!stmt) ++ return NULL; ++ if (gimple_code(stmt) != GIMPLE_ASM) ++ return NULL; + -+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false); -+ set_stmt_flag(assign, MY_STMT); -+ return gimple_assign_lhs(assign); ++ return gimple_asm_string(stmt); +} + -+static bool is_a_cast_and_const_overflow(const_tree no_const_rhs) ++bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt) +{ -+ const_tree rhs1, lhs, rhs1_type, lhs_type; -+ enum machine_mode lhs_mode, rhs_mode; -+ gimple def_stmt = get_def_stmt(no_const_rhs); -+ -+ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) -+ return false; ++ const char *str; + -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ lhs = gimple_assign_lhs(def_stmt); -+ rhs1_type = TREE_TYPE(rhs1); -+ lhs_type = TREE_TYPE(lhs); -+ rhs_mode = TYPE_MODE(rhs1_type); -+ lhs_mode = TYPE_MODE(lhs_type); -+ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode) ++ str = get_asm_string(stmt); ++ if (!str) + return false; -+ -+ return true; -+} -+ -+static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt) -+{ -+ tree rhs1 = gimple_assign_rhs1(stmt); -+ tree lhs = gimple_assign_lhs(stmt); -+ const_tree rhs1_type = TREE_TYPE(rhs1); -+ const_tree lhs_type = TREE_TYPE(lhs); -+ -+ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type)) -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ -+ return create_assign(visited, stmt, rhs1, AFTER_STMT); ++ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1); +} + -+static bool no_uses(tree node) ++bool is_size_overflow_intentional_asm_yes(const_gimple stmt) +{ -+ imm_use_iterator imm_iter; -+ use_operand_p use_p; -+ -+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) { -+ const_gimple use_stmt = USE_STMT(use_p); ++ const char *str; + -+ if (use_stmt == NULL) -+ return true; -+ if (is_gimple_debug(use_stmt)) -+ continue; ++ str = get_asm_string(stmt); ++ if (!str) + return false; -+ } -+ return true; ++ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1); +} + -+// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max -+static bool is_const_plus_unsigned_signed_truncation(const_tree lhs) ++bool is_size_overflow_asm(const_gimple stmt) +{ -+ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs; -+ gimple def_stmt = get_def_stmt(lhs); -+ -+ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) -+ return false; -+ -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ rhs_type = TREE_TYPE(rhs1); -+ lhs_type = TREE_TYPE(lhs); -+ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type)) -+ return false; -+ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type)) -+ return false; -+ -+ def_stmt = get_def_stmt(rhs1); -+ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3) -+ return false; -+ -+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR) -+ return false; ++ const 
char *str; + -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ rhs2 = gimple_assign_rhs2(def_stmt); -+ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2)) ++ str = get_asm_string(stmt); ++ if (!str) + return false; -+ -+ if (is_gimple_constant(rhs2)) -+ not_const_rhs = rhs1; -+ else -+ not_const_rhs = rhs2; -+ -+ return no_uses(not_const_rhs); ++ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1); +} + -+static bool skip_lhs_cast_check(const_gimple stmt) ++bool is_a_return_check(const_tree node) +{ -+ const_tree rhs = gimple_assign_rhs1(stmt); -+ const_gimple def_stmt = get_def_stmt(rhs); -+ -+ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max -+ if (gimple_code(def_stmt) == GIMPLE_ASM) -+ return true; -+ -+ if (is_const_plus_unsigned_signed_truncation(rhs)) ++ if (TREE_CODE(node) == FUNCTION_DECL) + return true; + ++ gcc_assert(TREE_CODE(node) == PARM_DECL); + return false; +} + -+static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt) -+{ -+ bool cast_lhs, cast_rhs; -+ tree lhs = gimple_assign_lhs(stmt); -+ tree rhs = gimple_assign_rhs1(stmt); -+ const_tree lhs_type = TREE_TYPE(lhs); -+ const_tree rhs_type = TREE_TYPE(rhs); -+ enum machine_mode lhs_mode = TYPE_MODE(lhs_type); -+ enum machine_mode rhs_mode = TYPE_MODE(rhs_type); -+ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode); -+ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode); -+ -+ static bool check_lhs[3][4] = { -+ // ss su us uu -+ { false, true, true, false }, // lhs > rhs -+ { false, false, false, false }, // lhs = rhs -+ { true, true, true, true }, // lhs < rhs -+ }; -+ -+ static bool check_rhs[3][4] = { -+ // ss su us uu -+ { true, false, true, true }, // lhs > rhs -+ { true, false, true, true }, // lhs = rhs -+ { true, false, true, true }, // lhs < rhs -+ }; -+ -+ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!! 
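Reading note (not part of the patch): the two truth tables in the removed create_cast_overflow_check() above are indexed by the size relation between lhs and rhs (rows: wider, equal, narrower) and by the signedness pair TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type) (columns ss/su/us/uu, lhs signedness first). Per check_lhs, every narrowing cast gets a range check on the result, while a widening cast gets one only when the signedness changes. A sketch of the sign-changing widening case, with invented names:

    extern void report_size_overflow(const char *file, unsigned int line,
                                     const char *func, const char *ssa_var);

    static unsigned long long widen(int v)
    {
        /* s32 -> u64: a negative source would wrap to a huge unsigned
           value, so a bound check is inserted before the cast */
        if (v < 0)
            report_size_overflow(__FILE__, __LINE__, __func__, "v");
        return (unsigned long long)v;
    }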
-+ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode)) -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ -+ if (lhs_size > rhs_size) { -+ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; -+ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; -+ } else if (lhs_size == rhs_size) { -+ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; -+ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; -+ } else { -+ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; -+ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)]; -+ } -+ -+ if (!cast_lhs && !cast_rhs) -+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); -+ -+ if (cast_lhs && !skip_lhs_cast_check(stmt)) -+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT); -+ -+ if (cast_rhs) -+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT); -+ -+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); -+} -+ -+static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt) -+{ -+ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt); -+ -+ if (get_stmt_flag(stmt) == MY_STMT) -+ return lhs; -+ -+ rhs1 = gimple_assign_rhs1(stmt); -+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE) -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ -+ new_rhs1 = expand(visited, caller_node, rhs1); -+ -+ if (new_rhs1 == NULL_TREE) -+ return create_cast_assign(visited, stmt); -+ -+ if (get_stmt_flag(stmt) == NO_CAST_CHECK) -+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); -+ -+ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) { -+ tree size_overflow_type = get_size_overflow_type(stmt, rhs1); -+ -+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); -+ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ } -+ -+ if (!gimple_assign_cast_p(stmt)) -+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); -+ -+ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt); -+} -+ -+static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt) -+{ -+ tree rhs1, lhs = gimple_assign_lhs(stmt); -+ gimple def_stmt = get_def_stmt(lhs); -+ -+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP); -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ -+ if (is_gimple_constant(rhs1)) -+ return create_assign(visited, def_stmt, lhs, AFTER_STMT); -+ -+ switch (TREE_CODE(rhs1)) { -+ case SSA_NAME: -+ return handle_unary_rhs(visited, caller_node, def_stmt); -+ case ARRAY_REF: -+ case BIT_FIELD_REF: -+ case ADDR_EXPR: -+ case COMPONENT_REF: -+ case INDIRECT_REF: -+#if BUILDING_GCC_VERSION >= 4006 -+ case MEM_REF: -+#endif -+ case TARGET_MEM_REF: -+ case VIEW_CONVERT_EXPR: -+ return create_assign(visited, def_stmt, lhs, AFTER_STMT); -+ case PARM_DECL: -+ case VAR_DECL: -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ -+ default: -+ debug_gimple_stmt(def_stmt); -+ debug_tree(rhs1); -+ gcc_unreachable(); -+ } -+} -+ -+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value) -+{ -+ gimple cond_stmt; -+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); -+ -+ 
cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE); -+ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); -+ update_stmt(cond_stmt); -+} -+ -+static tree create_string_param(tree string) -+{ -+ tree i_type, a_type; -+ const int length = TREE_STRING_LENGTH(string); -+ -+ gcc_assert(length > 0); -+ -+ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1)); -+ a_type = build_array_type(char_type_node, i_type); -+ -+ TREE_TYPE(string) = a_type; -+ TREE_CONSTANT(string) = 1; -+ TREE_READONLY(string) = 1; -+ -+ return build1(ADDR_EXPR, ptr_type_node, string); -+} -+ -+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min) -+{ -+ gimple func_stmt; -+ const_gimple def_stmt; -+ const_tree loc_line; -+ tree loc_file, ssa_name, current_func; -+ expanded_location xloc; -+ char *ssa_name_buf; -+ int len; -+ struct cgraph_edge *edge; -+ struct cgraph_node *callee_node; -+ int frequency; -+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); -+ -+ def_stmt = get_def_stmt(arg); -+ xloc = expand_location(gimple_location(def_stmt)); -+ -+ if (!gimple_has_location(def_stmt)) { -+ xloc = expand_location(gimple_location(stmt)); -+ if (!gimple_has_location(stmt)) -+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); -+ } -+ -+ loc_line = build_int_cstu(unsigned_type_node, xloc.line); -+ -+ loc_file = build_string(strlen(xloc.file) + 1, xloc.file); -+ loc_file = create_string_param(loc_file); -+ -+ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl)); -+ current_func = create_string_param(current_func); -+ -+ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL); -+ call_count++; -+ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? 
"min" : "max", call_count); -+ gcc_assert(len > 0); -+ ssa_name = build_string(len + 1, ssa_name_buf); -+ free(ssa_name_buf); -+ ssa_name = create_string_param(ssa_name); -+ -+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name) -+ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name); -+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); -+ -+ callee_node = cgraph_get_create_node(report_size_overflow_decl); -+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true); -+ -+ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth); -+ gcc_assert(edge != NULL); -+} -+ -+static void __unused print_the_code_insertions(const_gimple stmt) -+{ -+ location_t loc = gimple_location(stmt); -+ -+ inform(loc, "Integer size_overflow check applied here."); -+} -+ -+static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min) ++// Get the argnum of a function decl, if node is a return then the argnum is 0 ++unsigned int get_function_num(const_tree node, const_tree orig_fndecl) +{ -+ basic_block cond_bb, join_bb, bb_true; -+ edge e; -+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); -+ -+ cond_bb = gimple_bb(stmt); -+ if (before) -+ gsi_prev(&gsi); -+ if (gsi_end_p(gsi)) -+ e = split_block_after_labels(cond_bb); ++ if (is_a_return_check(node)) ++ return 0; + else -+ e = split_block(cond_bb, gsi_stmt(gsi)); -+ cond_bb = e->src; -+ join_bb = e->dest; -+ e->flags = EDGE_FALSE_VALUE; -+ e->probability = REG_BR_PROB_BASE; -+ -+ bb_true = create_empty_bb(cond_bb); -+ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); -+ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE); -+ make_edge(bb_true, join_bb, EDGE_FALLTHRU); -+ -+ gcc_assert(dom_info_available_p(CDI_DOMINATORS)); -+ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); -+ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); -+ -+ if (current_loops != NULL) { -+ gcc_assert(cond_bb->loop_father == join_bb->loop_father); -+ add_bb_to_loop(bb_true, cond_bb->loop_father); -+ } -+ -+ insert_cond(cond_bb, arg, cond_code, type_value); -+ insert_cond_result(caller_node, bb_true, stmt, arg, min); -+ -+// print_the_code_insertions(stmt); -+} -+ -+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before) -+{ -+ const_tree rhs_type = TREE_TYPE(rhs); -+ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min; -+ -+ gcc_assert(rhs_type != NULL_TREE); -+ if (TREE_CODE(rhs_type) == POINTER_TYPE) -+ return; -+ -+ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE); -+ -+ if (is_const_plus_unsigned_signed_truncation(rhs)) -+ return; -+ -+ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type)); -+ // typemax (-1) < typemin (0) -+ if (TREE_OVERFLOW(type_max)) -+ return; -+ -+ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type)); -+ -+ cast_rhs_type = TREE_TYPE(cast_rhs); -+ type_max_type = TREE_TYPE(type_max); -+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type)); -+ -+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK); -+ -+ // special case: get_size_overflow_type(), 32, u64->s -+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && 
!TYPE_UNSIGNED(rhs_type)) -+ return; -+ -+ type_min_type = TREE_TYPE(type_min); -+ gcc_assert(types_compatible_p(type_max_type, type_min_type)); -+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK); -+} -+ -+static bool is_lt_signed_type_max(const_tree rhs) -+{ -+ const_tree new_type, type_max, type = TREE_TYPE(rhs); -+ -+ if (!TYPE_UNSIGNED(type)) -+ return true; -+ -+ switch (TYPE_MODE(type)) { -+ case QImode: -+ new_type = intQI_type_node; -+ break; -+ case HImode: -+ new_type = intHI_type_node; -+ break; -+ case SImode: -+ new_type = intSI_type_node; -+ break; -+ case DImode: -+ new_type = intDI_type_node; -+ break; -+ default: -+ debug_tree((tree)type); -+ gcc_unreachable(); -+ } -+ -+ type_max = TYPE_MAX_VALUE(new_type); -+ if (!tree_int_cst_lt(type_max, rhs)) -+ return true; -+ -+ return false; -+} -+ -+static bool is_gt_zero(const_tree rhs) -+{ -+ const_tree type = TREE_TYPE(rhs); -+ -+ if (TYPE_UNSIGNED(type)) -+ return true; -+ -+ if (!tree_int_cst_lt(rhs, integer_zero_node)) -+ return true; -+ -+ return false; -+} -+ -+static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs) -+{ -+ if (gimple_assign_rhs_code(stmt) == MIN_EXPR) -+ return false; -+ if (!is_gimple_constant(rhs)) -+ return false; -+ -+ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow -+// if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs)) -+// return false; -+ -+ return true; -+} -+ -+static tree get_def_stmt_rhs(const_tree var) -+{ -+ tree rhs1, def_stmt_rhs1; -+ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt; -+ -+ def_stmt = get_def_stmt(var); -+ if (!gimple_assign_cast_p(def_stmt)) -+ return NULL_TREE; -+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && get_stmt_flag(def_stmt) == MY_STMT && gimple_assign_cast_p(def_stmt)); -+ -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ rhs1_def_stmt = get_def_stmt(rhs1); -+ if (!gimple_assign_cast_p(rhs1_def_stmt)) -+ return rhs1; -+ -+ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); -+ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1); -+ -+ switch (gimple_code(def_stmt_rhs1_def_stmt)) { -+ case GIMPLE_CALL: -+ case GIMPLE_NOP: -+ case GIMPLE_ASM: -+ case GIMPLE_PHI: -+ return def_stmt_rhs1; -+ case GIMPLE_ASSIGN: -+ return rhs1; -+ default: -+ debug_gimple_stmt(def_stmt_rhs1_def_stmt); -+ gcc_unreachable(); -+ } -+} -+ -+static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2) -+{ -+ tree new_rhs, orig_rhs; -+ void (*gimple_assign_set_rhs)(gimple, tree); -+ tree rhs1 = gimple_assign_rhs1(stmt); -+ tree rhs2 = gimple_assign_rhs2(stmt); -+ tree lhs = gimple_assign_lhs(stmt); -+ -+ if (!check_overflow) -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ -+ if (change_rhs == NULL_TREE) -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+ -+ if (new_rhs2 == NULL_TREE) { -+ orig_rhs = rhs1; -+ gimple_assign_set_rhs = &gimple_assign_set_rhs1; -+ } else { -+ orig_rhs = rhs2; -+ gimple_assign_set_rhs = &gimple_assign_set_rhs2; -+ } -+ -+ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT); -+ -+ new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs); -+ gimple_assign_set_rhs(stmt, new_rhs); -+ update_stmt(stmt); -+ -+ return create_assign(visited, stmt, lhs, AFTER_STMT); -+} -+ -+static bool is_subtraction_special(const_gimple stmt) -+{ -+ gimple rhs1_def_stmt, 
rhs2_def_stmt; -+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs; -+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode; -+ const_tree rhs1 = gimple_assign_rhs1(stmt); -+ const_tree rhs2 = gimple_assign_rhs2(stmt); -+ -+ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2)) -+ return false; -+ -+ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME); -+ -+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR) -+ return false; -+ -+ rhs1_def_stmt = get_def_stmt(rhs1); -+ rhs2_def_stmt = get_def_stmt(rhs2); -+ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt)) -+ return false; -+ -+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); -+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt); -+ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt); -+ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt); -+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1)); -+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1)); -+ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs)); -+ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs)); -+ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode)) -+ return false; -+ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode)) -+ return false; -+ -+ set_stmt_flag(rhs1_def_stmt, NO_CAST_CHECK); -+ set_stmt_flag(rhs2_def_stmt, NO_CAST_CHECK); -+ return true; -+} -+ -+static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs) -+{ -+ tree new_rhs1, new_rhs2; -+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs; -+ gimple assign, stmt = get_def_stmt(lhs); -+ tree rhs1 = gimple_assign_rhs1(stmt); -+ tree rhs2 = gimple_assign_rhs2(stmt); -+ -+ if (!is_subtraction_special(stmt)) -+ return NULL_TREE; -+ -+ new_rhs1 = expand(visited, caller_node, rhs1); -+ new_rhs2 = expand(visited, caller_node, rhs2); -+ -+ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1); -+ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2); -+ -+ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE) -+ return NULL_TREE; -+ -+ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) { -+ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1); -+ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1); -+ } -+ -+ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1); -+ new_lhs = gimple_assign_lhs(assign); -+ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT); -+ -+ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); -+} -+ -+static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs) -+{ -+ const_gimple def_stmt; -+ -+ if (TREE_CODE(rhs) != SSA_NAME) -+ return false; -+ -+ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR) -+ return false; -+ -+ def_stmt = get_def_stmt(rhs); -+ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR) -+ return false; -+ -+ return true; -+} -+ -+static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs) -+{ -+ tree rhs1, rhs2, new_lhs; -+ gimple def_stmt = get_def_stmt(lhs); -+ tree new_rhs1 = NULL_TREE; -+ tree new_rhs2 = NULL_TREE; -+ -+ rhs1 = 
gimple_assign_rhs1(def_stmt); -+ rhs2 = gimple_assign_rhs2(def_stmt); -+ -+ /* no DImode/TImode division in the 32/64 bit kernel */ -+ switch (gimple_assign_rhs_code(def_stmt)) { -+ case RDIV_EXPR: -+ case TRUNC_DIV_EXPR: -+ case CEIL_DIV_EXPR: -+ case FLOOR_DIV_EXPR: -+ case ROUND_DIV_EXPR: -+ case TRUNC_MOD_EXPR: -+ case CEIL_MOD_EXPR: -+ case FLOOR_MOD_EXPR: -+ case ROUND_MOD_EXPR: -+ case EXACT_DIV_EXPR: -+ case POINTER_PLUS_EXPR: -+ case BIT_AND_EXPR: -+ return create_assign(visited, def_stmt, lhs, AFTER_STMT); -+ default: -+ break; -+ } -+ -+ new_lhs = handle_integer_truncation(visited, caller_node, lhs); -+ if (new_lhs != NULL_TREE) -+ return new_lhs; -+ -+ if (TREE_CODE(rhs1) == SSA_NAME) -+ new_rhs1 = expand(visited, caller_node, rhs1); -+ if (TREE_CODE(rhs2) == SSA_NAME) -+ new_rhs2 = expand(visited, caller_node, rhs2); -+ -+ if (is_a_neg_overflow(def_stmt, rhs2)) -+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE); -+ if (is_a_neg_overflow(def_stmt, rhs1)) -+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2); -+ -+ -+ if (is_a_constant_overflow(def_stmt, rhs2)) -+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE); -+ if (is_a_constant_overflow(def_stmt, rhs1)) -+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2); -+ -+ // the const is between 0 and (signed) MAX -+ if (is_gimple_constant(rhs1)) -+ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT); -+ if (is_gimple_constant(rhs2)) -+ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT); -+ -+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); -+} -+ -+#if BUILDING_GCC_VERSION >= 4006 -+static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs) -+{ -+ if (is_gimple_constant(rhs)) -+ return cast_a_tree(size_overflow_type, rhs); -+ if (TREE_CODE(rhs) != SSA_NAME) -+ return NULL_TREE; -+ return expand(visited, caller_node, rhs); -+} -+ -+static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs) -+{ -+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type; -+ gimple def_stmt = get_def_stmt(lhs); -+ -+ size_overflow_type = get_size_overflow_type(def_stmt, lhs); -+ -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ rhs2 = gimple_assign_rhs2(def_stmt); -+ rhs3 = gimple_assign_rhs3(def_stmt); -+ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1); -+ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2); -+ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3); -+ -+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3); -+} -+#endif -+ -+static tree get_size_overflow_type(gimple stmt, const_tree node) -+{ -+ const_tree type; -+ tree new_type; -+ -+ gcc_assert(node != NULL_TREE); -+ -+ type = TREE_TYPE(node); -+ -+ if (get_stmt_flag(stmt) == MY_STMT) -+ return TREE_TYPE(node); -+ -+ switch (TYPE_MODE(type)) { -+ case QImode: -+ new_type = intHI_type_node; -+ break; -+ case HImode: -+ new_type = intSI_type_node; -+ break; -+ case SImode: -+ new_type = intDI_type_node; -+ break; -+ case DImode: -+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) -+ new_type = TYPE_UNSIGNED(type) ? 
-+static tree get_size_overflow_type(gimple stmt, const_tree node)
-+{
-+	const_tree type;
-+	tree new_type;
-+
-+	gcc_assert(node != NULL_TREE);
-+
-+	type = TREE_TYPE(node);
-+
-+	if (get_stmt_flag(stmt) == MY_STMT)
-+		return TREE_TYPE(node);
-+
-+	switch (TYPE_MODE(type)) {
-+	case QImode:
-+		new_type = intHI_type_node;
-+		break;
-+	case HImode:
-+		new_type = intSI_type_node;
-+		break;
-+	case SImode:
-+		new_type = intDI_type_node;
-+		break;
-+	case DImode:
-+		if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+			new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
-+		else
-+			new_type = intTI_type_node;
-+		break;
-+	case TImode:
-+		gcc_assert(!TYPE_UNSIGNED(type));
-+		new_type = intTI_type_node;
-+		break;
-+	default:
-+		debug_tree((tree)node);
-+		error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
-+		gcc_unreachable();
-+	}
-+
-+	if (TYPE_QUALS(type) != 0)
-+		return build_qualified_type(new_type, TYPE_QUALS(type));
-+	return new_type;
-+}
-+
-+static tree expand_visited(gimple def_stmt)
-+{
-+	const_gimple next_stmt;
-+	gimple_stmt_iterator gsi;
-+	enum gimple_code code = gimple_code(def_stmt);
-+
-+	if (code == GIMPLE_ASM)
-+		return NULL_TREE;
-+
-+	gsi = gsi_for_stmt(def_stmt);
-+	gsi_next(&gsi);
-+
-+	if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
-+		return NULL_TREE;
-+	gcc_assert(!gsi_end_p(gsi));
-+	next_stmt = gsi_stmt(gsi);
-+
-+	if (gimple_code(def_stmt) == GIMPLE_PHI && get_stmt_flag((gimple)next_stmt) != MY_STMT)
-+		return NULL_TREE;
-+	gcc_assert(get_stmt_flag((gimple)next_stmt) == MY_STMT);
-+
-+	return get_lhs(next_stmt);
-+}
-+
-+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+	gimple def_stmt;
-+
-+	def_stmt = get_def_stmt(lhs);
-+
-+	if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+		return NULL_TREE;
-+
-+	if (get_stmt_flag(def_stmt) == MY_STMT)
-+		return lhs;
-+
-+	if (pointer_set_contains(visited, def_stmt))
-+		return expand_visited(def_stmt);
-+
-+	switch (gimple_code(def_stmt)) {
-+	case GIMPLE_PHI:
-+		return handle_phi(visited, caller_node, lhs);
-+	case GIMPLE_CALL:
-+	case GIMPLE_ASM:
-+		return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+	case GIMPLE_ASSIGN:
-+		switch (gimple_num_ops(def_stmt)) {
-+		case 2:
-+			return handle_unary_ops(visited, caller_node, def_stmt);
-+		case 3:
-+			return handle_binary_ops(visited, caller_node, lhs);
-+#if BUILDING_GCC_VERSION >= 4006
-+		case 4:
-+			return handle_ternary_ops(visited, caller_node, lhs);
-+#endif
-+		}
-+	default:
-+		debug_gimple_stmt(def_stmt);
-+		error("%s: unknown gimple code", __func__);
-+		gcc_unreachable();
-+	}
-+}
-+
-+static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
-+{
-+	const_gimple assign;
-+	tree orig_type = TREE_TYPE(orig_node);
-+	gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+	assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+	return gimple_assign_lhs(assign);
-+}
-+
-+static void change_orig_node(struct interesting_node *cur_node, tree new_node)
-+{
-+	void (*set_rhs)(gimple, tree);
-+	gimple stmt = cur_node->first_stmt;
-+	const_tree orig_node = cur_node->node;
-+
-+	switch (gimple_code(stmt)) {
-+	case GIMPLE_RETURN:
-+		gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
-+		break;
-+	case GIMPLE_CALL:
-+		gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
-+		break;
-+	case GIMPLE_ASSIGN:
-+		switch (cur_node->num) {
-+		case 1:
-+			set_rhs = &gimple_assign_set_rhs1;
-+			break;
-+		case 2:
-+			set_rhs = &gimple_assign_set_rhs2;
-+			break;
-+#if BUILDING_GCC_VERSION >= 4006
-+		case 3:
-+			set_rhs = &gimple_assign_set_rhs3;
-+			break;
-+#endif
-+		default:
-+			gcc_unreachable();
-+		}
-+
-+		set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
-+		break;
-+	default:
-+		debug_gimple_stmt(stmt);
-+		gcc_unreachable();
-+	}
-+
-+	update_stmt(stmt);
++	return find_arg_number_tree(node, orig_fndecl);
+}
+
-+static unsigned int get_correct_arg_count(unsigned int argnum, const_tree 
fndecl) ++unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl) +{ + const struct size_overflow_hash *hash; + unsigned int new_argnum; @@ -117226,81 +120490,6 @@ index 0000000..948ec25 + return CANNOT_FIND_ARG; +} + -+// Don't want to duplicate entries in next_cgraph_node -+static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num) -+{ -+ const_tree new_callee_fndecl; -+ struct next_cgraph_node *cur_node; -+ -+ if (fndecl == RET_CHECK) -+ new_callee_fndecl = NODE_DECL(node); -+ else -+ new_callee_fndecl = fndecl; -+ -+ for (cur_node = head; cur_node; cur_node = cur_node->next) { -+ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0)) -+ continue; -+ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0)) -+ continue; -+ if (num == cur_node->num) -+ return true; -+ } -+ return false; -+} -+ -+/* Add a next_cgraph_node into the list for handle_function(). -+ * handle_function() iterates over all the next cgraph nodes and -+ * starts the overflow check insertion process. -+ */ -+static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num) -+{ -+ struct next_cgraph_node *new_node; -+ -+ if (is_in_next_cgraph_node(head, node, fndecl, num)) -+ return head; -+ -+ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node)); -+ new_node->current_function = node; -+ new_node->next = NULL; -+ new_node->num = num; -+ if (fndecl == RET_CHECK) -+ new_node->callee_fndecl = NODE_DECL(node); -+ else -+ new_node->callee_fndecl = fndecl; -+ -+ if (!head) -+ return new_node; -+ -+ new_node->next = head; -+ return new_node; -+} -+ -+static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num) -+{ -+ struct cgraph_edge *e; -+ -+ if (num == 0) -+ return create_new_next_cgraph_node(head, node, RET_CHECK, num); -+ -+ for (e = node->callers; e; e = e->next_caller) { -+ tree fndecl = gimple_call_fndecl(e->call_stmt); -+ -+ gcc_assert(fndecl != NULL_TREE); -+ head = create_new_next_cgraph_node(head, e->caller, fndecl, num); -+ } -+ -+ return head; -+} -+ -+static bool is_a_return_check(const_tree node) -+{ -+ if (TREE_CODE(node) == FUNCTION_DECL) -+ return true; -+ -+ gcc_assert(TREE_CODE(node) == PARM_DECL); -+ return false; -+} -+ +static bool is_in_hash_table(const_tree fndecl, unsigned int num) +{ + const struct size_overflow_hash *hash; @@ -117311,37 +120500,10 @@ index 0000000..948ec25 + return false; +} + -+struct missing_functions { -+ struct missing_functions *next; -+ const_tree node; -+ tree fndecl; -+}; -+ -+static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node) -+{ -+ struct missing_functions *new_function; -+ -+ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function)); -+ new_function->node = node; -+ new_function->next = NULL; -+ -+ if (TREE_CODE(node) == FUNCTION_DECL) -+ new_function->fndecl = node; -+ else -+ new_function->fndecl = current_function_decl; -+ gcc_assert(new_function->fndecl); -+ -+ if (!missing_fn_head) -+ return new_function; -+ -+ new_function->next = missing_fn_head; -+ return new_function; -+} -+ +/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table. + * If the function is missing everywhere then print the missing message into stderr. 
+ */ -+static bool is_missing_function(const_tree orig_fndecl, unsigned int num) ++bool is_missing_function(const_tree orig_fndecl, unsigned int num) +{ + switch (DECL_FUNCTION_CODE(orig_fndecl)) { +#if BUILDING_GCC_VERSION >= 4008 @@ -117369,2189 +120531,6 @@ index 0000000..948ec25 + return true; +} + -+// Get the argnum of a function decl, if node is a return then the argnum is 0 -+static unsigned int get_function_num(const_tree node, const_tree orig_fndecl) -+{ -+ if (is_a_return_check(node)) -+ return 0; -+ else -+ return find_arg_number_tree(node, orig_fndecl); -+} -+ -+/* If the function is missing from the hash table and it is a static function -+ * then create a next_cgraph_node from it for handle_function() -+ */ -+static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head) -+{ -+ unsigned int num; -+ const_tree orig_fndecl; -+ struct cgraph_node *next_node = NULL; -+ -+ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl); -+ -+ num = get_function_num(missing_fn_head->node, orig_fndecl); -+ if (num == CANNOT_FIND_ARG) -+ return cnodes; -+ -+ if (!is_missing_function(orig_fndecl, num)) -+ return cnodes; -+ -+ next_node = cgraph_get_node(missing_fn_head->fndecl); -+ if (next_node && next_node->local.local) -+ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num); -+ return cnodes; -+} -+ -+/* Search for missing size_overflow attributes on the last nodes in ipa and collect them -+ * into the next_cgraph_node list. They will be the next interesting returns or callees. -+ */ -+static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node) -+{ -+ unsigned int i; -+ tree node; -+ struct missing_functions *cur, *missing_fn_head = NULL; -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) { -+#else -+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) { -+#endif -+ switch (TREE_CODE(node)) { -+ case PARM_DECL: -+ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE) -+ break; -+ case FUNCTION_DECL: -+ missing_fn_head = create_new_missing_function(missing_fn_head, node); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ while (missing_fn_head) { -+ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head); -+ -+ cur = missing_fn_head->next; -+ free(missing_fn_head); -+ missing_fn_head = cur; -+ } -+ -+ return cnodes; -+} -+ -+static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result) -+{ -+ gimple phi = get_def_stmt(result); -+ unsigned int i, n = gimple_phi_num_args(phi); -+ -+ pointer_set_insert(visited, phi); -+ for (i = 0; i < n; i++) { -+ const_tree arg = gimple_phi_arg_def(phi, i); -+ -+ set_conditions(visited, interesting_conditions, arg); -+ } -+} -+ -+enum conditions { -+ FROM_CONST, NOT_UNARY, CAST -+}; -+ -+// Search for constants, cast assignments and binary/ternary assignments -+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs) -+{ -+ gimple def_stmt = get_def_stmt(lhs); -+ -+ if (is_gimple_constant(lhs)) { -+ interesting_conditions[FROM_CONST] = true; -+ return; -+ } -+ -+ if (!def_stmt) -+ return; -+ -+ if (pointer_set_contains(visited, def_stmt)) -+ return; -+ -+ switch (gimple_code(def_stmt)) { -+ case GIMPLE_NOP: -+ case GIMPLE_CALL: -+ case GIMPLE_ASM: -+ return; -+ case GIMPLE_PHI: -+ return walk_phi_set_conditions(visited, 
interesting_conditions, lhs); -+ case GIMPLE_ASSIGN: -+ if (gimple_num_ops(def_stmt) == 2) { -+ const_tree rhs = gimple_assign_rhs1(def_stmt); -+ -+ if (gimple_assign_cast_p(def_stmt)) -+ interesting_conditions[CAST] = true; -+ -+ return set_conditions(visited, interesting_conditions, rhs); -+ } else { -+ interesting_conditions[NOT_UNARY] = true; -+ return; -+ } -+ default: -+ debug_gimple_stmt(def_stmt); -+ gcc_unreachable(); -+ } -+} -+ -+// determine whether duplication will be necessary or not. -+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions) -+{ -+ struct pointer_set_t *visited; -+ -+ if (gimple_assign_cast_p(cur_node->first_stmt)) -+ interesting_conditions[CAST] = true; -+ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2) -+ interesting_conditions[NOT_UNARY] = true; -+ -+ visited = pointer_set_create(); -+ set_conditions(visited, interesting_conditions, cur_node->node); -+ pointer_set_destroy(visited); -+} -+ -+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm -+static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs) -+{ -+ gimple assign; -+ gimple_stmt_iterator gsi; -+ -+ // already removed -+ if (gimple_bb(asm_stmt) == NULL) -+ return; -+ gsi = gsi_for_stmt(asm_stmt); -+ -+ assign = gimple_build_assign(lhs, rhs); -+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT); -+ SSA_NAME_DEF_STMT(lhs) = assign; -+ -+ gsi_remove(&gsi, true); -+} -+ -+// Get the field decl of a component ref for intentional_overflow checking -+static const_tree search_field_decl(const_tree comp_ref) -+{ -+ const_tree field = NULL_TREE; -+ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref); -+ -+ for (i = 0; i < len; i++) { -+ field = TREE_OPERAND(comp_ref, i); -+ if (TREE_CODE(field) == FIELD_DECL) -+ break; -+ } -+ gcc_assert(TREE_CODE(field) == FIELD_DECL); -+ return field; -+} -+ -+/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting -+ * stmt is a return otherwise it is the callee function. -+ */ -+static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum) -+{ -+ const_tree fndecl; -+ -+ if (argnum == 0) -+ fndecl = current_function_decl; -+ else -+ fndecl = gimple_call_fndecl(stmt); -+ -+ if (fndecl == NULL_TREE) -+ return NULL_TREE; -+ -+ return DECL_ORIGIN(fndecl); -+} -+ -+/* Get the param of the intentional_overflow attribute. 
-+ * * 0: MARK_NOT_INTENTIONAL -+ * * 1..MAX_PARAM: MARK_YES -+ * * -1: MARK_TURN_OFF -+ */ -+static tree get_attribute_param(const_tree decl) -+{ -+ const_tree attr; -+ -+ if (decl == NULL_TREE) -+ return NULL_TREE; -+ -+ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl)); -+ if (!attr || !TREE_VALUE(attr)) -+ return NULL_TREE; -+ -+ return TREE_VALUE(attr); -+} -+ -+// MARK_TURN_OFF -+static bool is_turn_off_intentional_attr(const_tree decl) -+{ -+ const_tree param_head; -+ -+ param_head = get_attribute_param(decl); -+ if (param_head == NULL_TREE) -+ return false; -+ -+ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1) -+ return true; -+ return false; -+} -+ -+// MARK_NOT_INTENTIONAL -+static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum) -+{ -+ const_tree param_head; -+ -+ if (argnum == 0) -+ return false; -+ -+ param_head = get_attribute_param(decl); -+ if (param_head == NULL_TREE) -+ return false; -+ -+ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head))) -+ return true; -+ return false; -+} -+ -+// MARK_YES -+static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum) -+{ -+ tree param, param_head; -+ -+ if (argnum == 0) -+ return false; -+ -+ param_head = get_attribute_param(decl); -+ for (param = param_head; param; param = TREE_CHAIN(param)) -+ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param))) -+ return true; -+ return false; -+} -+ -+static const char *get_asm_string(const_gimple stmt) -+{ -+ if (!stmt) -+ return NULL; -+ if (gimple_code(stmt) != GIMPLE_ASM) -+ return NULL; -+ -+ return gimple_asm_string(stmt); -+} -+ -+static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt) -+{ -+ const char *str; -+ -+ str = get_asm_string(stmt); -+ if (!str) -+ return false; -+ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1); -+} -+ -+static bool is_size_overflow_intentional_asm_yes(const_gimple stmt) -+{ -+ const char *str; -+ -+ str = get_asm_string(stmt); -+ if (!str) -+ return false; -+ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1); -+} -+ -+static bool is_size_overflow_asm(const_gimple stmt) -+{ -+ const char *str; -+ -+ str = get_asm_string(stmt); -+ if (!str) -+ return false; -+ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1); -+} -+ -+static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum) -+{ -+ location_t loc; -+ -+ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF) -+ return; -+ -+ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES) -+ return; -+ -+ loc = DECL_SOURCE_LOCATION(decl); -+ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum); -+} -+ -+/* Get the type of the intentional_overflow attribute of a node -+ * * MARK_TURN_OFF -+ * * MARK_YES -+ * * MARK_NO -+ * * MARK_NOT_INTENTIONAL -+ */ -+static enum mark get_intentional_attr_type(const_tree node) -+{ -+ const_tree cur_decl; -+ -+ if (node == NULL_TREE) -+ return MARK_NO; -+ -+ switch (TREE_CODE(node)) { -+ case COMPONENT_REF: -+ cur_decl = search_field_decl(node); -+ if (is_turn_off_intentional_attr(cur_decl)) -+ return MARK_TURN_OFF; -+ if (is_end_intentional_intentional_attr(cur_decl, 1)) -+ return MARK_YES; -+ break; -+ case PARM_DECL: { -+ unsigned int argnum; -+ -+ cur_decl = DECL_ORIGIN(current_function_decl); -+ argnum = find_arg_number_tree(node, cur_decl); -+ if (argnum == CANNOT_FIND_ARG) -+ return MARK_NO; -+ if 
(is_yes_intentional_attr(cur_decl, argnum)) -+ return MARK_YES; -+ if (is_end_intentional_intentional_attr(cur_decl, argnum)) -+ return MARK_NOT_INTENTIONAL; -+ break; -+ } -+ case FUNCTION_DECL: -+ if (is_turn_off_intentional_attr(DECL_ORIGIN(node))) -+ return MARK_TURN_OFF; -+ break; -+ default: -+ break; -+ } -+ return MARK_NO; -+} -+ -+// Search for the intentional_overflow attribute on the last nodes -+static enum mark search_last_nodes_intentional(struct interesting_node *cur_node) -+{ -+ unsigned int i; -+ tree last_node; -+ enum mark mark = MARK_NO; -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) { -+#else -+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) { -+#endif -+ mark = get_intentional_attr_type(last_node); -+ if (mark != MARK_NO) -+ break; -+ } -+ return mark; -+} -+ -+/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and -+ * set the appropriate intentional_overflow type. Delete the asm stmt in the end. -+ */ -+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node) -+{ -+ if (!cur_node->intentional_mark_from_gimple) -+ return false; -+ -+ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple)) -+ cur_node->intentional_attr_cur_fndecl = MARK_YES; -+ else -+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF; -+ -+ // skip param decls -+ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0) -+ return true; -+ return true; -+} -+ -+/* Search intentional_overflow attribute on caller and on callee too. -+ * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes -+ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int) -+ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup -+*/ -+static void check_intentional_attribute_ipa(struct interesting_node *cur_node) -+{ -+ const_tree fndecl; -+ -+ if (is_intentional_attribute_from_gimple(cur_node)) -+ return; -+ -+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) { -+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF; -+ return; -+ } -+ -+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) { -+ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL; -+ return; -+ } -+ -+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN) -+ return; -+ -+ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num); -+ if (is_turn_off_intentional_attr(fndecl)) { -+ cur_node->intentional_attr_decl = MARK_TURN_OFF; -+ return; -+ } -+ -+ if (is_end_intentional_intentional_attr(fndecl, cur_node->num)) -+ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL; -+ else if (is_yes_intentional_attr(fndecl, cur_node->num)) -+ cur_node->intentional_attr_decl = MARK_YES; -+ -+ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node); -+ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num); -+} -+ -+// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max -+static bool skip_asm(const_tree arg) -+{ -+ gimple def_stmt = get_def_stmt(arg); -+ -+ if (!def_stmt || !gimple_assign_cast_p(def_stmt)) -+ return false; -+ -+ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt)); -+ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM; -+} -+ -+static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result) -+{ -+ gimple phi 
= get_def_stmt(result); -+ unsigned int i, n = gimple_phi_num_args(phi); -+ -+ pointer_set_insert(visited, phi); -+ for (i = 0; i < n; i++) { -+ tree arg = gimple_phi_arg_def(phi, i); -+ -+ walk_use_def(visited, cur_node, arg); -+ } -+} -+ -+static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs) -+{ -+ gimple def_stmt = get_def_stmt(lhs); -+ tree rhs1, rhs2; -+ -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ rhs2 = gimple_assign_rhs2(def_stmt); -+ -+ walk_use_def(visited, cur_node, rhs1); -+ walk_use_def(visited, cur_node, rhs2); -+} -+ -+static void insert_last_node(struct interesting_node *cur_node, tree node) -+{ -+ unsigned int i; -+ tree element; -+ enum tree_code code; -+ -+ gcc_assert(node != NULL_TREE); -+ -+ if (is_gimple_constant(node)) -+ return; -+ -+ code = TREE_CODE(node); -+ if (code == VAR_DECL) { -+ node = DECL_ORIGIN(node); -+ code = TREE_CODE(node); -+ } -+ -+ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF) -+ return; -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) { -+#else -+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) { -+#endif -+ if (operand_equal_p(node, element, 0)) -+ return; -+ } -+ -+#if BUILDING_GCC_VERSION <= 4007 -+ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN); -+ VEC_safe_push(tree, gc, cur_node->last_nodes, node); -+#else -+ gcc_assert(cur_node->last_nodes->length() < VEC_LEN); -+ vec_safe_push(cur_node->last_nodes, node); -+#endif -+} -+ -+// a size_overflow asm stmt in the control flow doesn't stop the recursion -+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt) -+{ -+ if (!is_size_overflow_asm(stmt)) -+ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs)); -+} -+ -+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow) -+ * and component refs (for checking the intentional_overflow attribute). 
-+ */ -+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs) -+{ -+ const_gimple def_stmt; -+ -+ if (TREE_CODE(lhs) != SSA_NAME) { -+ insert_last_node(cur_node, lhs); -+ return; -+ } -+ -+ def_stmt = get_def_stmt(lhs); -+ if (!def_stmt) -+ return; -+ -+ if (pointer_set_insert(visited, def_stmt)) -+ return; -+ -+ switch (gimple_code(def_stmt)) { -+ case GIMPLE_NOP: -+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs)); -+ case GIMPLE_ASM: -+ return handle_asm_stmt(visited, cur_node, lhs, def_stmt); -+ case GIMPLE_CALL: { -+ tree fndecl = gimple_call_fndecl(def_stmt); -+ -+ if (fndecl == NULL_TREE) -+ return; -+ insert_last_node(cur_node, fndecl); -+ return; -+ } -+ case GIMPLE_PHI: -+ return walk_use_def_phi(visited, cur_node, lhs); -+ case GIMPLE_ASSIGN: -+ switch (gimple_num_ops(def_stmt)) { -+ case 2: -+ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt)); -+ case 3: -+ return walk_use_def_binary(visited, cur_node, lhs); -+ } -+ default: -+ debug_gimple_stmt((gimple)def_stmt); -+ error("%s: unknown gimple code", __func__); -+ gcc_unreachable(); -+ } -+} -+ -+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes -+static void set_last_nodes(struct interesting_node *cur_node) -+{ -+ struct pointer_set_t *visited; -+ -+ visited = pointer_set_create(); -+ walk_use_def(visited, cur_node, cur_node->node); -+ pointer_set_destroy(visited); -+} -+ -+enum precond { -+ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE -+}; -+ -+/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere. -+ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type. -+ * Stmt duplication is unnecessary if there are no binary/ternary assignements or if the unary assignment isn't a cast. -+ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assigments then we assume that it is some kind of error code. -+ */ -+static enum precond check_preconditions(struct interesting_node *cur_node) -+{ -+ bool interesting_conditions[3] = {false, false, false}; -+ -+ set_last_nodes(cur_node); -+ -+ check_intentional_attribute_ipa(cur_node); -+ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF) -+ return NO_ATTRIBUTE_SEARCH; -+ -+ search_interesting_conditions(cur_node, interesting_conditions); -+ -+ // error code -+ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY]) -+ return NO_ATTRIBUTE_SEARCH; -+ -+ // unnecessary overflow check -+ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY]) -+ return NO_CHECK_INSERT; -+ -+ if (cur_node->intentional_attr_cur_fndecl != MARK_NO) -+ return NO_CHECK_INSERT; -+ -+ return NONE; -+} -+ -+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts, -+ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node -+ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value. 
-+ */ -+static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node) -+{ -+ enum precond ret; -+ struct pointer_set_t *visited; -+ tree new_node, orig_node = cur_node->node; -+ -+ ret = check_preconditions(cur_node); -+ if (ret == NO_ATTRIBUTE_SEARCH) -+ return cnodes; -+ -+ cnodes = search_overflow_attribute(cnodes, cur_node); -+ -+ if (ret == NO_CHECK_INSERT) -+ return cnodes; -+ -+ visited = pointer_set_create(); -+ new_node = expand(visited, caller_node, orig_node); -+ pointer_set_destroy(visited); -+ -+ if (new_node == NULL_TREE) -+ return cnodes; -+ -+ change_orig_node(cur_node, new_node); -+ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT); -+ -+ return cnodes; -+} -+ -+// Check visited interesting nodes. -+static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num) -+{ -+ struct interesting_node *cur; -+ -+ for (cur = head; cur; cur = cur->next) { -+ if (!operand_equal_p(node, cur->node, 0)) -+ continue; -+ if (num != cur->num) -+ continue; -+ if (first_stmt == cur->first_stmt) -+ return true; -+ } -+ return false; -+} -+ -+/* Create an interesting node. The ipa pass starts to duplicate from these stmts. -+ first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this -+ last_nodes: they are the last stmts in the recursion (they haven't a def_stmt). They are useful in the missing size_overflow attribute check and -+ the intentional_overflow attribute check. They are collected by set_last_nodes(). -+ num: arg count of a call stmt or 0 when it is a ret -+ node: the recursion starts from here, it is a call arg or a return value -+ fndecl: the fndecl of the interesting node when the node is an arg. it is the fndecl of the callee function otherwise it is the fndecl of the caller (current_function_fndecl) function. 
-+ intentional_attr_decl: intentional_overflow attribute of the callee function -+ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function -+ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists -+ */ -+static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt) -+{ -+ struct interesting_node *new_node; -+ tree fndecl; -+ enum gimple_code code; -+ -+ gcc_assert(node != NULL_TREE); -+ code = gimple_code(first_stmt); -+ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN); -+ -+ if (num == CANNOT_FIND_ARG) -+ return head; -+ -+ if (skip_types(node)) -+ return head; -+ -+ if (skip_asm(node)) -+ return head; -+ -+ if (is_gimple_call(first_stmt)) -+ fndecl = gimple_call_fndecl(first_stmt); -+ else -+ fndecl = current_function_decl; -+ -+ if (fndecl == NULL_TREE) -+ return head; -+ -+ if (is_in_interesting_node(head, first_stmt, node, num)) -+ return head; -+ -+ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node)); -+ -+ new_node->next = NULL; -+ new_node->first_stmt = first_stmt; -+#if BUILDING_GCC_VERSION <= 4007 -+ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN); -+#else -+ vec_alloc(new_node->last_nodes, VEC_LEN); -+#endif -+ new_node->num = num; -+ new_node->node = node; -+ new_node->fndecl = fndecl; -+ new_node->intentional_attr_decl = MARK_NO; -+ new_node->intentional_attr_cur_fndecl = MARK_NO; -+ new_node->intentional_mark_from_gimple = asm_stmt; -+ -+ if (!head) -+ return new_node; -+ -+ new_node->next = head; -+ return new_node; -+} -+ -+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa). -+ * If the ret stmt is in the next cgraph node list then it's an interesting ret. -+ */ -+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node) -+{ -+ struct next_cgraph_node *cur_node; -+ tree ret = gimple_return_retval(stmt); -+ -+ if (ret == NULL_TREE) -+ return head; -+ -+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) { -+ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0)) -+ continue; -+ if (cur_node->num == 0) -+ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM); -+ } -+ -+ return head; -+} -+ -+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa). -+ * If the call stmt is in the next cgraph node list then it's an interesting call. 
-+ */ -+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node) -+{ -+ unsigned int argnum; -+ tree arg; -+ const_tree fndecl; -+ struct next_cgraph_node *cur_node; -+ -+ fndecl = gimple_call_fndecl(stmt); -+ if (fndecl == NULL_TREE) -+ return head; -+ -+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) { -+ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0)) -+ continue; -+ argnum = get_correct_arg_count(cur_node->num, fndecl); -+ gcc_assert(argnum != CANNOT_FIND_ARG); -+ if (argnum == 0) -+ continue; -+ -+ arg = gimple_call_arg(stmt, argnum - 1); -+ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM); -+ } -+ -+ return head; -+} -+ -+static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count) -+{ -+ if (!operand_equal_p(orig_node, node, 0)) -+ return WRONG_NODE; -+ if (skip_types(node)) -+ return WRONG_NODE; -+ return ret_count; -+} -+ -+// Get the index of the rhs node in an assignment -+static unsigned int get_assign_ops_count(const_gimple stmt, tree node) -+{ -+ const_tree rhs1, rhs2; -+ unsigned int ret; -+ -+ gcc_assert(stmt); -+ gcc_assert(is_gimple_assign(stmt)); -+ -+ rhs1 = gimple_assign_rhs1(stmt); -+ gcc_assert(rhs1 != NULL_TREE); -+ -+ switch (gimple_num_ops(stmt)) { -+ case 2: -+ return check_ops(node, rhs1, 1); -+ case 3: -+ ret = check_ops(node, rhs1, 1); -+ if (ret != WRONG_NODE) -+ return ret; -+ -+ rhs2 = gimple_assign_rhs2(stmt); -+ gcc_assert(rhs2 != NULL_TREE); -+ return check_ops(node, rhs2, 2); -+ default: -+ gcc_unreachable(); -+ } -+} -+ -+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function. -+static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt) -+{ -+ unsigned int i; -+ -+ if (gimple_call_fndecl(stmt) == NULL_TREE) -+ return CANNOT_FIND_ARG; -+ -+ for (i = 0; i < gimple_call_num_args(stmt); i++) { -+ tree node; -+ -+ node = gimple_call_arg(stmt, i); -+ if (!operand_equal_p(arg, node, 0)) -+ continue; -+ if (!skip_types(node)) -+ return i + 1; -+ } -+ -+ return CANNOT_FIND_ARG; -+} -+ -+/* starting from the size_overflow asm stmt collect interesting stmts. They can be -+ * any of return, call or assignment stmts (because of inlining). 
-+ */ -+static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm) -+{ -+ use_operand_p use_p; -+ imm_use_iterator imm_iter; -+ unsigned int argnum; -+ -+ gcc_assert(TREE_CODE(node) == SSA_NAME); -+ -+ if (pointer_set_insert(visited, node)) -+ return head; -+ -+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) { -+ gimple stmt = USE_STMT(use_p); -+ -+ if (stmt == NULL) -+ return head; -+ if (is_gimple_debug(stmt)) -+ continue; -+ -+ switch (gimple_code(stmt)) { -+ case GIMPLE_CALL: -+ argnum = find_arg_number_gimple(node, stmt); -+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm); -+ break; -+ case GIMPLE_RETURN: -+ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm); -+ break; -+ case GIMPLE_ASSIGN: -+ argnum = get_assign_ops_count(stmt, node); -+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm); -+ break; -+ case GIMPLE_PHI: { -+ tree result = gimple_phi_result(stmt); -+ head = get_interesting_ret_or_call(visited, head, result, intentional_asm); -+ break; -+ } -+ case GIMPLE_ASM: -+ if (gimple_asm_noutputs(stmt) != 0) -+ break; -+ if (!is_size_overflow_asm(stmt)) -+ break; -+ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm); -+ break; -+ case GIMPLE_COND: -+ case GIMPLE_SWITCH: -+ break; -+ default: -+ debug_gimple_stmt(stmt); -+ gcc_unreachable(); -+ break; -+ } -+ } -+ return head; -+} -+ -+static void remove_size_overflow_asm(gimple stmt) -+{ -+ gimple_stmt_iterator gsi; -+ tree input, output; -+ -+ if (!is_size_overflow_asm(stmt)) -+ return; -+ -+ if (gimple_asm_noutputs(stmt) == 0) { -+ gsi = gsi_for_stmt(stmt); -+ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt); -+ gsi_remove(&gsi, true); -+ return; -+ } -+ -+ input = gimple_asm_input_op(stmt, 0); -+ output = gimple_asm_output_op(stmt, 0); -+ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input)); -+} -+ -+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts. -+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it. -+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output. 
-+ */ -+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head) -+{ -+ const_tree output; -+ struct pointer_set_t *visited; -+ gimple intentional_asm = NOT_INTENTIONAL_ASM; -+ -+ if (!is_size_overflow_asm(stmt)) -+ return head; -+ -+ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt)) -+ intentional_asm = stmt; -+ -+ gcc_assert(gimple_asm_ninputs(stmt) == 1); -+ -+ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt)) -+ return head; -+ -+ if (gimple_asm_noutputs(stmt) == 0) { -+ const_tree input; -+ -+ if (!is_size_overflow_intentional_asm_turn_off(stmt)) -+ return head; -+ -+ input = gimple_asm_input_op(stmt, 0); -+ remove_size_overflow_asm(stmt); -+ if (is_gimple_constant(TREE_VALUE(input))) -+ return head; -+ visited = pointer_set_create(); -+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm); -+ pointer_set_destroy(visited); -+ return head; -+ } -+ -+ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt)) -+ remove_size_overflow_asm(stmt); -+ -+ visited = pointer_set_create(); -+ output = gimple_asm_output_op(stmt, 0); -+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm); -+ pointer_set_destroy(visited); -+ return head; -+} -+ -+/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass) -+ * or a call stmt or a return stmt and store them in the interesting_node list -+ */ -+static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node) -+{ -+ basic_block bb; -+ struct interesting_node *head = NULL; -+ -+ FOR_ALL_BB_FN(bb, cfun) { -+ gimple_stmt_iterator gsi; -+ -+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { -+ enum gimple_code code; -+ gimple stmt = gsi_stmt(gsi); -+ -+ code = gimple_code(stmt); -+ -+ if (code == GIMPLE_ASM) -+ head = handle_stmt_by_size_overflow_asm(stmt, head); -+ -+ if (!next_node) -+ continue; -+ if (code == GIMPLE_CALL) -+ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node); -+ if (code == GIMPLE_RETURN) -+ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node); -+ } -+ } -+ return head; -+} -+ -+static void set_current_function_decl(tree fndecl) -+{ -+ gcc_assert(fndecl != NULL_TREE); -+ -+ push_cfun(DECL_STRUCT_FUNCTION(fndecl)); -+ calculate_dominance_info(CDI_DOMINATORS); -+ current_function_decl = fndecl; -+} -+ -+static void unset_current_function_decl(void) -+{ -+ free_dominance_info(CDI_DOMINATORS); -+ pop_cfun(); -+ current_function_decl = NULL_TREE; -+} -+ -+static void free_interesting_node(struct interesting_node *head) -+{ -+ struct interesting_node *cur; -+ -+ while (head) { -+ cur = head->next; -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC_free(tree, gc, head->last_nodes); -+#else -+ vec_free(head->last_nodes); -+#endif -+ free(head); -+ head = cur; -+ } -+} -+ -+static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node) -+{ -+ struct visited *new_visited; -+ -+ new_visited = (struct visited *)xmalloc(sizeof(*new_visited)); -+ new_visited->fndecl = cur_node->fndecl; -+ new_visited->num = cur_node->num; -+ new_visited->next = NULL; -+ -+ if (!head) -+ return new_visited; -+ -+ new_visited->next = head; -+ return new_visited; -+} -+ -+/* Check whether the function was already visited. 
If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are same then -+ * it is a visited function. -+ */ -+static bool is_visited_function(struct visited *head, struct interesting_node *cur_node) -+{ -+ struct visited *cur; -+ -+ if (!head) -+ return false; -+ -+ if (get_stmt_flag(cur_node->first_stmt) != VISITED_STMT) -+ return false; -+ -+ for (cur = head; cur; cur = cur->next) { -+ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0)) -+ continue; -+ if (cur_node->num == cur->num) -+ return true; -+ } -+ return false; -+} -+ -+static void free_next_cgraph_node(struct next_cgraph_node *head) -+{ -+ struct next_cgraph_node *cur; -+ -+ while (head) { -+ cur = head->next; -+ free(head); -+ head = cur; -+ } -+} -+ -+static void remove_all_size_overflow_asm(void) -+{ -+ basic_block bb; -+ -+ FOR_ALL_BB_FN(bb, cfun) { -+ gimple_stmt_iterator si; -+ -+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) -+ remove_size_overflow_asm(gsi_stmt(si)); -+ } -+} -+ -+/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function -+ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk -+ * the newly collected interesting functions (they are interesting if there is control flow between -+ * the interesting stmts and them). -+ */ -+static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited) -+{ -+ struct interesting_node *head, *cur_node; -+ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL; -+ -+ set_current_function_decl(NODE_DECL(node)); -+ call_count = 0; -+ -+ head = collect_interesting_stmts(next_node); -+ -+ for (cur_node = head; cur_node; cur_node = cur_node->next) { -+ if (is_visited_function(visited, cur_node)) -+ continue; -+ cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node); -+ set_stmt_flag(cur_node->first_stmt, VISITED_STMT); -+ visited = insert_visited_function(visited, cur_node); -+ } -+ -+ free_interesting_node(head); -+ remove_all_size_overflow_asm(); -+ unset_current_function_decl(); -+ -+ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next) -+ visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited); -+ -+ free_next_cgraph_node(cnodes_head); -+ return visited; -+} -+ -+static void free_visited(struct visited *head) -+{ -+ struct visited *cur; -+ -+ while (head) { -+ cur = head->next; -+ free(head); -+ head = cur; -+ } -+} -+ -+// erase the local flag -+static void set_plf_false(void) -+{ -+ basic_block bb; -+ -+ FOR_ALL_BB_FN(bb, cfun) { -+ gimple_stmt_iterator si; -+ -+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) -+ set_stmt_flag(gsi_stmt(si), NO_FLAGS); -+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si)) -+ set_stmt_flag(gsi_stmt(si), NO_FLAGS); -+ } -+} -+ -+// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions -+static unsigned int search_function(void) -+{ -+ struct cgraph_node *node; -+ struct visited *visited = NULL; -+ -+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { -+ set_current_function_decl(NODE_DECL(node)); -+ set_plf_false(); -+ unset_current_function_decl(); -+ } -+ -+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { -+ gcc_assert(cgraph_function_flags_ready); -+#if BUILDING_GCC_VERSION <= 4007 -+ gcc_assert(node->reachable); -+#endif -+ -+ visited = handle_function(node, NULL, visited); -+ } -+ -+ free_visited(visited); -+ return 0; -+} -+ -+#if 
BUILDING_GCC_VERSION >= 4009 -+static const struct pass_data ipa_pass_data = { -+#else -+static struct ipa_opt_pass_d ipa_pass = { -+ .pass = { -+#endif -+ .type = SIMPLE_IPA_PASS, -+ .name = "size_overflow", -+#if BUILDING_GCC_VERSION >= 4008 -+ .optinfo_flags = OPTGROUP_NONE, -+#endif -+#if BUILDING_GCC_VERSION >= 4009 -+ .has_gate = false, -+ .has_execute = true, -+#else -+ .gate = NULL, -+ .execute = search_function, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+#endif -+ .tv_id = TV_NONE, -+ .properties_required = 0, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi, -+#if BUILDING_GCC_VERSION < 4009 -+ }, -+ .generate_summary = NULL, -+ .write_summary = NULL, -+ .read_summary = NULL, -+#if BUILDING_GCC_VERSION >= 4006 -+ .write_optimization_summary = NULL, -+ .read_optimization_summary = NULL, -+#endif -+ .stmt_fixup = NULL, -+ .function_transform_todo_flags_start = 0, -+ .function_transform = NULL, -+ .variable_transform = NULL, -+#endif -+}; -+ -+#if BUILDING_GCC_VERSION >= 4009 -+namespace { -+class ipa_pass : public ipa_opt_pass_d { -+public: -+ ipa_pass() : ipa_opt_pass_d(ipa_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {} -+ unsigned int execute() { return search_function(); } -+}; -+} -+ -+static opt_pass *make_ipa_pass(void) -+{ -+ return new ipa_pass(); -+} -+#else -+static struct opt_pass *make_ipa_pass(void) -+{ -+ return &ipa_pass.pass; -+} -+#endif -+ -+// data for the size_overflow asm stmt -+struct asm_data { -+ gimple def_stmt; -+ tree input; -+ tree output; -+}; -+ -+#if BUILDING_GCC_VERSION <= 4007 -+static VEC(tree, gc) *create_asm_io_list(tree string, tree io) -+#else -+static vec<tree, va_gc> *create_asm_io_list(tree string, tree io) -+#endif -+{ -+ tree list; -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC(tree, gc) *vec_list = NULL; -+#else -+ vec<tree, va_gc> *vec_list = NULL; -+#endif -+ -+ list = build_tree_list(NULL_TREE, string); -+ list = chainon(NULL_TREE, build_tree_list(list, io)); -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC_safe_push(tree, gc, vec_list, list); -+#else -+ vec_safe_push(vec_list, list); -+#endif -+ return vec_list; -+} -+ -+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data) -+{ -+ gimple asm_stmt; -+ gimple_stmt_iterator gsi; -+#if BUILDING_GCC_VERSION <= 4007 -+ VEC(tree, gc) *input, *output = NULL; -+#else -+ vec<tree, va_gc> *input, *output = NULL; -+#endif -+ -+ input = create_asm_io_list(str_input, asm_data->input); -+ -+ if (asm_data->output) -+ output = create_asm_io_list(str_output, asm_data->output); -+ -+ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL); -+ gsi = gsi_for_stmt(asm_data->def_stmt); -+ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT); -+ -+ if (asm_data->output) -+ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt; -+} -+ -+static void replace_call_lhs(const struct asm_data *asm_data) -+{ -+ gimple_set_lhs(asm_data->def_stmt, asm_data->input); -+ update_stmt(asm_data->def_stmt); -+ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt; -+} -+ -+static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result) -+{ -+ enum mark cur_fndecl_attr; -+ gimple phi = get_def_stmt(result); -+ unsigned int i, n = gimple_phi_num_args(phi); -+ -+ pointer_set_insert(visited, phi); -+ for (i = 
0; i < n; i++) { -+ tree arg = gimple_phi_arg_def(phi, i); -+ -+ cur_fndecl_attr = search_intentional(visited, arg); -+ if (cur_fndecl_attr != MARK_NO) -+ return cur_fndecl_attr; -+ } -+ return MARK_NO; -+} -+ -+static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs) -+{ -+ enum mark cur_fndecl_attr; -+ const_tree rhs1, rhs2; -+ gimple def_stmt = get_def_stmt(lhs); -+ -+ rhs1 = gimple_assign_rhs1(def_stmt); -+ rhs2 = gimple_assign_rhs2(def_stmt); -+ -+ cur_fndecl_attr = search_intentional(visited, rhs1); -+ if (cur_fndecl_attr != MARK_NO) -+ return cur_fndecl_attr; -+ return search_intentional(visited, rhs2); -+} -+ -+// Look up the intentional_overflow attribute on the caller and the callee functions. -+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs) -+{ -+ const_gimple def_stmt; -+ -+ if (TREE_CODE(lhs) != SSA_NAME) -+ return get_intentional_attr_type(lhs); -+ -+ def_stmt = get_def_stmt(lhs); -+ if (!def_stmt) -+ return MARK_NO; -+ -+ if (pointer_set_contains(visited, def_stmt)) -+ return MARK_NO; -+ -+ switch (gimple_code(def_stmt)) { -+ case GIMPLE_NOP: -+ return search_intentional(visited, SSA_NAME_VAR(lhs)); -+ case GIMPLE_ASM: -+ if (is_size_overflow_intentional_asm_turn_off(def_stmt)) -+ return MARK_TURN_OFF; -+ return MARK_NO; -+ case GIMPLE_CALL: -+ return MARK_NO; -+ case GIMPLE_PHI: -+ return search_intentional_phi(visited, lhs); -+ case GIMPLE_ASSIGN: -+ switch (gimple_num_ops(def_stmt)) { -+ case 2: -+ return search_intentional(visited, gimple_assign_rhs1(def_stmt)); -+ case 3: -+ return search_intentional_binary(visited, lhs); -+ } -+ case GIMPLE_RETURN: -+ return MARK_NO; -+ default: -+ debug_gimple_stmt((gimple)def_stmt); -+ error("%s: unknown gimple code", __func__); -+ gcc_unreachable(); -+ } -+} -+ -+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt. 
-+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
-+{
-+	const_tree fndecl;
-+	struct pointer_set_t *visited;
-+	enum mark cur_fndecl_attr, decl_attr = MARK_NO;
-+
-+	fndecl = get_interesting_orig_fndecl(stmt, argnum);
-+	if (is_end_intentional_intentional_attr(fndecl, argnum))
-+		decl_attr = MARK_NOT_INTENTIONAL;
-+	else if (is_yes_intentional_attr(fndecl, argnum))
-+		decl_attr = MARK_YES;
-+	else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+		return MARK_TURN_OFF;
-+	}
-+
-+	visited = pointer_set_create();
-+	cur_fndecl_attr = search_intentional(visited, arg);
-+	pointer_set_destroy(visited);
-+
-+	switch (cur_fndecl_attr) {
-+	case MARK_NO:
-+	case MARK_TURN_OFF:
-+		return cur_fndecl_attr;
-+	default:
-+		print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
-+		return MARK_YES;
-+	}
-+}
-+
-+static void check_missing_size_overflow_attribute(tree var)
-+{
-+	tree orig_fndecl;
-+	unsigned int num;
-+
-+	if (is_a_return_check(var))
-+		orig_fndecl = DECL_ORIGIN(var);
-+	else
-+		orig_fndecl = DECL_ORIGIN(current_function_decl);
-+
-+	num = get_function_num(var, orig_fndecl);
-+	if (num == CANNOT_FIND_ARG)
-+		return;
-+
-+	is_missing_function(orig_fndecl, num);
-+}
-+
-+static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
-+{
-+	gimple phi = get_def_stmt(result);
-+	unsigned int i, n = gimple_phi_num_args(phi);
-+
-+	pointer_set_insert(visited, phi);
-+	for (i = 0; i < n; i++) {
-+		tree arg = gimple_phi_arg_def(phi, i);
-+
-+		search_size_overflow_attribute(visited, arg);
-+	}
-+}
-+
-+static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
-+{
-+	const_gimple def_stmt = get_def_stmt(lhs);
-+	tree rhs1, rhs2;
-+
-+	rhs1 = gimple_assign_rhs1(def_stmt);
-+	rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+	search_size_overflow_attribute(visited, rhs1);
-+	search_size_overflow_attribute(visited, rhs2);
-+}
-+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
-+{
-+	const_gimple def_stmt;
-+
-+	if (TREE_CODE(lhs) == PARM_DECL) {
-+		check_missing_size_overflow_attribute(lhs);
-+		return;
-+	}
-+
-+	def_stmt = get_def_stmt(lhs);
-+	if (!def_stmt)
-+		return;
-+
-+	if (pointer_set_insert(visited, def_stmt))
-+		return;
-+
-+	switch (gimple_code(def_stmt)) {
-+	case GIMPLE_NOP:
-+		return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
-+	case GIMPLE_ASM:
-+		return;
-+	case GIMPLE_CALL: {
-+		tree fndecl = gimple_call_fndecl(def_stmt);
-+
-+		if (fndecl == NULL_TREE)
-+			return;
-+		check_missing_size_overflow_attribute(fndecl);
-+		return;
-+	}
-+	case GIMPLE_PHI:
-+		return search_size_overflow_attribute_phi(visited, lhs);
-+	case GIMPLE_ASSIGN:
-+		switch (gimple_num_ops(def_stmt)) {
-+		case 2:
-+			return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
-+		case 3:
-+			return search_size_overflow_attribute_binary(visited, lhs);
-+		}
-+	default:
-+		debug_gimple_stmt((gimple)def_stmt);
-+		error("%s: unknown gimple code", __func__);
-+		gcc_unreachable();
-+	}
-+}
-+
-+// Search missing entries in the hash table (invoked from the gimple pass)
-+static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
-+{
-+	tree fndecl = NULL_TREE;
-+	tree lhs;
-+	struct pointer_set_t *visited;
-+
-+	if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
-+		return;
-+
-+	if (num == 0) {
-+		gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
-+		lhs = gimple_return_retval(stmt);
-+	} else {
-+		gcc_assert(is_gimple_call(stmt));
-+		lhs = gimple_call_arg(stmt, num - 1);
-+		fndecl = gimple_call_fndecl(stmt);
-+	}
-+
-+	if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
-+		return;
-+
-+	visited = pointer_set_create();
-+	search_size_overflow_attribute(visited, lhs);
-+	pointer_set_destroy(visited);
-+}
-+
-+static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+	gimple_stmt_iterator gsi;
-+	gimple assign;
-+
-+	assign = gimple_build_assign(asm_data->input, asm_data->output);
-+	gsi = gsi_for_stmt(stmt);
-+	gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+	asm_data->def_stmt = assign;
-+
-+	asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
-+	asm_data->output = make_ssa_name(asm_data->output, stmt);
-+	if (gimple_code(stmt) == GIMPLE_RETURN)
-+		gimple_return_set_retval(stmt, asm_data->output);
-+	else
-+		gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
-+	update_stmt(stmt);
-+}
-+
-+static char *create_asm_comment(unsigned int argnum, const_gimple stmt, const char *mark_str)
-+{
-+	const char *fn_name;
-+	char *asm_comment;
-+	unsigned int len;
-+
-+	if (argnum == 0)
-+		fn_name = DECL_NAME_POINTER(current_function_decl);
-+	else
-+		fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
-+
-+	len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
-+	gcc_assert(len > 0);
-+
-+	return asm_comment;
-+}
-+
-+static const char *convert_mark_to_str(enum mark mark)
-+{
-+	switch (mark) {
-+	case MARK_NO:
-+		return OK_ASM_STR;
-+	case MARK_YES:
-+	case MARK_NOT_INTENTIONAL:
-+		return YES_ASM_STR;
-+	case MARK_TURN_OFF:
-+		return TURN_OFF_ASM_STR;
-+	}
-+
-+	gcc_unreachable();
-+}
-+
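convert_mark_to_str() and create_asm_comment() together produce the marker text, so a flagged value ends up annotated with a string of the form "<mark> <function> <argnum>", for example "# size_overflow MARK_YES vmalloc 1". Written out as source-level inline asm, the two marker shapes would look roughly like this (the variable name is invented; the real stmts are built directly as GIMPLE_ASM, not compiled from C):

	unsigned int size = user_len;					/* hypothetical tracked value */
	__asm__("# size_overflow" : "=rm"(size) : "0"(size));		/* plain tracking mark */
	__asm__("# size_overflow MARK_YES vmalloc 1" : : "rm"(size));	/* intentional-overflow mark for arg 1 */

The "0" matching constraint ties the output to input operand 0, so the asm emits no instructions; the stmt only has to survive in the IL until the ipa pass reads the comment string, after which it is removed.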
-+/* Create the input of the size_overflow asm stmt.
-+ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
-+ *   __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion,
-+ * otherwise create the input (for a phi stmt the output too) of the asm stmt.
-+ */
-+static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+	if (!asm_data->def_stmt) {
-+		asm_data->input = NULL_TREE;
-+		return;
-+	}
-+
-+	asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
-+	asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
-+
-+	switch (gimple_code(asm_data->def_stmt)) {
-+	case GIMPLE_ASSIGN:
-+	case GIMPLE_CALL:
-+		replace_call_lhs(asm_data);
-+		break;
-+	case GIMPLE_PHI:
-+		create_output_from_phi(stmt, argnum, asm_data);
-+		break;
-+	case GIMPLE_NOP: {
-+		enum mark mark;
-+		const char *mark_str;
-+		char *asm_comment;
-+
-+		mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
-+
-+		asm_data->input = asm_data->output;
-+		asm_data->output = NULL;
-+		asm_data->def_stmt = stmt;
-+
-+		mark_str = convert_mark_to_str(mark);
-+		asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+
-+		create_asm_stmt(asm_comment, build_string(2, "rm"), NULL, asm_data);
-+		free(asm_comment);
-+		asm_data->input = NULL_TREE;
-+		break;
-+	}
-+	case GIMPLE_ASM:
-+		if (is_size_overflow_asm(asm_data->def_stmt)) {
-+			asm_data->input = NULL_TREE;
-+			break;
-+		}
-+	default:
-+		debug_gimple_stmt(asm_data->def_stmt);
-+		gcc_unreachable();
-+	}
-+}
-+
-+/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
-+ * is of the right kind create the appropriate size_overflow asm stmts:
-+ *   __asm__("# size_overflow" : "=rm" D.3344_8 : "0" cicus.4_16);
-+ *   __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ */
-+static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
-+{
-+	struct asm_data asm_data;
-+	const char *mark_str;
-+	char *asm_comment;
-+	enum mark mark;
-+
-+	if (is_gimple_constant(output_node))
-+		return;
-+
-+	asm_data.output = output_node;
-+	mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
-+	if (mark != MARK_TURN_OFF)
-+		search_missing_size_overflow_attribute_gimple(stmt, argnum);
-+
-+	asm_data.def_stmt = get_def_stmt(asm_data.output);
-+	if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
-+		return;
-+
-+	create_asm_input(stmt, argnum, &asm_data);
-+	if (asm_data.input == NULL_TREE)
-+		return;
-+
-+	mark_str = convert_mark_to_str(mark);
-+	asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+	create_asm_stmt(asm_comment, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
-+	free(asm_comment);
-+}
-+
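The mark passed around here ultimately comes from the intentional_overflow attribute, whose argument encodes the three cases listed at get_attribute_param() earlier in this file (0, 1..MAX_PARAM, -1). On declarations that would be spelled like the following sketches (the functions are invented for illustration; only the attribute name and argument semantics come from this plugin):

	int no_intent(int len) __attribute__((intentional_overflow(0)));		/* MARK_NOT_INTENTIONAL */
	int arg2_wraps(int a, int b) __attribute__((intentional_overflow(2)));		/* MARK_YES for argument 2 */
	void *legacy_alloc(unsigned long n) __attribute__((intentional_overflow(-1)));	/* MARK_TURN_OFF */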
-+// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
-+static bool create_mark_asm(gimple stmt, enum mark mark)
-+{
-+	struct asm_data asm_data;
-+	const char *asm_str;
-+
-+	switch (mark) {
-+	case MARK_TURN_OFF:
-+		asm_str = TURN_OFF_ASM_STR;
-+		break;
-+	case MARK_NOT_INTENTIONAL:
-+	case MARK_YES:
-+		asm_str = YES_ASM_STR;
-+		break;
-+	default:
-+		gcc_unreachable();
-+	}
-+
-+	asm_data.def_stmt = stmt;
-+	asm_data.output = gimple_call_lhs(stmt);
-+
-+	if (asm_data.output == NULL_TREE) {
-+		asm_data.input = gimple_call_arg(stmt, 0);
-+		if (is_gimple_constant(asm_data.input))
-+			return false;
-+		asm_data.output = NULL;
-+		create_asm_stmt(asm_str, build_string(2, "rm"), NULL, &asm_data);
-+		return true;
-+	}
-+
-+	create_asm_input(stmt, 0, &asm_data);
-+	gcc_assert(asm_data.input != NULL_TREE);
-+
-+	create_asm_stmt(asm_str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
-+	return true;
-+}
-+
-+static bool is_from_cast(const_tree node)
-+{
-+	gimple def_stmt = get_def_stmt(node);
-+
-+	if (!def_stmt)
-+		return false;
-+
-+	if (gimple_assign_cast_p(def_stmt))
-+		return true;
-+
-+	return false;
-+}
-+
-+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool skip_ptr_minus(gimple stmt)
-+{
-+	const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
-+
-+	if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+		return false;
-+
-+	rhs1 = gimple_assign_rhs1(stmt);
-+	if (!is_from_cast(rhs1))
-+		return false;
-+
-+	rhs2 = gimple_assign_rhs2(stmt);
-+	if (!is_from_cast(rhs2))
-+		return false;
-+
-+	ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
-+	ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
-+
-+	if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
-+		return false;
-+
-+	create_mark_asm(stmt, MARK_YES);
-+	return true;
-+}
-+
-+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
-+{
-+	gimple def_stmt;
-+
-+	def_stmt = get_def_stmt(lhs);
-+	if (!def_stmt)
-+		return;
-+
-+	if (pointer_set_insert(visited, def_stmt))
-+		return;
-+
-+	switch (gimple_code(def_stmt)) {
-+	case GIMPLE_NOP:
-+	case GIMPLE_ASM:
-+	case GIMPLE_CALL:
-+		break;
-+	case GIMPLE_PHI: {
-+		unsigned int i, n = gimple_phi_num_args(def_stmt);
-+
-+		pointer_set_insert(visited, def_stmt);
-+
-+		for (i = 0; i < n; i++) {
-+			tree arg = gimple_phi_arg_def(def_stmt, i);
-+
-+			walk_use_def_ptr(visited, arg);
-+		}
-+	}
-+	case GIMPLE_ASSIGN:
-+		switch (gimple_num_ops(def_stmt)) {
-+		case 2:
-+			walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+			return;
-+		case 3:
-+			if (skip_ptr_minus(def_stmt))
-+				return;
-+
-+			walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+			walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
-+			return;
-+		default:
-+			return;
-+		}
-+	default:
-+		debug_gimple_stmt((gimple)def_stmt);
-+		error("%s: unknown gimple code", __func__);
-+		gcc_unreachable();
-+	}
-+}
-+
-+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
-+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
-+{
-+	struct pointer_set_t *visited;
-+
-+	visited = pointer_set_create();
-+	walk_use_def_ptr(visited, arg);
-+	pointer_set_destroy(visited);
-+}
-+
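Pointer differences look like overflow-prone integer subtraction once the casts are in place, yet a range check on them is meaningless, so skip_ptr_minus() marks them MARK_YES instead of instrumenting them. The cpuset_common_file_read() case cited above has this shape (a sketch with an invented helper):

	char *page = buf;
	char *s = format_into(page);	/* hypothetical helper returning the write cursor */
	size_t used = s - page;		/* ptr - ptr MINUS_EXPR: marked as intentional, not duplicated */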
-+static void insert_asm_arg(gimple stmt, unsigned int orig_argnum) -+{ -+ tree arg; -+ unsigned int argnum; -+ -+ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt)); -+ gcc_assert(argnum != 0); -+ if (argnum == CANNOT_FIND_ARG) -+ return; -+ -+ arg = gimple_call_arg(stmt, argnum - 1); -+ gcc_assert(arg != NULL_TREE); -+ -+ // skip all ptr - ptr expressions -+ insert_mark_not_intentional_asm_at_ptr(arg); -+ -+ create_size_overflow_asm(stmt, arg, argnum); -+} -+ -+// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array. -+static void set_argnum_attribute(const_tree attr, bool *argnums) -+{ -+ unsigned int argnum; -+ tree attr_value; -+ -+ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) { -+ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value)); -+ argnums[argnum] = true; -+ } -+} -+ -+// If a function arg or the return value is in the hash table then set its index in the array. -+static void set_argnum_hash(tree fndecl, bool *argnums) -+{ -+ unsigned int num; -+ const struct size_overflow_hash *hash; -+ -+ hash = get_function_hash(DECL_ORIGIN(fndecl)); -+ if (!hash) -+ return; -+ -+ for (num = 0; num <= MAX_PARAM; num++) { -+ if (!(hash->param & (1U << num))) -+ continue; -+ -+ argnums[num] = true; -+ } -+} -+ -+static bool is_all_the_argnums_empty(bool *argnums) -+{ -+ unsigned int i; -+ -+ for (i = 0; i <= MAX_PARAM; i++) -+ if (argnums[i]) -+ return false; -+ return true; -+} -+ -+// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute. -+static void search_interesting_args(tree fndecl, bool *argnums) -+{ -+ const_tree attr; -+ -+ set_argnum_hash(fndecl, argnums); -+ if (!is_all_the_argnums_empty(argnums)) -+ return; -+ -+ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl)); -+ if (attr && TREE_VALUE(attr)) -+ set_argnum_attribute(attr, argnums); -+} -+ -+/* -+ * Look up the intentional_overflow attribute that turns off ipa based duplication -+ * on the callee function. 
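 * (Illustration, not from the patch: on the kernel side this attribute is
 * typically applied through the __intentional_overflow() macro, e.g. the
 * invented prototype
 *	void *shrink_buf(void *buf, int delta) __intentional_overflow(-1);
 * where the -1 argument requests the whole-function turn-off that
 * get_intentional_attr_type() reports as MARK_TURN_OFF.)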
-+ */ -+static bool is_mark_turn_off_attribute(gimple stmt) -+{ -+ enum mark mark; -+ const_tree fndecl = gimple_call_fndecl(stmt); -+ -+ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl)); -+ if (mark == MARK_TURN_OFF) -+ return true; -+ return false; -+} -+ -+// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt -+static void handle_interesting_function(gimple stmt) -+{ -+ unsigned int argnum; -+ tree fndecl; -+ bool orig_argnums[MAX_PARAM + 1] = {false}; -+ -+ if (gimple_call_num_args(stmt) == 0) -+ return; -+ fndecl = gimple_call_fndecl(stmt); -+ if (fndecl == NULL_TREE) -+ return; -+ fndecl = DECL_ORIGIN(fndecl); -+ -+ if (is_mark_turn_off_attribute(stmt)) { -+ create_mark_asm(stmt, MARK_TURN_OFF); -+ return; -+ } -+ -+ search_interesting_args(fndecl, orig_argnums); -+ -+ for (argnum = 1; argnum < MAX_PARAM; argnum++) -+ if (orig_argnums[argnum]) -+ insert_asm_arg(stmt, argnum); -+} -+ -+// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt -+static void handle_interesting_ret(gimple stmt) -+{ -+ bool orig_argnums[MAX_PARAM + 1] = {false}; -+ -+ search_interesting_args(current_function_decl, orig_argnums); -+ -+ if (orig_argnums[0]) -+ insert_asm_ret(stmt); -+} -+ -+// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table -+static unsigned int search_interesting_functions(void) -+{ -+ basic_block bb; -+ -+ FOR_ALL_BB_FN(bb, cfun) { -+ gimple_stmt_iterator gsi; -+ -+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { -+ gimple stmt = gsi_stmt(gsi); -+ -+ if (is_size_overflow_asm(stmt)) -+ continue; -+ -+ if (is_gimple_call(stmt)) -+ handle_interesting_function(stmt); -+ else if (gimple_code(stmt) == GIMPLE_RETURN) -+ handle_interesting_ret(stmt); -+ } -+ } -+ return 0; -+} -+ -+/* -+ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass -+ * this pass inserts asm stmts to mark the interesting args -+ * that the ipa pass will detect and insert the size overflow checks for. 
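 * (Illustration, not from the patch: an argument becomes interesting either
 * through the hash table or through an annotation such as the invented
 * prototype
 *	void *my_alloc(unsigned long size) __attribute__((size_overflow(1)));
 * after which this pass wraps argument 1 of every my_alloc() call in the
 * marker asm shown earlier.)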
-+ */ -+#if BUILDING_GCC_VERSION >= 4009 -+static const struct pass_data insert_size_overflow_asm_pass_data = { -+#else -+static struct gimple_opt_pass insert_size_overflow_asm_pass = { -+ .pass = { -+#endif -+ .type = GIMPLE_PASS, -+ .name = "insert_size_overflow_asm", -+#if BUILDING_GCC_VERSION >= 4008 -+ .optinfo_flags = OPTGROUP_NONE, -+#endif -+#if BUILDING_GCC_VERSION >= 4009 -+ .has_gate = false, -+ .has_execute = true, -+#else -+ .gate = NULL, -+ .execute = search_interesting_functions, -+ .sub = NULL, -+ .next = NULL, -+ .static_pass_number = 0, -+#endif -+ .tv_id = TV_NONE, -+ .properties_required = PROP_cfg, -+ .properties_provided = 0, -+ .properties_destroyed = 0, -+ .todo_flags_start = 0, -+ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow -+#if BUILDING_GCC_VERSION < 4009 -+ } -+#endif -+}; -+ -+#if BUILDING_GCC_VERSION >= 4009 -+namespace { -+class insert_size_overflow_asm_pass : public gimple_opt_pass { -+public: -+ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {} -+ unsigned int execute() { return search_interesting_functions(); } -+}; -+} -+ -+static opt_pass *make_insert_size_overflow_asm_pass(void) -+{ -+ return new insert_size_overflow_asm_pass(); -+} -+#else -+static struct opt_pass *make_insert_size_overflow_asm_pass(void) -+{ -+ return &insert_size_overflow_asm_pass.pass; -+} -+#endif -+ -+// Create the noreturn report_size_overflow() function decl. -+static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data) -+{ -+ tree const_char_ptr_type_node; -+ tree fntype; -+ -+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0)); -+ -+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var) -+ fntype = build_function_type_list(void_type_node, -+ const_char_ptr_type_node, -+ unsigned_type_node, -+ const_char_ptr_type_node, -+ const_char_ptr_type_node, -+ NULL_TREE); -+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype); -+ -+ DECL_ASSEMBLER_NAME(report_size_overflow_decl); -+ TREE_PUBLIC(report_size_overflow_decl) = 1; -+ DECL_EXTERNAL(report_size_overflow_decl) = 1; -+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1; -+ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1; -+} -+ -+static unsigned int dump_functions(void) -+{ -+ struct cgraph_node *node; -+ -+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) { -+ basic_block bb; -+ -+ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node))); -+ current_function_decl = NODE_DECL(node); -+ -+ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl)); -+ -+ FOR_ALL_BB_FN(bb, cfun) { -+ gimple_stmt_iterator si; -+ -+ fprintf(stderr, "<bb %u>:\n", bb->index); -+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si)) -+ debug_gimple_stmt(gsi_stmt(si)); -+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) -+ debug_gimple_stmt(gsi_stmt(si)); -+ fprintf(stderr, "\n"); -+ } -+ -+ fprintf(stderr, "-------------------------------------------------------------------------\n"); -+ -+ pop_cfun(); -+ current_function_decl = NULL_TREE; -+ } -+ -+ fprintf(stderr, "###############################################################################\n"); -+ -+ return 0; -+} -+ -+#if BUILDING_GCC_VERSION >= 4009 -+static const struct 
pass_data dump_pass_data = {
-+#else
-+static struct ipa_opt_pass_d dump_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "dump",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = dump_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class dump_pass : public ipa_opt_pass_d {
-+public:
-+ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+ unsigned int execute() { return dump_functions(); }
-+};
-+}
-+
-+static opt_pass *make_dump_pass(void)
-+{
-+ return new dump_pass();
-+}
-+#else
-+static struct opt_pass *make_dump_pass(void)
-+{
-+ return &dump_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ int i;
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ bool enable = true;
-+ struct register_pass_info insert_size_overflow_asm_pass_info;
-+ struct register_pass_info __unused dump_before_pass_info;
-+ struct register_pass_info __unused dump_after_pass_info;
-+ struct register_pass_info ipa_pass_info;
-+ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
-+ {
-+ .base = &report_size_overflow_decl,
-+ .nelt = 1,
-+ .stride = sizeof(report_size_overflow_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ LAST_GGC_ROOT_TAB
-+ };
-+
-+ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
-+ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
-+ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
-+ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ dump_before_pass_info.pass = make_dump_pass();
-+ dump_before_pass_info.reference_pass_name = "increase_alignment";
-+ dump_before_pass_info.ref_pass_instance_number = 1;
-+ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ ipa_pass_info.pass = make_ipa_pass();
-+ ipa_pass_info.reference_pass_name = "increase_alignment";
-+ ipa_pass_info.ref_pass_instance_number = 1;
-+ ipa_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ dump_after_pass_info.pass = make_dump_pass();
-+ dump_after_pass_info.reference_pass_name = "increase_alignment";
-+ dump_after_pass_info.ref_pass_instance_number = 1;
-+ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "no-size-overflow")) {
-+ enable = false;
-+ continue;
-+ }
-+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+ 
register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
-+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
index 0000000..dd94983
@@ -120256,19 +121235,6 @@ index 6789d78..4afd019e 100644
 + .endm
 + #endif
-diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
-index ce9ed99..8c805a0 100644
---- a/virt/kvm/ioapic.c
-+++ b/virt/kvm/ioapic.c
-@@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
- BUG_ON(ioapic->rtc_status.pending_eoi != 0);
- ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
- ioapic->rtc_status.dest_map);
-- ioapic->rtc_status.pending_eoi = ret;
-+ ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
- } else
- ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
-
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 03a0381..8b31923 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c