author     Natanael Copa <ncopa@alpinelinux.org>   2014-07-29 08:05:40 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>   2014-07-29 08:38:28 +0000
commit     506b93cba970e73e3ca37abe5142d61a122e4586 (patch)
tree       735e8ebfdee4bd81f529f2fd16c727204df004c2 /main
parent     7825a6536e55bfcd20bb84eec0fabc23bf879c71 (diff)
download   aports-506b93cba970e73e3ca37abe5142d61a122e4586.tar.bz2
           aports-506b93cba970e73e3ca37abe5142d61a122e4586.tar.xz
main/linux-grsec: upgrade to 3.14.14
Diffstat (limited to 'main')
-rw-r--r--  main/linux-grsec/APKBUILD                                                                                              |  18
-rw-r--r--  main/linux-grsec/grsecurity-3.0-3.14.14-201407282111.patch (renamed from main/linux-grsec/grsecurity-3.0-3.14.13-201407232159.patch) | 667
2 files changed, 197 insertions, 488 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 5547bd3f95..f7440e25ab 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,12 +2,12 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.14.13
+pkgver=3.14.14
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
esac
-pkgrel=1
+pkgrel=0
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-3.0-3.14.13-201407232159.patch
+ grsecurity-3.0-3.14.14-201407282111.patch
fix-memory-map-for-PIE-applications.patch
imx6q-no-unclocked-sleep.patch
@@ -165,24 +165,24 @@ dev() {
}
md5sums="b621207b3f6ecbb67db18b13258f8ea8 linux-3.14.tar.xz
-132470897fc5d57f5ac7d658100cc430 patch-3.14.13.xz
-a5ee03e4eb9c979a68214661ebf1dece grsecurity-3.0-3.14.13-201407232159.patch
+a0349eb104a30f55d0aef3f960a4f0df patch-3.14.14.xz
+c462f939ea43655c0aaf007ea507366a grsecurity-3.0-3.14.14-201407282111.patch
c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch
1a307fc1d63231bf01d22493a4f14378 imx6q-no-unclocked-sleep.patch
69688dbc1669bfd04dec7bb316e58b8d kernelconfig.x86
e0b3a0898935183bf42078350d2e31f1 kernelconfig.x86_64
0d71b1663f7cbfffc6e403deca4bbe86 kernelconfig.armhf"
sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa linux-3.14.tar.xz
-e6b1a87470ab9f749002959e2c9ca2f7229b4b34f313120b4800eb39f08c4698 patch-3.14.13.xz
-8f892153ab184acec6575ceda7e2b5007aa2e934b193f059064d88b6a7f47477 grsecurity-3.0-3.14.13-201407232159.patch
+282451336497b075749a99678de0ef638a46f22fbb0837480dfd354fb2561c1f patch-3.14.14.xz
+f3021afbbad7c90578b37a4ad89b0067da4a613e988a71f4019733cd9a3129d8 grsecurity-3.0-3.14.14-201407282111.patch
500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7 fix-memory-map-for-PIE-applications.patch
21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3 imx6q-no-unclocked-sleep.patch
61c9344b8643ab81b0d7230f77fa003c8e2ce46bf4ea18315708e77ccef5de83 kernelconfig.x86
1ce44d635856578779ff6c0d1ba97c4ce44e988411e3c702a79859c28bd8c91c kernelconfig.x86_64
3cddaac02211dd0f5eb4531aecc3a1427f29dcec7b31d9fe0042192d591bcdc8 kernelconfig.armhf"
sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e linux-3.14.tar.xz
-49ec8684af792696230c62960dd2e1623c5ed078de4954739c366cba404cb2e6d5fbd62a8173d48dc29627c9a38e99dbeb9e96fb4f6c6a2fa077e6c5f56535e8 patch-3.14.13.xz
-17289ac3e3ffbd34785d9827cefbf6b7da829e1a878c5e16378b3bb681050fc07d4e94f29b9fcbfe74df21d2743377bc6462fdb25f0ee63f709864cb18060760 grsecurity-3.0-3.14.13-201407232159.patch
+fc838621a786141f21b86741985e99e5c803b7a9476f126d95678bc6e23205e4cd3c33012b30559a5f4dc4bf25199963d1a8347d6486407581ec0677dd64d4a6 patch-3.14.14.xz
+27d7bebc591f1c4800ebc4289900045346e8adebd6529e514d3ba1a9d9f043ca711ea0b1c43aa061c70906941bd3d8d072c7ee8bc8d6020fe3d236748031984e grsecurity-3.0-3.14.14-201407282111.patch
4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7 fix-memory-map-for-PIE-applications.patch
87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221 imx6q-no-unclocked-sleep.patch
0889c17d6509b8078aa2fd1ba2977a8fa88260bd080e780aeefd7eb6a8805b3bb9a3132991fc1050e6b7bce0ca118ce7f2c57c0f33459812f69c4dee75ff96cf kernelconfig.x86
diff --git a/main/linux-grsec/grsecurity-3.0-3.14.13-201407232159.patch b/main/linux-grsec/grsecurity-3.0-3.14.14-201407282111.patch
index 81dff0ffb4..f2197e016b 100644
--- a/main/linux-grsec/grsecurity-3.0-3.14.13-201407232159.patch
+++ b/main/linux-grsec/grsecurity-3.0-3.14.14-201407282111.patch
@@ -287,7 +287,7 @@ index 7116fda..d8ed6e8 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 7a2981c..9fadd78 100644
+index 230c7f6..64a1278 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -386,7 +386,16 @@ index 7a2981c..9fadd78 100644
include $(srctree)/arch/$(SRCARCH)/Makefile
ifdef CONFIG_READABLE_ASM
-@@ -779,7 +846,7 @@ export mod_sign_cmd
+@@ -639,6 +706,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
+ endif
+ endif
+
++KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
++
+ ifdef CONFIG_DEBUG_INFO
+ KBUILD_CFLAGS += -g
+ KBUILD_AFLAGS += -Wa,--gdwarf-2
+@@ -779,7 +848,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -395,7 +404,7 @@ index 7a2981c..9fadd78 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -828,6 +895,8 @@ endif
+@@ -828,6 +897,8 @@ endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -404,7 +413,7 @@ index 7a2981c..9fadd78 100644
$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -837,7 +906,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+@@ -837,7 +908,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -413,7 +422,7 @@ index 7a2981c..9fadd78 100644
$(Q)$(MAKE) $(build)=$@
define filechk_kernel.release
-@@ -880,10 +949,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+@@ -880,10 +951,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
archprepare: archheaders archscripts prepare1 scripts_basic
@@ -427,7 +436,7 @@ index 7a2981c..9fadd78 100644
prepare: prepare0
# Generate some files
-@@ -991,6 +1063,8 @@ all: modules
+@@ -991,6 +1065,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -436,7 +445,7 @@ index 7a2981c..9fadd78 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1006,7 +1080,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1006,7 +1082,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -445,7 +454,7 @@ index 7a2981c..9fadd78 100644
# Target to install modules
PHONY += modules_install
-@@ -1072,7 +1146,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
+@@ -1072,7 +1148,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.priv signing_key.x509 x509.genkey \
extra_certificates signing_key.x509.keyid \
@@ -457,7 +466,7 @@ index 7a2981c..9fadd78 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1111,7 +1188,7 @@ distclean: mrproper
+@@ -1111,7 +1190,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -466,7 +475,7 @@ index 7a2981c..9fadd78 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1273,6 +1350,8 @@ PHONY += $(module-dirs) modules
+@@ -1273,6 +1352,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -475,7 +484,7 @@ index 7a2981c..9fadd78 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1412,17 +1491,21 @@ else
+@@ -1412,17 +1493,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -501,7 +510,7 @@ index 7a2981c..9fadd78 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1432,11 +1515,15 @@ endif
+@@ -1432,11 +1517,15 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -852,10 +861,10 @@ index 98838a0..b304fb4 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 44298ad..29a20c0 100644
+index 4733d32..b142a40 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1862,7 +1862,7 @@ config ALIGNMENT_TRAP
+@@ -1863,7 +1863,7 @@ config ALIGNMENT_TRAP
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
@@ -864,7 +873,7 @@ index 44298ad..29a20c0 100644
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
-@@ -2125,6 +2125,7 @@ config XIP_PHYS_ADDR
+@@ -2126,6 +2126,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
@@ -7960,10 +7969,10 @@ index d72197f..c017c84 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 2156fa2..cc28613 100644
+index ee3c660..afa4212 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -393,6 +393,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
+@@ -394,6 +394,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
@@ -12643,7 +12652,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 1981dd9..8f3ff4d 100644
+index 7324107..a63fd9f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -126,7 +126,7 @@ config X86
@@ -12652,10 +12661,10 @@ index 1981dd9..8f3ff4d 100644
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
- select HAVE_CC_STACKPROTECTOR
+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
+ select ARCH_SUPPORTS_ATOMIC_RMW
config INSTRUCTION_DECODER
- def_bool y
-@@ -251,7 +251,7 @@ config X86_HT
+@@ -252,7 +252,7 @@ config X86_HT
config X86_32_LAZY_GS
def_bool y
@@ -12664,7 +12673,7 @@ index 1981dd9..8f3ff4d 100644
config ARCH_HWEIGHT_CFLAGS
string
-@@ -589,6 +589,7 @@ config SCHED_OMIT_FRAME_POINTER
+@@ -590,6 +590,7 @@ config SCHED_OMIT_FRAME_POINTER
menuconfig HYPERVISOR_GUEST
bool "Linux guest support"
@@ -12672,7 +12681,7 @@ index 1981dd9..8f3ff4d 100644
---help---
Say Y here to enable options for running Linux under various hyper-
visors. This option enables basic hypervisor detection and platform
-@@ -1111,7 +1112,7 @@ choice
+@@ -1112,7 +1113,7 @@ choice
config NOHIGHMEM
bool "off"
@@ -12681,7 +12690,7 @@ index 1981dd9..8f3ff4d 100644
---help---
Linux can use up to 64 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
-@@ -1148,7 +1149,7 @@ config NOHIGHMEM
+@@ -1149,7 +1150,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
@@ -12690,7 +12699,7 @@ index 1981dd9..8f3ff4d 100644
---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
-@@ -1201,7 +1202,7 @@ config PAGE_OFFSET
+@@ -1202,7 +1203,7 @@ config PAGE_OFFSET
hex
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
@@ -12699,7 +12708,7 @@ index 1981dd9..8f3ff4d 100644
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1605,6 +1606,7 @@ source kernel/Kconfig.hz
+@@ -1606,6 +1607,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
@@ -12707,7 +12716,7 @@ index 1981dd9..8f3ff4d 100644
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1756,7 +1758,9 @@ config X86_NEED_RELOCS
+@@ -1757,7 +1759,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
@@ -12718,7 +12727,7 @@ index 1981dd9..8f3ff4d 100644
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -1836,9 +1840,10 @@ config DEBUG_HOTPLUG_CPU0
+@@ -1837,9 +1841,10 @@ config DEBUG_HOTPLUG_CPU0
If unsure, say N.
config COMPAT_VDSO
@@ -21968,10 +21977,10 @@ index 639d128..e92d7e5 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index aa333d9..f9db700 100644
+index 1340ebf..fc6d5c9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -2309,10 +2309,10 @@ __init int intel_pmu_init(void)
+@@ -2318,10 +2318,10 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
if (boot_cpu_has(X86_FEATURE_PDCM)) {
@@ -28157,7 +28166,7 @@ index 57409f6..b505597 100644
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index cfbe99f..a6e8fa7 100644
+index e0d1d7a..db035d4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
@@ -41312,12 +41321,12 @@ index 0bb86e6..d41416d 100644
return -EFAULT;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
-index 28f84b4..fb3e224 100644
+index 3485bdc..20d26e3 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
-@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
-
- pending = xchg(&qdev->ram_header->int_pending, 0);
+@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
+ if (!pending)
+ return IRQ_NONE;
- atomic_inc(&qdev->irq_received);
+ atomic_inc_unchecked(&qdev->irq_received);
@@ -41339,7 +41348,7 @@ index 28f84b4..fb3e224 100644
wake_up_all(&qdev->io_cmd_event);
}
if (pending & QXL_INTERRUPT_ERROR) {
-@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
+@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
init_waitqueue_head(&qdev->io_cmd_event);
INIT_WORK(&qdev->client_monitors_config_work,
qxl_client_monitors_config_work_func);
@@ -43699,10 +43708,10 @@ index 228632c9..edfe331 100644
bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
-index ac2d41b..c657aa4 100644
+index 12698ee..a58a958 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
-@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+@@ -85,7 +85,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
* Supported arch specific GIC irq extension.
* Default make them NULL.
*/
@@ -43711,7 +43720,7 @@ index ac2d41b..c657aa4 100644
.irq_eoi = NULL,
.irq_mask = NULL,
.irq_unmask = NULL,
-@@ -336,7 +336,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+@@ -337,7 +337,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
@@ -44523,7 +44532,7 @@ index 6a7f2b8..fea0bde 100644
"start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
-index b086a94..74cb67e 100644
+index e9d33ad..dae9880d 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
@@ -46698,20 +46707,6 @@ index 455d4c3..3353ee7 100644
}
if (!request_mem_region(mem->start, mem_size, pdev->name)) {
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-index dbcff50..5ed5124 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-@@ -793,7 +793,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-
- return;
- }
-- bnx2x_frag_free(fp, new_data);
-+ if (new_data)
-+ bnx2x_frag_free(fp, new_data);
- drop:
- /* drop the packet and keep the buffer in the bin */
- DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f..5a8a2ac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -46844,7 +46839,7 @@ index c05b66d..ed69872 100644
break;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
-index 36c8061..ca5e1e0 100644
+index 80bfa03..45114e6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -534,7 +534,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
@@ -52873,7 +52868,7 @@ index 2518c32..1c201bb 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 3baa51b..92907cf 100644
+index 36b1e85..18fb0a4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
@@ -52884,7 +52879,7 @@ index 3baa51b..92907cf 100644
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-@@ -4483,6 +4484,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+@@ -4502,6 +4503,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto done;
return;
}
@@ -56887,7 +56882,7 @@ index ce25d75..dc09eeb 100644
&data);
if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
-index e609e15..c9fcd97 100644
+index 6d68e01..573d8dc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -380,7 +380,7 @@ static int aio_setup_ring(struct kioctx *ctx)
@@ -59032,7 +59027,7 @@ index a81147e..20bf2b5 100644
/*
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
-index 3881610..ab3df0b 100644
+index 3881610..d4599d0 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
@@ -59050,7 +59045,7 @@ index 3881610..ab3df0b 100644
return -EFAULT;
- if (get_user(datap, &umsgs[i].buf) ||
- put_user(compat_ptr(datap), &tmsgs[i].buf))
-+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
++ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
return -EFAULT;
}
@@ -62039,10 +62034,10 @@ index 0a648bb..8d463f1 100644
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
-index 1d1292c..bba17ea 100644
+index 342f0239..d67794c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
-@@ -1418,7 +1418,7 @@ static char *read_link(struct dentry *dentry)
+@@ -1419,7 +1419,7 @@ static char *read_link(struct dentry *dentry)
return link;
}
@@ -84712,7 +84707,7 @@ index 0dfcc92..7967849 100644
/* Structure to track chunk fragments that have been acked, but peer
diff --git a/include/net/sock.h b/include/net/sock.h
-index 57c31dd..f5e5196 100644
+index 2f7bc43..530dadc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -348,7 +348,7 @@ struct sock {
@@ -84751,17 +84746,6 @@ index 57c31dd..f5e5196 100644
static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
-@@ -1755,8 +1755,8 @@ sk_dst_get(struct sock *sk)
-
- rcu_read_lock();
- dst = rcu_dereference(sk->sk_dst_cache);
-- if (dst)
-- dst_hold(dst);
-+ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
-+ dst = NULL;
- rcu_read_unlock();
- return dst;
- }
@@ -1830,7 +1830,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
}
@@ -86803,7 +86787,7 @@ index 0b097c8..11dd5c5 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 0e7fea7..f869fde 100644
+index f774e93..c602612 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -158,8 +158,15 @@ static struct srcu_struct pmus_srcu;
@@ -89525,7 +89509,7 @@ index 2fac9cc..56fef29 100644
select LZO_COMPRESS
select LZO_DECOMPRESS
diff --git a/kernel/power/process.c b/kernel/power/process.c
-index 06ec886..9dba35e 100644
+index 14f9a8d..98ee610 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
@@ -91452,10 +91436,10 @@ index 7c7964c..2a0d412 100644
update_vsyscall_tz();
if (firsttime) {
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
-index 88c9c65..7497ebc 100644
+index fe75444..190c528 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
-@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
+@@ -811,7 +811,7 @@ static int __init alarmtimer_init(void)
struct platform_device *pdev;
int error = 0;
int i;
@@ -91656,7 +91640,7 @@ index 4f3a3c03..04b7886 100644
ret = -EIO;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 868633e..921dc41 100644
+index e3be87e..7480b36 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1965,12 +1965,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
@@ -91719,7 +91703,7 @@ index 868633e..921dc41 100644
ftrace_graph_active++;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index 04202d9..e3e4242 100644
+index 0954450..0ed035c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -352,9 +352,9 @@ struct buffer_data_page {
@@ -91745,7 +91729,7 @@ index 04202d9..e3e4242 100644
local_t dropped_events;
local_t committing;
local_t commits;
-@@ -995,8 +995,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -991,8 +991,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*
* We add a counter to the write field to denote this.
*/
@@ -91756,7 +91740,7 @@ index 04202d9..e3e4242 100644
/*
* Just make sure we have seen our old_write and synchronize
-@@ -1024,8 +1024,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -1020,8 +1020,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
* cmpxchg to only update if an interrupt did not already
* do it for us. If the cmpxchg fails, we don't care.
*/
@@ -91767,7 +91751,7 @@ index 04202d9..e3e4242 100644
/*
* No need to worry about races with clearing out the commit.
-@@ -1389,12 +1389,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+@@ -1385,12 +1385,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
@@ -91782,7 +91766,7 @@ index 04202d9..e3e4242 100644
}
static int
-@@ -1489,7 +1489,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+@@ -1485,7 +1485,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
* bytes consumed in ring buffer from here.
* Increment overrun to account for the lost events.
*/
@@ -91791,7 +91775,7 @@ index 04202d9..e3e4242 100644
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
-@@ -2067,7 +2067,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* it is our responsibility to update
* the counters.
*/
@@ -91800,7 +91784,7 @@ index 04202d9..e3e4242 100644
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
/*
-@@ -2217,7 +2217,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
if (tail == BUF_PAGE_SIZE)
tail_page->real_end = 0;
@@ -91809,7 +91793,7 @@ index 04202d9..e3e4242 100644
return;
}
-@@ -2252,7 +2252,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
rb_event_set_padding(event);
/* Set the write back to the previous setting */
@@ -91818,7 +91802,7 @@ index 04202d9..e3e4242 100644
return;
}
-@@ -2264,7 +2264,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE;
@@ -91827,7 +91811,7 @@ index 04202d9..e3e4242 100644
}
/*
-@@ -2290,7 +2290,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* about it.
*/
if (unlikely(next_page == commit_page)) {
@@ -91836,7 +91820,7 @@ index 04202d9..e3e4242 100644
goto out_reset;
}
-@@ -2346,7 +2346,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->tail_page) &&
(cpu_buffer->commit_page ==
cpu_buffer->reader_page))) {
@@ -91845,7 +91829,7 @@ index 04202d9..e3e4242 100644
goto out_reset;
}
}
-@@ -2394,7 +2394,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
length += RB_LEN_TIME_EXTEND;
tail_page = cpu_buffer->tail_page;
@@ -91854,7 +91838,7 @@ index 04202d9..e3e4242 100644
/* set write to only the index of the write */
write &= RB_WRITE_MASK;
-@@ -2418,7 +2418,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2414,7 +2414,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
@@ -91863,7 +91847,7 @@ index 04202d9..e3e4242 100644
/*
* If this is the first commit on the page, then update
-@@ -2451,7 +2451,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2447,7 +2447,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
@@ -91872,7 +91856,7 @@ index 04202d9..e3e4242 100644
unsigned long event_length = rb_event_length(event);
/*
* This is on the tail page. It is possible that
-@@ -2461,7 +2461,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2457,7 +2457,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
*/
old_index += write_mask;
new_index += write_mask;
@@ -91881,7 +91865,7 @@ index 04202d9..e3e4242 100644
if (index == old_index) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
-@@ -2853,7 +2853,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2849,7 +2849,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
@@ -91890,7 +91874,7 @@ index 04202d9..e3e4242 100644
return;
}
-@@ -2865,7 +2865,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2861,7 +2861,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
start = bpage;
do {
if (bpage->page == (void *)addr) {
@@ -91899,7 +91883,7 @@ index 04202d9..e3e4242 100644
return;
}
rb_inc_page(cpu_buffer, &bpage);
-@@ -3149,7 +3149,7 @@ static inline unsigned long
+@@ -3145,7 +3145,7 @@ static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
return local_read(&cpu_buffer->entries) -
@@ -91908,7 +91892,7 @@ index 04202d9..e3e4242 100644
}
/**
-@@ -3238,7 +3238,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3234,7 +3234,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -91917,7 +91901,7 @@ index 04202d9..e3e4242 100644
return ret;
}
-@@ -3261,7 +3261,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3257,7 +3257,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -91926,7 +91910,7 @@ index 04202d9..e3e4242 100644
return ret;
}
-@@ -3346,7 +3346,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+@@ -3342,7 +3342,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -91935,7 +91919,7 @@ index 04202d9..e3e4242 100644
}
return overruns;
-@@ -3522,8 +3522,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3518,8 +3518,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Reset the reader page to size zero.
*/
@@ -91946,7 +91930,7 @@ index 04202d9..e3e4242 100644
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;
-@@ -3557,7 +3557,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3553,7 +3553,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* want to compare with the last_overrun.
*/
smp_mb();
@@ -91955,7 +91939,7 @@ index 04202d9..e3e4242 100644
/*
* Here's the tricky part.
-@@ -4127,8 +4127,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4123,8 +4123,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -91966,7 +91950,7 @@ index 04202d9..e3e4242 100644
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
-@@ -4138,14 +4138,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4134,14 +4134,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -91985,7 +91969,7 @@ index 04202d9..e3e4242 100644
local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
-@@ -4550,8 +4550,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+@@ -4546,8 +4546,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
rb_init_page(bpage);
bpage = reader->page;
reader->page = *data_page;
@@ -91997,10 +91981,10 @@ index 04202d9..e3e4242 100644
*data_page = bpage;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 922657f..3d229d9 100644
+index 7e259b2..e9d9452 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3398,7 +3398,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -3412,7 +3412,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
@@ -92043,10 +92027,10 @@ index 26dc348..8708ca7 100644
+ return atomic64_inc_return_unchecked(&trace_counter);
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 7b16d40..1b2875d 100644
+index e4c4efc..ef4e975 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -1681,7 +1681,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
+@@ -1682,7 +1682,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
return 0;
}
@@ -96375,7 +96359,7 @@ index cdbd312..2e1e0b9 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index 1f18c9d..6aa94ab 100644
+index ff85863..6aa94ab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -96387,7 +96371,7 @@ index 1f18c9d..6aa94ab 100644
#ifdef CONFIG_SHMEM
/*
-@@ -77,14 +77,15 @@ static struct vfsmount *shm_mnt;
+@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
#define BOGO_DIRENT_SIZE 20
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
@@ -96395,180 +96379,8 @@ index 1f18c9d..6aa94ab 100644
+#define SHORT_SYMLINK_LEN 64
/*
-- * shmem_fallocate and shmem_writepage communicate via inode->i_private
-- * (with i_mutex making sure that it has only one user at a time):
-- * we would prefer not to enlarge the shmem inode just for that.
-+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
-+ * inode->i_private (with i_mutex making sure that it has only one user at
-+ * a time): we would prefer not to enlarge the shmem inode just for that.
- */
- struct shmem_falloc {
-+ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
- pgoff_t start; /* start of range currently being fallocated */
- pgoff_t next; /* the next page offset to be fallocated */
- pgoff_t nr_falloced; /* how many new pages have been fallocated */
-@@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
- return;
-
- index = start;
-- for ( ; ; ) {
-+ while (index < end) {
- cond_resched();
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- pvec.pages, indices);
- if (!pvec.nr) {
-- if (index == start || unfalloc)
-+ /* If all gone or hole-punch or unfalloc, we're done */
-+ if (index == start || end != -1)
- break;
-+ /* But if truncating, restart to make sure all gone */
- index = start;
- continue;
- }
-- if ((index == start || unfalloc) && indices[0] >= end) {
-- shmem_deswap_pagevec(&pvec);
-- pagevec_release(&pvec);
-- break;
-- }
- mem_cgroup_uncharge_start();
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
-@@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
- if (radix_tree_exceptional_entry(page)) {
- if (unfalloc)
- continue;
-- nr_swaps_freed += !shmem_free_swap(mapping,
-- index, page);
-+ if (shmem_free_swap(mapping, index, page)) {
-+ /* Swap was replaced by page: retry */
-+ index--;
-+ break;
-+ }
-+ nr_swaps_freed++;
- continue;
- }
-
-@@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
- if (page->mapping == mapping) {
- VM_BUG_ON_PAGE(PageWriteback(page), page);
- truncate_inode_page(mapping, page);
-+ } else {
-+ /* Page was replaced by swap: retry */
-+ unlock_page(page);
-+ index--;
-+ break;
- }
- }
- unlock_page(page);
-@@ -824,6 +831,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
- spin_lock(&inode->i_lock);
- shmem_falloc = inode->i_private;
- if (shmem_falloc &&
-+ !shmem_falloc->waitq &&
- index >= shmem_falloc->start &&
- index < shmem_falloc->next)
- shmem_falloc->nr_unswapped++;
-@@ -1298,6 +1306,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- int error;
- int ret = VM_FAULT_LOCKED;
-
-+ /*
-+ * Trinity finds that probing a hole which tmpfs is punching can
-+ * prevent the hole-punch from ever completing: which in turn
-+ * locks writers out with its hold on i_mutex. So refrain from
-+ * faulting pages into the hole while it's being punched. Although
-+ * shmem_undo_range() does remove the additions, it may be unable to
-+ * keep up, as each new page needs its own unmap_mapping_range() call,
-+ * and the i_mmap tree grows ever slower to scan if new vmas are added.
-+ *
-+ * It does not matter if we sometimes reach this check just before the
-+ * hole-punch begins, so that one fault then races with the punch:
-+ * we just need to make racing faults a rare case.
-+ *
-+ * The implementation below would be much simpler if we just used a
-+ * standard mutex or completion: but we cannot take i_mutex in fault,
-+ * and bloating every shmem inode for this unlikely case would be sad.
-+ */
-+ if (unlikely(inode->i_private)) {
-+ struct shmem_falloc *shmem_falloc;
-+
-+ spin_lock(&inode->i_lock);
-+ shmem_falloc = inode->i_private;
-+ if (shmem_falloc &&
-+ shmem_falloc->waitq &&
-+ vmf->pgoff >= shmem_falloc->start &&
-+ vmf->pgoff < shmem_falloc->next) {
-+ wait_queue_head_t *shmem_falloc_waitq;
-+ DEFINE_WAIT(shmem_fault_wait);
-+
-+ ret = VM_FAULT_NOPAGE;
-+ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
-+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-+ /* It's polite to up mmap_sem if we can */
-+ up_read(&vma->vm_mm->mmap_sem);
-+ ret = VM_FAULT_RETRY;
-+ }
-+
-+ shmem_falloc_waitq = shmem_falloc->waitq;
-+ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
-+ TASK_UNINTERRUPTIBLE);
-+ spin_unlock(&inode->i_lock);
-+ schedule();
-+
-+ /*
-+ * shmem_falloc_waitq points into the shmem_fallocate()
-+ * stack of the hole-punching task: shmem_falloc_waitq
-+ * is usually invalid by the time we reach here, but
-+ * finish_wait() does not dereference it in that case;
-+ * though i_lock needed lest racing with wake_up_all().
-+ */
-+ spin_lock(&inode->i_lock);
-+ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
-+ spin_unlock(&inode->i_lock);
-+ return ret;
-+ }
-+ spin_unlock(&inode->i_lock);
-+ }
-+
- error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
- if (error)
- return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
-@@ -1817,12 +1883,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
- struct address_space *mapping = file->f_mapping;
- loff_t unmap_start = round_up(offset, PAGE_SIZE);
- loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
-+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
-+
-+ shmem_falloc.waitq = &shmem_falloc_waitq;
-+ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
-+ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
-+ spin_lock(&inode->i_lock);
-+ inode->i_private = &shmem_falloc;
-+ spin_unlock(&inode->i_lock);
-
- if ((u64)unmap_end > (u64)unmap_start)
- unmap_mapping_range(mapping, unmap_start,
- 1 + unmap_end - unmap_start, 0);
- shmem_truncate_range(inode, offset, offset + len - 1);
- /* No need to unmap again: hole-punching leaves COWed pages */
-+
-+ spin_lock(&inode->i_lock);
-+ inode->i_private = NULL;
-+ wake_up_all(&shmem_falloc_waitq);
-+ spin_unlock(&inode->i_lock);
- error = 0;
- goto out;
- }
-@@ -1840,6 +1919,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
- goto out;
- }
-
-+ shmem_falloc.waitq = NULL;
- shmem_falloc.start = start;
- shmem_falloc.next = start;
- shmem_falloc.nr_falloced = 0;
-@@ -2218,6 +2298,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+@@ -2298,6 +2298,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -96580,7 +96392,7 @@ index 1f18c9d..6aa94ab 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2273,6 +2358,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2353,6 +2358,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -96596,7 +96408,7 @@ index 1f18c9d..6aa94ab 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -2585,8 +2679,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2665,8 +2679,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -98954,10 +98766,10 @@ index a16ed7b..eb44d17 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 4c1b483..3d45b13 100644
+index 37bddf7..c78c480 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -1688,14 +1688,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+@@ -1695,14 +1695,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
@@ -98974,7 +98786,7 @@ index 4c1b483..3d45b13 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -2453,7 +2453,7 @@ static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+@@ -2460,7 +2460,7 @@ static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
struct dev_gso_cb {
void (*destructor)(struct sk_buff *skb);
@@ -98983,7 +98795,7 @@ index 4c1b483..3d45b13 100644
#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-@@ -3227,7 +3227,7 @@ enqueue:
+@@ -3234,7 +3234,7 @@ enqueue:
local_irq_restore(flags);
@@ -98992,7 +98804,7 @@ index 4c1b483..3d45b13 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -3308,7 +3308,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3315,7 +3315,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -99001,7 +98813,7 @@ index 4c1b483..3d45b13 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
-@@ -3645,7 +3645,7 @@ ncls:
+@@ -3652,7 +3652,7 @@ ncls:
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
drop:
@@ -99010,7 +98822,7 @@ index 4c1b483..3d45b13 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -4333,7 +4333,7 @@ void netif_napi_del(struct napi_struct *napi)
+@@ -4342,7 +4342,7 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
@@ -99019,7 +98831,7 @@ index 4c1b483..3d45b13 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -6302,7 +6302,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -6311,7 +6311,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -99046,40 +98858,6 @@ index cf999e0..c59a975 100644
}
}
EXPORT_SYMBOL(dev_load);
-diff --git a/net/core/dst.c b/net/core/dst.c
-index ca4231e..15b6792 100644
---- a/net/core/dst.c
-+++ b/net/core/dst.c
-@@ -267,6 +267,15 @@ again:
- }
- EXPORT_SYMBOL(dst_destroy);
-
-+static void dst_destroy_rcu(struct rcu_head *head)
-+{
-+ struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
-+
-+ dst = dst_destroy(dst);
-+ if (dst)
-+ __dst_free(dst);
-+}
-+
- void dst_release(struct dst_entry *dst)
- {
- if (dst) {
-@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
-
- newrefcnt = atomic_dec_return(&dst->__refcnt);
- WARN_ON(newrefcnt < 0);
-- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-- dst = dst_destroy(dst);
-- if (dst)
-- __dst_free(dst);
-- }
-+ if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
-+ call_rcu(&dst->rcu_head, dst_destroy_rcu);
- }
- }
- EXPORT_SYMBOL(dst_release);
diff --git a/net/core/filter.c b/net/core/filter.c
index ebce437..9fed9d0 100644
--- a/net/core/filter.c
@@ -99456,7 +99234,7 @@ index b442e7e..6f5b5a2 100644
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index e5ae776e..15c90cb 100644
+index 7f2e1fc..6206b10 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2003,7 +2003,7 @@ EXPORT_SYMBOL(__skb_checksum);
@@ -99468,7 +99246,7 @@ index e5ae776e..15c90cb 100644
.update = csum_partial_ext,
.combine = csum_block_add_ext,
};
-@@ -3220,13 +3220,15 @@ void __init skb_init(void)
+@@ -3221,13 +3221,15 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -99779,26 +99557,11 @@ index 5325b54..a0d4d69 100644
return -EFAULT;
*lenp = len;
-diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
-index e7b6d53..f005cc7 100644
---- a/net/dns_resolver/dns_query.c
-+++ b/net/dns_resolver/dns_query.c
-@@ -149,7 +149,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
- if (!*_result)
- goto put;
-
-- memcpy(*_result, upayload->data, len + 1);
-+ memcpy(*_result, upayload->data, len);
-+ (*_result)[len] = '\0';
-+
- if (_expiry)
- *_expiry = rkey->expiry;
-
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index 19ab78a..bf575c9 100644
+index 07bd8ed..c574801 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
-@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
+@@ -1706,13 +1706,9 @@ static int __init inet_init(void)
BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
@@ -99813,7 +99576,7 @@ index 19ab78a..bf575c9 100644
rc = proto_register(&udp_prot, 1);
if (rc)
-@@ -1816,8 +1812,6 @@ out_unregister_udp_proto:
+@@ -1819,8 +1815,6 @@ out_unregister_udp_proto:
proto_unregister(&udp_prot);
out_unregister_tcp_proto:
proto_unregister(&tcp_prot);
@@ -100097,42 +99860,6 @@ index 580dd96..9fcef7e 100644
msg.msg_controllen = len;
msg.msg_flags = flags;
-diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
-index 0c3a5d1..c05c07d 100644
---- a/net/ipv4/ip_tunnel.c
-+++ b/net/ipv4/ip_tunnel.c
-@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
- {
- struct dst_entry *old_dst;
-
-- if (dst) {
-- if (dst->flags & DST_NOCACHE)
-- dst = NULL;
-- else
-- dst_clone(dst);
-- }
-+ dst_clone(dst);
- old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
- dst_release(old_dst);
- }
-@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
-
- rcu_read_lock();
- dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
-+ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
-+ dst = NULL;
- if (dst) {
- if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-- rcu_read_unlock();
- tunnel_dst_reset(t);
-- return NULL;
-+ dst_release(dst);
-+ dst = NULL;
- }
-- dst_hold(dst);
- }
- rcu_read_unlock();
- return (struct rtable *)dst;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e4a8f76..dd8ad72 100644
--- a/net/ipv4/ip_vti.c
@@ -100446,7 +100173,7 @@ index c04518f..c402063 100644
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 1344373..02f339e 100644
+index 031553f..e482974 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -233,7 +233,7 @@ static const struct seq_operations rt_cache_seq_ops = {
@@ -100476,7 +100203,7 @@ index 1344373..02f339e 100644
}
static const struct file_operations rt_acct_proc_fops = {
-@@ -2623,34 +2623,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
+@@ -2624,34 +2624,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
@@ -100519,7 +100246,7 @@ index 1344373..02f339e 100644
err_dup:
return -ENOMEM;
}
-@@ -2673,8 +2673,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+@@ -2674,8 +2674,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
@@ -100652,7 +100379,7 @@ index 44eba05..b36864b 100644
hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
if (hdr == NULL)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index e364746..598e76e 100644
+index 3898694..9bd1a03 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -761,7 +761,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
@@ -102730,7 +102457,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 7f40fd2..c72ef1f 100644
+index 0dfe894..7702a84 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk)
@@ -103468,37 +103195,37 @@ index 604a6ac..f87f0a3 100644
return -EFAULT;
to += addrlen;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
-index c82fdc1..4ca1f95 100644
+index dfa532f..1dcfb44 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
-@@ -308,7 +308,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+@@ -307,7 +307,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+ loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
- char tmp[8];
- struct ctl_table tbl;
+ ctl_table_no_const tbl;
- int ret;
- int changed = 0;
+ bool changed = false;
char *none = "none";
+ char tmp[8];
@@ -355,7 +355,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
- {
struct net *net = current->nsproxy->net_ns;
- int new_value;
-- struct ctl_table tbl;
-+ ctl_table_no_const tbl;
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
- int ret;
-@@ -382,7 +382,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
- {
- struct net *net = current->nsproxy->net_ns;
- int new_value;
- struct ctl_table tbl;
+ ctl_table_no_const tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+@@ -384,7 +384,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+ struct net *net = current->nsproxy->net_ns;
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
- int ret;
-@@ -408,7 +408,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+- struct ctl_table tbl;
++ ctl_table_no_const tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+@@ -411,7 +411,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
@@ -103507,7 +103234,7 @@ index c82fdc1..4ca1f95 100644
int new_value, ret;
memset(&tbl, 0, sizeof(struct ctl_table));
-@@ -436,7 +436,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+@@ -438,7 +438,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
int sctp_sysctl_net_register(struct net *net)
{
@@ -103516,7 +103243,7 @@ index c82fdc1..4ca1f95 100644
if (!net_eq(net, &init_net)) {
int i;
-@@ -449,7 +449,10 @@ int sctp_sysctl_net_register(struct net *net)
+@@ -451,7 +451,10 @@ int sctp_sysctl_net_register(struct net *net)
table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
}
@@ -103528,26 +103255,6 @@ index c82fdc1..4ca1f95 100644
return 0;
}
-diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
-index 85c6465..879f3cd 100644
---- a/net/sctp/ulpevent.c
-+++ b/net/sctp/ulpevent.c
-@@ -411,6 +411,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
- * sre_type:
- * It should be SCTP_REMOTE_ERROR.
- */
-+ memset(sre, 0, sizeof(*sre));
- sre->sre_type = SCTP_REMOTE_ERROR;
-
- /*
-@@ -916,6 +917,7 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
- * For recvmsg() the SCTP stack places the message's stream number in
- * this value.
- */
-+ memset(&sinfo, 0, sizeof(sinfo));
- sinfo.sinfo_stream = event->stream;
- /* sinfo_ssn: 16 bits (unsigned integer)
- *
diff --git a/net/socket.c b/net/socket.c
index a19ae19..89554dc 100644
--- a/net/socket.c
@@ -110983,10 +110690,10 @@ index 0000000..12b1e3b
+exit 0
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
new file mode 100644
-index 0000000..3e8148c
+index 0000000..c43901f
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
-@@ -0,0 +1,790 @@
+@@ -0,0 +1,748 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -111466,45 +111173,6 @@ index 0000000..3e8148c
+ return true;
+}
+
-+static bool is_from_cast(const_tree node)
-+{
-+ gimple def_stmt = get_def_stmt(node);
-+
-+ if (!def_stmt)
-+ return false;
-+
-+ if (gimple_assign_cast_p(def_stmt))
-+ return true;
-+
-+ return false;
-+}
-+
-+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool skip_ptr_minus(gimple stmt)
-+{
-+ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (!is_from_cast(rhs1))
-+ return false;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ if (!is_from_cast(rhs2))
-+ return false;
-+
-+ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
-+ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
-+
-+ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
-+ return false;
-+
-+ create_mark_asm(stmt, MARK_YES);
-+ return true;
-+}
-+
+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
+{
+ gimple def_stmt;
@@ -111538,9 +111206,6 @@ index 0000000..3e8148c
+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
+ return;
+ case 3:
-+ if (skip_ptr_minus(def_stmt))
-+ return;
-+
+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
+ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
+ return;
@@ -111779,10 +111444,10 @@ index 0000000..3e8148c
+}
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
new file mode 100644
-index 0000000..88469e9
+index 0000000..73f0a12
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
-@@ -0,0 +1,902 @@
+@@ -0,0 +1,943 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -112506,6 +112171,44 @@ index 0000000..88469e9
+ inform(loc, "Integer size_overflow check applied here.");
+}
+
++static bool is_from_cast(const_tree node)
++{
++ gimple def_stmt = get_def_stmt(node);
++
++ if (!def_stmt)
++ return false;
++
++ if (gimple_assign_cast_p(def_stmt))
++ return true;
++
++ return false;
++}
++
++// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
++static bool is_a_ptr_minus(gimple stmt)
++{
++ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ if (!is_from_cast(rhs1))
++ return false;
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (!is_from_cast(rhs2))
++ return false;
++
++ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
++ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
++
++ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
++ return false;
++
++ return true;
++}
++
+static tree handle_binary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
+{
+ enum intentional_overflow_type res;
@@ -112514,6 +112217,9 @@ index 0000000..88469e9
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+
++ if (is_a_ptr_minus(def_stmt))
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++
+ rhs1 = gimple_assign_rhs1(def_stmt);
+ rhs2 = gimple_assign_rhs2(def_stmt);
+
@@ -112687,7 +112393,7 @@ index 0000000..88469e9
+
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
new file mode 100644
-index 0000000..715a590
+index 0000000..df50164
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
@@ -0,0 +1,1141 @@
@@ -112926,7 +112632,7 @@ index 0000000..715a590
+
+ switch (gimple_code(def_stmt)) {
+ case GIMPLE_CALL:
-+ if (lhs == gimple_return_retval(def_stmt))
++ if (lhs == gimple_call_lhs(def_stmt))
+ interesting_conditions[RET] = true;
+ return;
+ case GIMPLE_NOP:
@@ -113834,10 +113540,10 @@ index 0000000..715a590
+
diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
new file mode 100644
-index 0000000..38904bc
+index 0000000..d71d72a
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
-@@ -0,0 +1,733 @@
+@@ -0,0 +1,736 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -114442,6 +114148,9 @@ index 0000000..38904bc
+ } else
+ return false;
+
++ if (!is_gimple_assign(def_stmt))
++ return false;
++
+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
+ return false;
+
@@ -121273,7 +120982,7 @@ index 0000000..560cd7b
+zpios_read_64734 zpios_read 3 64734 NULL
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..a15328d
+index 0000000..95f7abd
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
@@ -0,0 +1,259 @@
@@ -121309,7 +121018,7 @@ index 0000000..a15328d
+tree size_overflow_type_TI;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140713",
++ .version = "20140725",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+