author     Natanael Copa <ncopa@alpinelinux.org>    2014-09-15 06:28:58 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>    2014-09-15 06:29:32 +0000
commit     678bd9ae1a333b5d9e0d38344a3a13aefb689d3e (patch)
tree       828eaa57b3a20f8235d7e003041798ace94ee9f8 /main
parent     49634869f2741fc89b0a35617536a48e84b42b5f (diff)
download   aports-678bd9ae1a333b5d9e0d38344a3a13aefb689d3e.tar.bz2
           aports-678bd9ae1a333b5d9e0d38344a3a13aefb689d3e.tar.xz
main/linux-grsec: upgrade to grsecurity-3.0-3.14.18-201409141906
Diffstat (limited to 'main')
-rw-r--r--  main/linux-grsec/APKBUILD                                    |   10
-rw-r--r--  main/linux-grsec/grsecurity-3.0-3.14.18-201409141906.patch   | 1160
            (renamed from main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch)
2 files changed, 1102 insertions, 68 deletions
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index 11c21822b..f92929874 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -7,7 +7,7 @@ case $pkgver in
 *.*.*) _kernver=${pkgver%.*};;
 *.*) _kernver=${pkgver};;
 esac
-pkgrel=0
+pkgrel=1
 pkgdesc="Linux kernel with grsecurity"
 url=http://grsecurity.net
 depends="mkinitfs linux-firmware"
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
 install=
 source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
 http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
-grsecurity-3.0-3.14.18-201409082127.patch
+grsecurity-3.0-3.14.18-201409141906.patch

 fix-memory-map-for-PIE-applications.patch
 imx6q-no-unclocked-sleep.patch
@@ -166,7 +166,7 @@ dev() {

 md5sums="b621207b3f6ecbb67db18b13258f8ea8  linux-3.14.tar.xz
 f00741b35127573c3cf085fc43f6e3f0  patch-3.14.18.xz
-43a6f021cff545fa3e3c0386e473ff23  grsecurity-3.0-3.14.18-201409082127.patch
+eb7a53b063df0e0018014049a08f5b40  grsecurity-3.0-3.14.18-201409141906.patch
 c6a4ae7e8ca6159e1631545515805216  fix-memory-map-for-PIE-applications.patch
 1a307fc1d63231bf01d22493a4f14378  imx6q-no-unclocked-sleep.patch
 5395777f2ffcaeedb482afce441a0e2f  kernelconfig.x86
@@ -174,7 +174,7 @@ c6a4ae7e8ca6159e1631545515805216  fix-memory-map-for-PIE-applications.patch
 0d71b1663f7cbfffc6e403deca4bbe86  kernelconfig.armhf"
 sha256sums="61558aa490855f42b6340d1a1596be47454909629327c49a5e4e10268065dffa  linux-3.14.tar.xz
 3723d8d91e1bba0ed57a4951e8089ebfaa21ac186c3b729b4d2bad2da3eaed9f  patch-3.14.18.xz
-ac5c311624480651775d6c482a3314edd8f1e1e5730e98f2aa6f648e47e20422  grsecurity-3.0-3.14.18-201409082127.patch
+a9f82ac307226ea1726e7c7e904627e69ee8016985b73fd4cb8dec4f5768b222  grsecurity-3.0-3.14.18-201409141906.patch
 500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7  fix-memory-map-for-PIE-applications.patch
 21179fbb22a5b74af0a609350ae1a170e232908572b201d02e791d2ce0a685d3  imx6q-no-unclocked-sleep.patch
 c1f2bcf8711c2295895f682a8e32a0719f389557deb0f1fa1ce9e751dd04f8ae  kernelconfig.x86
@@ -182,7 +182,7 @@ c1f2bcf8711c2295895f682a8e32a0719f389557deb0f1fa1ce9e751dd04f8ae  kernelconfig.x
 3cddaac02211dd0f5eb4531aecc3a1427f29dcec7b31d9fe0042192d591bcdc8  kernelconfig.armhf"
 sha512sums="5730d83a7a81134c1e77c0bf89e42dee4f8251ad56c1ac2be20c59e26fdfaa7bea55f277e7af156b637f22e1584914a46089af85039177cb43485089c74ac26e  linux-3.14.tar.xz
 c7c5b281986819cb69592cc4c2b7c7d79f34aa86f21db1dd64b795dda79b5f9df95626dada5c8e0613c58d8d7979f37baf0a87cd458f340018ce61b42e4eb6c5  patch-3.14.18.xz
-2a12ca6dd993fa874da02bdc6913a78a29b21b6621bd243157f5ae65240d4bf934e438f35620935508b7e251445551b1b0c962aec9e26b2d34bca1c8281fdbc2  grsecurity-3.0-3.14.18-201409082127.patch
+ff711fc291a3a795a1421936f0e3168ef8a6b92bcad21f9b7a1468945ee33cf9dcb3e56a5424be99af4cc660bd00e1770812ee2beb0fad9b34dd610558bd9cd9  grsecurity-3.0-3.14.18-201409141906.patch
 4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7  fix-memory-map-for-PIE-applications.patch
 87d1ad59732f265a5b0db54490dc1762c14ea4b868e7eb1aedc3ce57b48046de7bbc08cf5cfcf6f1380fa84063b0edb16ba3d5e3c5670be9bbb229275c88b221  imx6q-no-unclocked-sleep.patch
 bba0241bf9d51154959cb06d8ecf328b0475bf1c656baff1c0066285c71f6b0115534c8c4fc238b63b617b5c7a75d0a13371d80a7fe99194eaa4238cd4712357  kernelconfig.x86
diff --git a/main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch b/main/linux-grsec/grsecurity-3.0-3.14.18-201409141906.patch
index 2a009861c..54a332ad2 100644
--- 
a/main/linux-grsec/grsecurity-3.0-3.14.18-201409082127.patch +++ b/main/linux-grsec/grsecurity-3.0-3.14.18-201409141906.patch @@ -876,7 +876,7 @@ index 4733d32..b142a40 100644 kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h -index 62d2cb5..09d45e3 100644 +index 62d2cb5..0d7f7f5 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -18,17 +18,35 @@ @@ -1379,7 +1379,7 @@ index 62d2cb5..09d45e3 100644 " sbc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" -@@ -344,16 +691,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v) +@@ -344,10 +691,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v) __asm__ __volatile__("@ atomic64_sub_return\n" "1: ldrexd %0, %H0, [%3]\n" " subs %Q0, %Q0, %Q4\n" @@ -1406,13 +1406,7 @@ index 62d2cb5..09d45e3 100644 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (i) : "cc"); - -- smp_mb(); -- - return result; - } - -@@ -382,6 +742,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, +@@ -382,6 +744,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, return oldval; } @@ -1444,7 +1438,7 @@ index 62d2cb5..09d45e3 100644 static inline long long atomic64_xchg(atomic64_t *ptr, long long new) { long long result; -@@ -406,20 +791,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) +@@ -406,20 +793,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) static inline long long atomic64_dec_if_positive(atomic64_t *v) { long long result; @@ -1485,7 +1479,7 @@ index 62d2cb5..09d45e3 100644 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter) : "cc"); -@@ -442,13 +841,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +@@ -442,13 +843,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) " teq %0, %5\n" " teqeq %H0, %H5\n" " moveq %1, #0\n" @@ -1514,7 +1508,7 @@ index 62d2cb5..09d45e3 100644 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "r" (u), "r" (a) : "cc"); -@@ -461,10 +872,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +@@ -461,10 +874,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define atomic64_inc(v) atomic64_add(1LL, (v)) @@ -3271,7 +3265,7 @@ index 7bcee5c..e2f3249 100644 __data_loc = .; #endif diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c -index bd18bb8..87ede26 100644 +index bd18bb8..2bf342f 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors; @@ -3310,6 +3304,15 @@ index bd18bb8..87ede26 100644 kvm->arch.vmid = kvm_next_vmid; kvm_next_vmid++; +@@ -1033,7 +1033,7 @@ static void check_kvm_target_cpu(void *ret) + /** + * Initialize Hyp-mode and memory mappings on all CPUs. 
+ */ +-int kvm_arch_init(void *opaque) ++int kvm_arch_init(const void *opaque) + { + int err; + int ret, cpu; diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index 14a0d98..7771a7d 100644 --- a/arch/arm/lib/clear_user.S @@ -5040,6 +5043,17 @@ index 0c8e553..112d734 100644 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot +diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile +index f37238f..810b95f 100644 +--- a/arch/ia64/Makefile ++++ b/arch/ia64/Makefile +@@ -99,5 +99,6 @@ endef + archprepare: make_nr_irqs_h FORCE + PHONY += make_nr_irqs_h FORCE + ++make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) + make_nr_irqs_h: FORCE + $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 6e6fe18..a6ae668 100644 --- a/arch/ia64/include/asm/atomic.h @@ -7136,6 +7150,19 @@ index 81e6ae0..6ab6e79 100644 info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c +index 3e0ff8d..9eafbf0b 100644 +--- a/arch/mips/kvm/kvm_mips.c ++++ b/arch/mips/kvm/kvm_mips.c +@@ -832,7 +832,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) + return r; + } + +-int kvm_arch_init(void *opaque) ++int kvm_arch_init(const void *opaque) + { + int ret; + diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index becc42b..9e43d4b 100644 --- a/arch/mips/mm/fault.c @@ -12269,10 +12296,18 @@ index ad8f795..2c7eec6 100644 /* * Memory returned by kmalloc() may be used for DMA, so we must make diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index e409891..d64a8f7 100644 +index e409891..8ec65be 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -126,7 +126,7 @@ config X86 +@@ -22,6 +22,7 @@ config X86_64 + config X86 + def_bool y + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS ++ select ARCH_HAS_FAST_MULTIPLIER + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO + select HAVE_AOUT if X86_32 +@@ -126,7 +127,7 @@ config X86 select RTC_LIB select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 @@ -12281,7 +12316,7 @@ index e409891..d64a8f7 100644 select ARCH_SUPPORTS_ATOMIC_RMW config INSTRUCTION_DECODER -@@ -252,7 +252,7 @@ config X86_HT +@@ -252,7 +253,7 @@ config X86_HT config X86_32_LAZY_GS def_bool y @@ -12290,7 +12325,7 @@ index e409891..d64a8f7 100644 config ARCH_HWEIGHT_CFLAGS string -@@ -590,6 +590,7 @@ config SCHED_OMIT_FRAME_POINTER +@@ -590,6 +591,7 @@ config SCHED_OMIT_FRAME_POINTER menuconfig HYPERVISOR_GUEST bool "Linux guest support" @@ -12298,7 +12333,7 @@ index e409891..d64a8f7 100644 ---help--- Say Y here to enable options for running Linux under various hyper- visors. This option enables basic hypervisor detection and platform -@@ -1129,7 +1130,7 @@ choice +@@ -1129,7 +1131,7 @@ choice config NOHIGHMEM bool "off" @@ -12307,7 +12342,7 @@ index e409891..d64a8f7 100644 ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 -@@ -1166,7 +1167,7 @@ config NOHIGHMEM +@@ -1166,7 +1168,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" @@ -12316,7 +12351,7 @@ index e409891..d64a8f7 100644 ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. 
-@@ -1219,7 +1220,7 @@ config PAGE_OFFSET +@@ -1219,7 +1221,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G @@ -12325,7 +12360,7 @@ index e409891..d64a8f7 100644 default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 -@@ -1624,6 +1625,7 @@ source kernel/Kconfig.hz +@@ -1624,6 +1626,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" @@ -12333,7 +12368,7 @@ index e409891..d64a8f7 100644 ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot -@@ -1775,7 +1777,9 @@ config X86_NEED_RELOCS +@@ -1775,7 +1778,9 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" @@ -12344,7 +12379,7 @@ index e409891..d64a8f7 100644 range 0x2000 0x1000000 if X86_32 range 0x200000 0x1000000 if X86_64 ---help--- -@@ -1855,9 +1859,10 @@ config DEBUG_HOTPLUG_CPU0 +@@ -1855,9 +1860,10 @@ config DEBUG_HOTPLUG_CPU0 If unsure, say N. config COMPAT_VDSO @@ -15762,7 +15797,7 @@ index 69bbb48..32517fe 100644 #define smp_load_acquire(p) \ diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h -index 9fc1af7..776d75a 100644 +index 9fc1af7..98cab0b 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -49,7 +49,7 @@ @@ -15846,6 +15881,15 @@ index 9fc1af7..776d75a 100644 { int bitpos = -1; /* +@@ -499,8 +499,6 @@ static __always_inline int fls64(__u64 x) + + #include <asm-generic/bitops/sched.h> + +-#define ARCH_HAS_FAST_MULTIPLIER 1 +- + #include <asm/arch_hweight.h> + + #include <asm-generic/bitops/const_hweight.h> diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 4fa687a..60f2d39 100644 --- a/arch/x86/include/asm/boot.h @@ -18168,7 +18212,7 @@ index b39e194..9d44fd1 100644 /* diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index fdedd38..95c02c2 100644 +index fdedd38..129b180 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -128,7 +128,7 @@ struct cpuinfo_x86 { @@ -18190,7 +18234,7 @@ index fdedd38..95c02c2 100644 +#define INVPCID_SINGLE_ADDRESS 0UL +#define INVPCID_SINGLE_CONTEXT 1UL +#define INVPCID_ALL_GLOBAL 2UL -+#define INVPCID_ALL_MONGLOBAL 3UL ++#define INVPCID_ALL_NONGLOBAL 3UL + +#define PCID_KERNEL 0UL +#define PCID_USER 1UL @@ -19295,7 +19339,7 @@ index e1940c0..ac50dd8 100644 #endif #endif /* _ASM_X86_THREAD_INFO_H */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h -index 04905bf..49203ca 100644 +index 04905bf..1178cdf 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -17,18 +17,44 @@ @@ -19306,7 +19350,7 @@ index 04905bf..49203ca 100644 + u64 descriptor[2]; + + descriptor[0] = PCID_KERNEL; -+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory"); ++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory"); + return; + } + @@ -29166,7 +29210,7 @@ index f5cc9eb..51fa319 100644 CFI_ENDPROC ENDPROC(atomic64_inc_not_zero_cx8) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S -index e78b8ee..7e173a8 100644 +index e78b8eee..7e173a8 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -29,7 +29,8 @@ @@ -40827,7 +40871,7 @@ index d45d50d..72a5dd2 100644 int diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c -index 471347e..5adc6b9 100644 +index 
471347e..5adc6b9d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -67,7 +67,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev) @@ -59370,10 +59414,126 @@ index a93f7e6..d58bcbe 100644 return 0; while (nr) { diff --git a/fs/dcache.c b/fs/dcache.c -index 7f3b400..9c911f2 100644 +index 7f3b400..f91b141 100644 --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) +@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent, + unsigned int hash) + { + hash += (unsigned long) parent / L1_CACHE_BYTES; +- hash = hash + (hash >> d_hash_shift); +- return dentry_hashtable + (hash & d_hash_mask); ++ return dentry_hashtable + hash_32(hash, d_hash_shift); + } + + /* Statistics gathering. */ +@@ -251,7 +250,7 @@ static void __d_free(struct rcu_head *head) + */ + static void d_free(struct dentry *dentry) + { +- BUG_ON((int)dentry->d_lockref.count > 0); ++ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0); + this_cpu_dec(nr_dentry); + if (dentry->d_op && dentry->d_op->d_release) + dentry->d_op->d_release(dentry); +@@ -597,7 +596,7 @@ repeat: + dentry->d_flags |= DCACHE_REFERENCED; + dentry_lru_add(dentry); + +- dentry->d_lockref.count--; ++ __lockref_dec(&dentry->d_lockref); + spin_unlock(&dentry->d_lock); + return; + +@@ -652,7 +651,7 @@ int d_invalidate(struct dentry * dentry) + * We also need to leave mountpoints alone, + * directory or not. + */ +- if (dentry->d_lockref.count > 1 && dentry->d_inode) { ++ if (__lockref_read(&dentry->d_lockref) > 1 && dentry->d_inode) { + if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { + spin_unlock(&dentry->d_lock); + return -EBUSY; +@@ -668,7 +667,7 @@ EXPORT_SYMBOL(d_invalidate); + /* This must be called with d_lock held */ + static inline void __dget_dlock(struct dentry *dentry) + { +- dentry->d_lockref.count++; ++ __lockref_inc(&dentry->d_lockref); + } + + static inline void __dget(struct dentry *dentry) +@@ -709,8 +708,8 @@ repeat: + goto repeat; + } + rcu_read_unlock(); +- BUG_ON(!ret->d_lockref.count); +- ret->d_lockref.count++; ++ BUG_ON(!__lockref_read(&ret->d_lockref)); ++ __lockref_inc(&ret->d_lockref); + spin_unlock(&ret->d_lock); + return ret; + } +@@ -793,7 +792,7 @@ restart: + spin_lock(&inode->i_lock); + hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { + spin_lock(&dentry->d_lock); +- if (!dentry->d_lockref.count) { ++ if (!__lockref_read(&dentry->d_lockref)) { + /* + * inform the fs via d_prune that this dentry + * is about to be unhashed and destroyed. +@@ -885,7 +884,7 @@ static void shrink_dentry_list(struct list_head *list) + * We found an inuse dentry which was not removed from + * the LRU because of laziness during lookup. Do not free it. + */ +- if (dentry->d_lockref.count) { ++ if (__lockref_read(&dentry->d_lockref)) { + spin_unlock(&dentry->d_lock); + continue; + } +@@ -931,7 +930,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) + * counts, just remove them from the LRU. Otherwise give them + * another pass through the LRU. + */ +- if (dentry->d_lockref.count) { ++ if (__lockref_read(&dentry->d_lockref) > 0) { + d_lru_isolate(dentry); + spin_unlock(&dentry->d_lock); + return LRU_REMOVED; +@@ -1269,7 +1268,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) + * loop in shrink_dcache_parent() might not make any progress + * and loop forever. 
+ */ +- if (dentry->d_lockref.count) { ++ if (__lockref_read(&dentry->d_lockref)) { + dentry_lru_del(dentry); + } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { + /* +@@ -1323,11 +1322,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) + struct select_data *data = _data; + enum d_walk_ret ret = D_WALK_CONTINUE; + +- if (dentry->d_lockref.count) { ++ if (__lockref_read(&dentry->d_lockref)) { + dentry_lru_del(dentry); + if (likely(!list_empty(&dentry->d_subdirs))) + goto out; +- if (dentry == data->start && dentry->d_lockref.count == 1) ++ if (dentry == data->start && __lockref_read(&dentry->d_lockref) == 1) + goto out; + printk(KERN_ERR + "BUG: Dentry %p{i=%lx,n=%s}" +@@ -1337,7 +1336,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry) + dentry->d_inode ? + dentry->d_inode->i_ino : 0UL, + dentry->d_name.name, +- dentry->d_lockref.count, ++ __lockref_read(&dentry->d_lockref), + dentry->d_sb->s_type->name, + dentry->d_sb->s_id); + BUG(); +@@ -1495,7 +1494,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) */ dentry->d_iname[DNAME_INLINE_LEN-1] = 0; if (name->len > DNAME_INLINE_LEN-1) { @@ -59382,7 +59542,43 @@ index 7f3b400..9c911f2 100644 if (!dname) { kmem_cache_free(dentry_cache, dentry); return NULL; -@@ -3430,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages) +@@ -1513,7 +1512,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) + smp_wmb(); + dentry->d_name.name = dname; + +- dentry->d_lockref.count = 1; ++ __lockref_set(&dentry->d_lockref, 1); + dentry->d_flags = 0; + spin_lock_init(&dentry->d_lock); + seqcount_init(&dentry->d_seq); +@@ -2276,7 +2275,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) + goto next; + } + +- dentry->d_lockref.count++; ++ __lockref_inc(&dentry->d_lockref); + found = dentry; + spin_unlock(&dentry->d_lock); + break; +@@ -2375,7 +2374,7 @@ again: + spin_lock(&dentry->d_lock); + inode = dentry->d_inode; + isdir = S_ISDIR(inode->i_mode); +- if (dentry->d_lockref.count == 1) { ++ if (__lockref_read(&dentry->d_lockref) == 1) { + if (!spin_trylock(&inode->i_lock)) { + spin_unlock(&dentry->d_lock); + cpu_relax(); +@@ -3314,7 +3313,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) + + if (!(dentry->d_flags & DCACHE_GENOCIDE)) { + dentry->d_flags |= DCACHE_GENOCIDE; +- dentry->d_lockref.count--; ++ __lockref_dec(&dentry->d_lockref); + } + } + return D_WALK_CONTINUE; +@@ -3430,7 +3429,8 @@ void __init vfs_caches_init(unsigned long mempages) mempages -= reserve; names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, @@ -62537,10 +62733,18 @@ index b29e42f..5ea7fdf 100644 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ diff --git a/fs/namei.c b/fs/namei.c -index bdea109..e242796 100644 +index bdea109..6e919ab 100644 --- a/fs/namei.c +++ b/fs/namei.c -@@ -330,17 +330,34 @@ int generic_permission(struct inode *inode, int mask) +@@ -34,6 +34,7 @@ + #include <linux/device_cgroup.h> + #include <linux/fs_struct.h> + #include <linux/posix_acl.h> ++#include <linux/hash.h> + #include <asm/uaccess.h> + + #include "internal.h" +@@ -330,17 +331,34 @@ int generic_permission(struct inode *inode, int mask) if (ret != -EACCES) return ret; @@ -62578,7 +62782,7 @@ index bdea109..e242796 100644 * Read/write DACs are always overridable. * Executable DACs are overridable when there is * at least one exec bit set. 
-@@ -349,14 +366,6 @@ int generic_permission(struct inode *inode, int mask) +@@ -349,14 +367,6 @@ int generic_permission(struct inode *inode, int mask) if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; @@ -62593,7 +62797,7 @@ index bdea109..e242796 100644 return -EACCES; } -@@ -822,7 +831,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p) +@@ -822,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p) { struct dentry *dentry = link->dentry; int error; @@ -62602,7 +62806,7 @@ index bdea109..e242796 100644 BUG_ON(nd->flags & LOOKUP_RCU); -@@ -843,6 +852,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p) +@@ -843,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p) if (error) goto out_put_nd_path; @@ -62615,7 +62819,7 @@ index bdea109..e242796 100644 nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(*p); -@@ -1591,6 +1606,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) +@@ -1591,6 +1607,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) if (res) break; res = walk_component(nd, path, LOOKUP_FOLLOW); @@ -62624,6 +62828,16 @@ index bdea109..e242796 100644 put_link(nd, &link, cookie); } while (res > 0); +@@ -1624,8 +1642,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) + + static inline unsigned int fold_hash(unsigned long hash) + { +- hash += hash >> (8*sizeof(int)); +- return hash; ++ return hash_64(hash, 32); + } + + #else /* 32-bit case */ @@ -1664,7 +1681,7 @@ EXPORT_SYMBOL(full_name_hash); static inline unsigned long hash_name(const char *name, unsigned int *hashp) { @@ -63483,6 +63697,28 @@ index 287a22c..4e56e4e 100644 group->fanotify_data.f_flags = event_f_flags; #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS oevent->response = 0; +diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c +index 238a593..9d7e2b9 100644 +--- a/fs/notify/fdinfo.c ++++ b/fs/notify/fdinfo.c +@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode) + { + struct { + struct file_handle handle; +- u8 pad[64]; ++ u8 pad[MAX_HANDLE_SZ]; + } f; + int size, ret, i; + +@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode) + size = f.handle.handle_bytes >> 2; + + ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0); +- if ((ret == 255) || (ret == -ENOSPC)) { ++ if ((ret == FILEID_INVALID) || (ret < 0)) { + WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); + return 0; + } diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 1e58402..bb2d6f4 100644 --- a/fs/notify/notification.c @@ -66416,6 +66652,133 @@ index e18b988..f1d4ad0f 100644 { int err; +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 982ce05..c693331 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL"); + + static umode_t udf_convert_permissions(struct fileEntry *); + static int udf_update_inode(struct inode *, int); +-static void udf_fill_inode(struct inode *, struct buffer_head *); + static int udf_sync_inode(struct inode *inode); + static int udf_alloc_i_data(struct inode *inode, size_t size); + static sector_t inode_getblk(struct inode *, sector_t, int *, int *); +@@ -1271,13 +1270,25 @@ update_time: + return 0; + } + ++/* ++ * Maximum length of linked list formed by ICB hierarchy. 
The chosen number is ++ * arbitrary - just that we hopefully don't limit any real use of rewritten ++ * inode on write-once media but avoid looping for too long on corrupted media. ++ */ ++#define UDF_MAX_ICB_NESTING 1024 ++ + static void __udf_read_inode(struct inode *inode) + { + struct buffer_head *bh = NULL; + struct fileEntry *fe; ++ struct extendedFileEntry *efe; + uint16_t ident; + struct udf_inode_info *iinfo = UDF_I(inode); ++ struct udf_sb_info *sbi = UDF_SB(inode->i_sb); ++ unsigned int link_count; ++ unsigned int indirections = 0; + ++reread: + /* + * Set defaults, but the inode is still incomplete! + * Note: get_new_inode() sets the following on a new inode: +@@ -1307,6 +1318,7 @@ static void __udf_read_inode(struct inode *inode) + } + + fe = (struct fileEntry *)bh->b_data; ++ efe = (struct extendedFileEntry *)bh->b_data; + + if (fe->icbTag.strategyType == cpu_to_le16(4096)) { + struct buffer_head *ibh; +@@ -1314,28 +1326,26 @@ static void __udf_read_inode(struct inode *inode) + ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, + &ident); + if (ident == TAG_IDENT_IE && ibh) { +- struct buffer_head *nbh = NULL; + struct kernel_lb_addr loc; + struct indirectEntry *ie; + + ie = (struct indirectEntry *)ibh->b_data; + loc = lelb_to_cpu(ie->indirectICB.extLocation); + +- if (ie->indirectICB.extLength && +- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0, +- &ident))) { +- if (ident == TAG_IDENT_FE || +- ident == TAG_IDENT_EFE) { +- memcpy(&iinfo->i_location, +- &loc, +- sizeof(struct kernel_lb_addr)); +- brelse(bh); +- brelse(ibh); +- brelse(nbh); +- __udf_read_inode(inode); ++ if (ie->indirectICB.extLength) { ++ brelse(bh); ++ brelse(ibh); ++ memcpy(&iinfo->i_location, &loc, ++ sizeof(struct kernel_lb_addr)); ++ if (++indirections > UDF_MAX_ICB_NESTING) { ++ udf_err(inode->i_sb, ++ "too many ICBs in ICB hierarchy" ++ " (max %d supported)\n", ++ UDF_MAX_ICB_NESTING); ++ make_bad_inode(inode); + return; + } +- brelse(nbh); ++ goto reread; + } + } + brelse(ibh); +@@ -1346,22 +1356,6 @@ static void __udf_read_inode(struct inode *inode) + make_bad_inode(inode); + return; + } +- udf_fill_inode(inode, bh); +- +- brelse(bh); +-} +- +-static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) +-{ +- struct fileEntry *fe; +- struct extendedFileEntry *efe; +- struct udf_sb_info *sbi = UDF_SB(inode->i_sb); +- struct udf_inode_info *iinfo = UDF_I(inode); +- unsigned int link_count; +- +- fe = (struct fileEntry *)bh->b_data; +- efe = (struct extendedFileEntry *)bh->b_data; +- + if (fe->icbTag.strategyType == cpu_to_le16(4)) + iinfo->i_strat4096 = 0; + else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ +@@ -1551,6 +1545,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) + } else + make_bad_inode(inode); + } ++ brelse(bh); + } + + static int udf_alloc_i_data(struct inode *inode, size_t size) +@@ -1664,7 +1659,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) + FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); + fe->permissions = cpu_to_le32(udfperms); + +- if (S_ISDIR(inode->i_mode)) ++ if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) + fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); + else + fe->fileLinkCount = cpu_to_le16(inode->i_nlink); diff --git a/fs/udf/misc.c b/fs/udf/misc.c index c175b4d..8f36a16 100644 --- a/fs/udf/misc.c @@ -80906,10 +81269,28 @@ index 0000000..e7ffaaf + +#endif diff --git a/include/linux/hash.h b/include/linux/hash.h -index bd1754c..8240892 100644 +index bd1754c..69b7715 100644 --- 
a/include/linux/hash.h +++ b/include/linux/hash.h -@@ -83,7 +83,7 @@ static inline u32 hash32_ptr(const void *ptr) +@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits) + { + u64 hash = val; + ++#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 ++ hash = hash * GOLDEN_RATIO_PRIME_64; ++#else + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ + u64 n = hash; + n <<= 18; +@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits) + hash += n; + n <<= 2; + hash += n; ++#endif + + /* High bits are more random, so use them. */ + return hash >> (64 - bits); +@@ -83,7 +87,7 @@ static inline u32 hash32_ptr(const void *ptr) struct fast_hash_ops { u32 (*hash)(const void *data, u32 len, u32 seed); u32 (*hash2)(const u32 *data, u32 len, u32 seed); @@ -81425,6 +81806,47 @@ index ef95941..82db65a 100644 /** * list_move - delete from one list and add as another's head * @list: the entry to move +diff --git a/include/linux/lockref.h b/include/linux/lockref.h +index 4bfde0e..d6e2e09 100644 +--- a/include/linux/lockref.h ++++ b/include/linux/lockref.h +@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l) + return ((int)l->count < 0); + } + ++static inline unsigned int __lockref_read(struct lockref *lockref) ++{ ++ return lockref->count; ++} ++ ++static inline void __lockref_set(struct lockref *lockref, unsigned int count) ++{ ++ lockref->count = count; ++} ++ ++static inline void __lockref_inc(struct lockref *lockref) ++{ ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ atomic_inc((atomic_t *)&lockref->count); ++#else ++ lockref->count++; ++#endif ++ ++} ++ ++static inline void __lockref_dec(struct lockref *lockref) ++{ ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ atomic_dec((atomic_t *)&lockref->count); ++#else ++ lockref->count--; ++#endif ++ ++} ++ + #endif /* __LINUX_LOCKREF_H */ diff --git a/include/linux/math64.h b/include/linux/math64.h index c45c089..298841c 100644 --- a/include/linux/math64.h @@ -87733,7 +88155,7 @@ index c44bff8..a3c5876 100644 else new_fs = fs; diff --git a/kernel/futex.c b/kernel/futex.c -index e3087af..8e3b90f 100644 +index e3087af..4730710 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -54,6 +54,7 @@ @@ -87783,7 +88205,15 @@ index e3087af..8e3b90f 100644 pagefault_disable(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); -@@ -3019,6 +3025,7 @@ static void __init futex_detect_cmpxchg(void) +@@ -2614,6 +2620,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + * shared futexes. We need to compare the keys: + */ + if (match_futex(&q.key, &key2)) { ++ queue_unlock(hb); + ret = -EINVAL; + goto out_put_keys; + } +@@ -3019,6 +3026,7 @@ static void __init futex_detect_cmpxchg(void) { #ifndef CONFIG_HAVE_FUTEX_CMPXCHG u32 curval; @@ -87791,7 +88221,7 @@ index e3087af..8e3b90f 100644 /* * This will fail and we want it. Some arch implementations do -@@ -3030,8 +3037,11 @@ static void __init futex_detect_cmpxchg(void) +@@ -3030,8 +3038,11 @@ static void __init futex_detect_cmpxchg(void) * implementation, the non-functional ones will return * -ENOSYS. 
*/ @@ -88025,10 +88455,26 @@ index 3127ad5..159d880 100644 return -ENOMEM; reset_iter(iter, 0); diff --git a/kernel/kcmp.c b/kernel/kcmp.c -index e30ac0f..3528cac 100644 +index e30ac0f..a7fcafb 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c -@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, +@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type) + */ + static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type) + { +- long ret; ++ long t1, t2; + +- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type); ++ t1 = kptr_obfuscate((long)v1, type); ++ t2 = kptr_obfuscate((long)v2, type); + +- return (ret < 0) | ((ret > 0) << 1); ++ return (t1 < t2) | ((t1 > t2) << 1); + } + + /* The caller must have pinned the task */ +@@ -99,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, struct task_struct *task1, *task2; int ret; @@ -91717,10 +92163,71 @@ index 7c7964c..2a0d412 100644 update_vsyscall_tz(); if (firsttime) { diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index fe75444..190c528 100644 +index fe75444..b8a1463 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c -@@ -811,7 +811,7 @@ static int __init alarmtimer_init(void) +@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid) + static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, + ktime_t now) + { ++ unsigned long flags; + struct k_itimer *ptr = container_of(alarm, struct k_itimer, + it.alarm.alarmtimer); +- if (posix_timer_event(ptr, 0) != 0) +- ptr->it_overrun++; ++ enum alarmtimer_restart result = ALARMTIMER_NORESTART; ++ ++ spin_lock_irqsave(&ptr->it_lock, flags); ++ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) { ++ if (posix_timer_event(ptr, 0) != 0) ++ ptr->it_overrun++; ++ } + + /* Re-add periodic timers */ + if (ptr->it.alarm.interval.tv64) { + ptr->it_overrun += alarm_forward(alarm, now, + ptr->it.alarm.interval); +- return ALARMTIMER_RESTART; ++ result = ALARMTIMER_RESTART; + } +- return ALARMTIMER_NORESTART; ++ spin_unlock_irqrestore(&ptr->it_lock, flags); ++ ++ return result; + } + + /** +@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer) + * @new_timer: k_itimer pointer + * @cur_setting: itimerspec data to fill + * +- * Copies the itimerspec data out from the k_itimer ++ * Copies out the current itimerspec data + */ + static void alarm_timer_get(struct k_itimer *timr, + struct itimerspec *cur_setting) + { +- memset(cur_setting, 0, sizeof(struct itimerspec)); ++ ktime_t relative_expiry_time = ++ alarm_expires_remaining(&(timr->it.alarm.alarmtimer)); + +- cur_setting->it_interval = +- ktime_to_timespec(timr->it.alarm.interval); +- cur_setting->it_value = +- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires); +- return; ++ if (ktime_to_ns(relative_expiry_time) > 0) { ++ cur_setting->it_value = ktime_to_timespec(relative_expiry_time); ++ } else { ++ cur_setting->it_value.tv_sec = 0; ++ cur_setting->it_value.tv_nsec = 0; ++ } ++ ++ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval); + } + + /** +@@ -811,7 +823,7 @@ static int __init alarmtimer_init(void) struct platform_device *pdev; int error = 0; int i; @@ -92504,6 +93011,20 @@ index b4defde..f092808 100644 } spin_unlock_irq(&pool->lock); +diff --git a/lib/Kconfig b/lib/Kconfig +index 991c98b..88061cf 100644 +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -51,6 +51,9 @@ config PERCPU_RWSEM + config ARCH_USE_CMPXCHG_LOCKREF + bool + ++config ARCH_HAS_FAST_MULTIPLIER ++ 
bool ++ + config CRC_CCITT + tristate "CRC-CCITT functions" + help diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a48abea..e108def 100644 --- a/lib/Kconfig.debug @@ -92584,10 +93105,25 @@ index 48140e3..de854e5 100644 ifneq ($(CONFIG_HAVE_DEC_LOCK),y) diff --git a/lib/assoc_array.c b/lib/assoc_array.c -index c0b1007..ae146f0 100644 +index c0b1007..2404d03 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c -@@ -1735,7 +1735,7 @@ ascend_old_tree: +@@ -1723,11 +1723,13 @@ ascend_old_tree: + shortcut = assoc_array_ptr_to_shortcut(ptr); + slot = shortcut->parent_slot; + cursor = shortcut->back_pointer; ++ if (!cursor) ++ goto gc_complete; + } else { + slot = node->parent_slot; + cursor = ptr; + } +- BUG_ON(!ptr); ++ BUG_ON(!cursor); + node = assoc_array_ptr_to_node(cursor); + slot++; + goto continue_node; +@@ -1735,7 +1737,7 @@ ascend_old_tree: gc_complete: edit->set[0].to = new_root; assoc_array_apply_edit(edit); @@ -92754,6 +93290,28 @@ index fea973f..386626f 100644 .hash = jhash, .hash2 = jhash2, }; +diff --git a/lib/hweight.c b/lib/hweight.c +index b7d81ba..9a5c1f2 100644 +--- a/lib/hweight.c ++++ b/lib/hweight.c +@@ -11,7 +11,7 @@ + + unsigned int __sw_hweight32(unsigned int w) + { +-#ifdef ARCH_HAS_FAST_MULTIPLIER ++#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x55555555; + w = (w & 0x33333333) + ((w >> 2) & 0x33333333); + w = (w + (w >> 4)) & 0x0f0f0f0f; +@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w) + return __sw_hweight32((unsigned int)(w >> 32)) + + __sw_hweight32((unsigned int)w); + #elif BITS_PER_LONG == 64 +-#ifdef ARCH_HAS_FAST_MULTIPLIER ++#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x5555555555555555ul; + w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); + w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; diff --git a/lib/inflate.c b/lib/inflate.c index 013a761..c28f3fc 100644 --- a/lib/inflate.c @@ -93040,6 +93598,98 @@ index c24c2f7..f0296f4 100644 + pax_close_kernel(); +} +EXPORT_SYMBOL(pax_list_del_rcu); +diff --git a/lib/lockref.c b/lib/lockref.c +index f07a40d..0a445a7 100644 +--- a/lib/lockref.c ++++ b/lib/lockref.c +@@ -49,13 +49,13 @@ + void lockref_get(struct lockref *lockref) + { + CMPXCHG_LOOP( +- new.count++; ++ __lockref_inc(&new); + , + return; + ); + + spin_lock(&lockref->lock); +- lockref->count++; ++ __lockref_inc(lockref); + spin_unlock(&lockref->lock); + } + EXPORT_SYMBOL(lockref_get); +@@ -70,7 +70,7 @@ int lockref_get_not_zero(struct lockref *lockref) + int retval; + + CMPXCHG_LOOP( +- new.count++; ++ __lockref_inc(&new); + if (!old.count) + return 0; + , +@@ -80,7 +80,7 @@ int lockref_get_not_zero(struct lockref *lockref) + spin_lock(&lockref->lock); + retval = 0; + if (lockref->count) { +- lockref->count++; ++ __lockref_inc(lockref); + retval = 1; + } + spin_unlock(&lockref->lock); +@@ -97,7 +97,7 @@ EXPORT_SYMBOL(lockref_get_not_zero); + int lockref_get_or_lock(struct lockref *lockref) + { + CMPXCHG_LOOP( +- new.count++; ++ __lockref_inc(&new); + if (!old.count) + break; + , +@@ -107,7 +107,7 @@ int lockref_get_or_lock(struct lockref *lockref) + spin_lock(&lockref->lock); + if (!lockref->count) + return 0; +- lockref->count++; ++ __lockref_inc(lockref); + spin_unlock(&lockref->lock); + return 1; + } +@@ -121,7 +121,7 @@ EXPORT_SYMBOL(lockref_get_or_lock); + int lockref_put_or_lock(struct lockref *lockref) + { + CMPXCHG_LOOP( +- new.count--; ++ __lockref_dec(&new); + if (old.count <= 1) + break; + , +@@ -131,7 +131,7 @@ int lockref_put_or_lock(struct lockref *lockref) + spin_lock(&lockref->lock); 
+ if (lockref->count <= 1) + return 0; +- lockref->count--; ++ __lockref_dec(lockref); + spin_unlock(&lockref->lock); + return 1; + } +@@ -158,7 +158,7 @@ int lockref_get_not_dead(struct lockref *lockref) + int retval; + + CMPXCHG_LOOP( +- new.count++; ++ __lockref_inc(&new); + if ((int)old.count < 0) + return 0; + , +@@ -168,7 +168,7 @@ int lockref_get_not_dead(struct lockref *lockref) + spin_lock(&lockref->lock); + retval = 0; + if ((int) lockref->count >= 0) { +- lockref->count++; ++ __lockref_inc(lockref); + retval = 1; + } + spin_unlock(&lockref->lock); diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 963b703..438bc51 100644 --- a/lib/percpu-refcount.c @@ -93106,6 +93756,22 @@ index 0922579..9d7adb9 100644 + printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages)); #endif } +diff --git a/lib/string.c b/lib/string.c +index e5878de..315fad2 100644 +--- a/lib/string.c ++++ b/lib/string.c +@@ -789,9 +789,9 @@ void *memchr_inv(const void *start, int c, size_t bytes) + return check_bytes8(start, value, bytes); + + value64 = value; +-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 ++#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 + value64 *= 0x0101010101010101; +-#elif defined(ARCH_HAS_FAST_MULTIPLIER) ++#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) + value64 *= 0x01010101; + value64 |= value64 << 32; + #else diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index bb2b201..46abaf9 100644 --- a/lib/strncpy_from_user.c @@ -98926,6 +99592,340 @@ index b543470..d2ddae2 100644 if (!can_dir) { printk(KERN_INFO "can: failed to create /proc/net/can . " +diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c +index 96238ba..de6662b 100644 +--- a/net/ceph/auth_x.c ++++ b/net/ceph/auth_x.c +@@ -13,8 +13,6 @@ + #include "auth_x.h" + #include "auth_x_protocol.h" + +-#define TEMP_TICKET_BUF_LEN 256 +- + static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); + + static int ceph_x_is_authenticated(struct ceph_auth_client *ac) +@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret, + } + + static int ceph_x_decrypt(struct ceph_crypto_key *secret, +- void **p, void *end, void *obuf, size_t olen) ++ void **p, void *end, void **obuf, size_t olen) + { + struct ceph_x_encrypt_header head; + size_t head_len = sizeof(head); +@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret, + return -EINVAL; + + dout("ceph_x_decrypt len %d\n", len); +- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, +- *p, len); ++ if (*obuf == NULL) { ++ *obuf = kmalloc(len, GFP_NOFS); ++ if (!*obuf) ++ return -ENOMEM; ++ olen = len; ++ } ++ ++ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len); + if (ret) + return ret; + if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) +@@ -129,145 +133,154 @@ static void remove_ticket_handler(struct ceph_auth_client *ac, + kfree(th); + } + ++static int process_one_ticket(struct ceph_auth_client *ac, ++ struct ceph_crypto_key *secret, ++ void **p, void *end) ++{ ++ struct ceph_x_info *xi = ac->private; ++ int type; ++ u8 tkt_struct_v, blob_struct_v; ++ struct ceph_x_ticket_handler *th; ++ void *dbuf = NULL; ++ void *dp, *dend; ++ int dlen; ++ char is_enc; ++ struct timespec validity; ++ struct ceph_crypto_key old_key; ++ void *ticket_buf = NULL; ++ void *tp, *tpend; ++ struct ceph_timespec new_validity; ++ struct ceph_crypto_key new_session_key; ++ struct ceph_buffer *new_ticket_blob; ++ unsigned long 
new_expires, new_renew_after; ++ u64 new_secret_id; ++ int ret; ++ ++ ceph_decode_need(p, end, sizeof(u32) + 1, bad); ++ ++ type = ceph_decode_32(p); ++ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); ++ ++ tkt_struct_v = ceph_decode_8(p); ++ if (tkt_struct_v != 1) ++ goto bad; ++ ++ th = get_ticket_handler(ac, type); ++ if (IS_ERR(th)) { ++ ret = PTR_ERR(th); ++ goto out; ++ } ++ ++ /* blob for me */ ++ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0); ++ if (dlen <= 0) { ++ ret = dlen; ++ goto out; ++ } ++ dout(" decrypted %d bytes\n", dlen); ++ dp = dbuf; ++ dend = dp + dlen; ++ ++ tkt_struct_v = ceph_decode_8(&dp); ++ if (tkt_struct_v != 1) ++ goto bad; ++ ++ memcpy(&old_key, &th->session_key, sizeof(old_key)); ++ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); ++ if (ret) ++ goto out; ++ ++ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); ++ ceph_decode_timespec(&validity, &new_validity); ++ new_expires = get_seconds() + validity.tv_sec; ++ new_renew_after = new_expires - (validity.tv_sec / 4); ++ dout(" expires=%lu renew_after=%lu\n", new_expires, ++ new_renew_after); ++ ++ /* ticket blob for service */ ++ ceph_decode_8_safe(p, end, is_enc, bad); ++ if (is_enc) { ++ /* encrypted */ ++ dout(" encrypted ticket\n"); ++ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0); ++ if (dlen < 0) { ++ ret = dlen; ++ goto out; ++ } ++ tp = ticket_buf; ++ dlen = ceph_decode_32(&tp); ++ } else { ++ /* unencrypted */ ++ ceph_decode_32_safe(p, end, dlen, bad); ++ ticket_buf = kmalloc(dlen, GFP_NOFS); ++ if (!ticket_buf) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ tp = ticket_buf; ++ ceph_decode_need(p, end, dlen, bad); ++ ceph_decode_copy(p, ticket_buf, dlen); ++ } ++ tpend = tp + dlen; ++ dout(" ticket blob is %d bytes\n", dlen); ++ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); ++ blob_struct_v = ceph_decode_8(&tp); ++ new_secret_id = ceph_decode_64(&tp); ++ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); ++ if (ret) ++ goto out; ++ ++ /* all is well, update our ticket */ ++ ceph_crypto_key_destroy(&th->session_key); ++ if (th->ticket_blob) ++ ceph_buffer_put(th->ticket_blob); ++ th->session_key = new_session_key; ++ th->ticket_blob = new_ticket_blob; ++ th->validity = new_validity; ++ th->secret_id = new_secret_id; ++ th->expires = new_expires; ++ th->renew_after = new_renew_after; ++ dout(" got ticket service %d (%s) secret_id %lld len %d\n", ++ type, ceph_entity_type_name(type), th->secret_id, ++ (int)th->ticket_blob->vec.iov_len); ++ xi->have_keys |= th->service; ++ ++out: ++ kfree(ticket_buf); ++ kfree(dbuf); ++ return ret; ++ ++bad: ++ ret = -EINVAL; ++ goto out; ++} ++ + static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, + struct ceph_crypto_key *secret, + void *buf, void *end) + { +- struct ceph_x_info *xi = ac->private; +- int num; + void *p = buf; +- int ret; +- char *dbuf; +- char *ticket_buf; + u8 reply_struct_v; ++ u32 num; ++ int ret; + +- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); +- if (!dbuf) +- return -ENOMEM; +- +- ret = -ENOMEM; +- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); +- if (!ticket_buf) +- goto out_dbuf; +- +- ceph_decode_need(&p, end, 1 + sizeof(u32), bad); +- reply_struct_v = ceph_decode_8(&p); ++ ceph_decode_8_safe(&p, end, reply_struct_v, bad); + if (reply_struct_v != 1) +- goto bad; +- num = ceph_decode_32(&p); ++ return -EINVAL; ++ ++ ceph_decode_32_safe(&p, end, num, bad); + dout("%d tickets\n", num); ++ + while (num--) { +- int type; +- u8 tkt_struct_v, blob_struct_v; +- struct 
ceph_x_ticket_handler *th; +- void *dp, *dend; +- int dlen; +- char is_enc; +- struct timespec validity; +- struct ceph_crypto_key old_key; +- void *tp, *tpend; +- struct ceph_timespec new_validity; +- struct ceph_crypto_key new_session_key; +- struct ceph_buffer *new_ticket_blob; +- unsigned long new_expires, new_renew_after; +- u64 new_secret_id; +- +- ceph_decode_need(&p, end, sizeof(u32) + 1, bad); +- +- type = ceph_decode_32(&p); +- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); +- +- tkt_struct_v = ceph_decode_8(&p); +- if (tkt_struct_v != 1) +- goto bad; +- +- th = get_ticket_handler(ac, type); +- if (IS_ERR(th)) { +- ret = PTR_ERR(th); +- goto out; +- } +- +- /* blob for me */ +- dlen = ceph_x_decrypt(secret, &p, end, dbuf, +- TEMP_TICKET_BUF_LEN); +- if (dlen <= 0) { +- ret = dlen; +- goto out; +- } +- dout(" decrypted %d bytes\n", dlen); +- dend = dbuf + dlen; +- dp = dbuf; +- +- tkt_struct_v = ceph_decode_8(&dp); +- if (tkt_struct_v != 1) +- goto bad; +- +- memcpy(&old_key, &th->session_key, sizeof(old_key)); +- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); ++ ret = process_one_ticket(ac, secret, &p, end); + if (ret) +- goto out; +- +- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); +- ceph_decode_timespec(&validity, &new_validity); +- new_expires = get_seconds() + validity.tv_sec; +- new_renew_after = new_expires - (validity.tv_sec / 4); +- dout(" expires=%lu renew_after=%lu\n", new_expires, +- new_renew_after); +- +- /* ticket blob for service */ +- ceph_decode_8_safe(&p, end, is_enc, bad); +- tp = ticket_buf; +- if (is_enc) { +- /* encrypted */ +- dout(" encrypted ticket\n"); +- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf, +- TEMP_TICKET_BUF_LEN); +- if (dlen < 0) { +- ret = dlen; +- goto out; +- } +- dlen = ceph_decode_32(&tp); +- } else { +- /* unencrypted */ +- ceph_decode_32_safe(&p, end, dlen, bad); +- ceph_decode_need(&p, end, dlen, bad); +- ceph_decode_copy(&p, ticket_buf, dlen); +- } +- tpend = tp + dlen; +- dout(" ticket blob is %d bytes\n", dlen); +- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); +- blob_struct_v = ceph_decode_8(&tp); +- new_secret_id = ceph_decode_64(&tp); +- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); +- if (ret) +- goto out; +- +- /* all is well, update our ticket */ +- ceph_crypto_key_destroy(&th->session_key); +- if (th->ticket_blob) +- ceph_buffer_put(th->ticket_blob); +- th->session_key = new_session_key; +- th->ticket_blob = new_ticket_blob; +- th->validity = new_validity; +- th->secret_id = new_secret_id; +- th->expires = new_expires; +- th->renew_after = new_renew_after; +- dout(" got ticket service %d (%s) secret_id %lld len %d\n", +- type, ceph_entity_type_name(type), th->secret_id, +- (int)th->ticket_blob->vec.iov_len); +- xi->have_keys |= th->service; ++ return ret; + } + +- ret = 0; +-out: +- kfree(ticket_buf); +-out_dbuf: +- kfree(dbuf); +- return ret; ++ return 0; + + bad: +- ret = -EINVAL; +- goto out; ++ return -EINVAL; + } + + static int ceph_x_build_authorizer(struct ceph_auth_client *ac, +@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_x_ticket_handler *th; + int ret = 0; + struct ceph_x_authorize_reply reply; ++ void *preply = &reply; + void *p = au->reply_buf; + void *end = p + sizeof(au->reply_buf); + + th = get_ticket_handler(ac, au->service); + if (IS_ERR(th)) + return PTR_ERR(th); +- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); ++ ret = ceph_x_decrypt(&th->session_key, 
&p, end, &preply, sizeof(reply)); + if (ret < 0) + return ret; + if (ret != sizeof(reply)) diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 988721a..947846d 100644 --- a/net/ceph/messenger.c @@ -98948,6 +99948,26 @@ index 988721a..947846d 100644 s = addr_str[i]; switch (ss->ss_family) { +diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c +index 2ac9ef3..dbcbf5a 100644 +--- a/net/ceph/mon_client.c ++++ b/net/ceph/mon_client.c +@@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, + if (!m) { + pr_info("alloc_msg unknown type %d\n", type); + *skip = 1; ++ } else if (front_len > m->front_alloc_len) { ++ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n", ++ front_len, m->front_alloc_len, ++ (unsigned int)con->peer_name.type, ++ le64_to_cpu(con->peer_name.num)); ++ ceph_msg_put(m); ++ m = ceph_msg_new(type, front_len, GFP_NOFS, false); + } ++ + return m; + } + diff --git a/net/compat.c b/net/compat.c index cbc1a2a..ab7644e 100644 --- a/net/compat.c @@ -103634,7 +104654,7 @@ index dfa532f..1dcfb44 100644 } diff --git a/net/socket.c b/net/socket.c -index a19ae19..89554dc 100644 +index a19ae19..edb5c03 100644 --- a/net/socket.c +++ b/net/socket.c @@ -88,6 +88,7 @@ @@ -103818,7 +104838,17 @@ index a19ae19..89554dc 100644 int err, err2; int fput_needed; -@@ -2065,7 +2131,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, +@@ -1987,6 +2053,9 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, + if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) + return -EFAULT; + ++ if (kmsg->msg_name == NULL) ++ kmsg->msg_namelen = 0; ++ + if (kmsg->msg_namelen < 0) + return -EINVAL; + +@@ -2065,7 +2134,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * checking falls down on this. */ if (copy_from_user(ctl_buf, @@ -103827,7 +104857,7 @@ index a19ae19..89554dc 100644 ctl_len)) goto out_freectl; msg_sys->msg_control = ctl_buf; -@@ -2216,7 +2282,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, +@@ -2216,7 +2285,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, int err, total_len, len; /* kernel mode address */ @@ -103836,7 +104866,7 @@ index a19ae19..89554dc 100644 /* user mode address pointers */ struct sockaddr __user *uaddr; -@@ -2245,7 +2311,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, +@@ -2245,7 +2314,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, /* Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ @@ -103845,7 +104875,7 @@ index a19ae19..89554dc 100644 uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); -@@ -2889,7 +2955,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) +@@ -2889,7 +2958,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) ifr = compat_alloc_user_space(buf_size); rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); @@ -103854,7 +104884,7 @@ index a19ae19..89554dc 100644 return -EFAULT; if (put_user(convert_in ? 
rxnfc : compat_ptr(data), -@@ -3000,7 +3066,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, +@@ -3000,7 +3069,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd, old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, @@ -103863,7 +104893,7 @@ index a19ae19..89554dc 100644 set_fs(old_fs); return err; -@@ -3093,7 +3159,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, +@@ -3093,7 +3162,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, old_fs = get_fs(); set_fs(KERNEL_DS); @@ -103872,7 +104902,7 @@ index a19ae19..89554dc 100644 set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { -@@ -3177,7 +3243,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, +@@ -3177,7 +3246,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, ret |= get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); @@ -103881,7 +104911,7 @@ index a19ae19..89554dc 100644 devname[15] = 0; } else r4.rt_dev = NULL; -@@ -3404,8 +3470,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, +@@ -3404,8 +3473,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, int __user *uoptlen; int err; @@ -103892,7 +104922,7 @@ index a19ae19..89554dc 100644 set_fs(KERNEL_DS); if (level == SOL_SOCKET) -@@ -3425,7 +3491,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, +@@ -3425,7 +3494,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, char __user *uoptval; int err; @@ -109542,10 +110572,10 @@ index 0000000..89f256d +} diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c new file mode 100644 -index 0000000..39d7cc7 +index 0000000..e48b323 --- /dev/null +++ b/tools/gcc/latent_entropy_plugin.c -@@ -0,0 +1,462 @@ +@@ -0,0 +1,466 @@ +/* + * Copyright 2012-2014 by the PaX Team <pageexec@freemail.hu> + * Licensed under the GPL v2 @@ -109574,7 +110604,7 @@ index 0000000..39d7cc7 +static tree latent_entropy_decl; + +static struct plugin_info latent_entropy_plugin_info = { -+ .version = "201403280150", ++ .version = "201409101820", + .help = NULL +}; + @@ -109750,6 +110780,10 @@ index 0000000..39d7cc7 + if (TREE_THIS_VOLATILE(current_function_decl)) + return false; + ++ // gcc-4.5 doesn't discover some trivial noreturn functions ++ if (EDGE_COUNT(EXIT_BLOCK_PTR_FOR_FN(cfun)->preds) == 0) ++ return false; ++ + return lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)) != NULL_TREE; +} + |