author     Natanael Copa <ncopa@alpinelinux.org>  2013-12-17 08:26:11 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>  2013-12-17 09:16:42 +0000
commit     2d95fec17198f679b87226a06739f53fa6614445 (patch)
tree       f3873a49da7124abd501291673db63eeafefd911 /main
parent     20497dd86dea2881735bf6eceb62e31ff26f2b0c (diff)
download   aports-2d95fec17198f679b87226a06739f53fa6614445.tar.bz2
           aports-2d95fec17198f679b87226a06739f53fa6614445.tar.xz
main/linux-grsec: upgrade to 3.12.5
Diffstat (limited to 'main')
-rw-r--r--  main/linux-grsec/APKBUILD                                                                                                         |   16
-rw-r--r--  main/linux-grsec/grsecurity-3.0-3.12.5-201312151212.patch (renamed from main/linux-grsec/grsecurity-3.0-3.12.4-201312081754.patch) | 2498
2 files changed, 1688 insertions, 826 deletions
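
For context, a minimal sketch of how a version bump like this is typically produced (assuming a local aports checkout with the abuild tool installed; this workflow is not part of the commit itself): after editing pkgver and the grsecurity patch filename in the APKBUILD, the md5sums/sha256sums/sha512sums blocks seen in the diff below are regenerated rather than edited by hand.

    # sketch only -- assumes the aports tree is checked out and abuild is installed
    cd aports/main/linux-grsec
    abuild checksum     # fetch the new sources and rewrite the checksum lines
    abuild -r           # rebuild linux-grsec to verify the upgraded kernel builds
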
diff --git a/main/linux-grsec/APKBUILD b/main/linux-grsec/APKBUILD
index c1a06bd5cb..14aeb0ddad 100644
--- a/main/linux-grsec/APKBUILD
+++ b/main/linux-grsec/APKBUILD
@@ -2,7 +2,7 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.12.4
+pkgver=3.12.5
case $pkgver in
*.*.*) _kernver=${pkgver%.*};;
*.*) _kernver=${pkgver};;
@@ -17,7 +17,7 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.x/linux-$_kernver.tar.xz
http://ftp.kernel.org/pub/linux/kernel/v3.x/patch-$pkgver.xz
- grsecurity-3.0-$pkgver-201312081754.patch
+ grsecurity-3.0-$pkgver-201312151212.patch
fix-memory-map-for-PIE-applications.patch
@@ -144,20 +144,20 @@ dev() {
}
md5sums="cc6ee608854e0da4b64f6c1ff8b6398c linux-3.12.tar.xz
-511b5a2f0de55b5e91fd293766ce182b patch-3.12.4.xz
-97395c529c1dd1826fff077c1ba9814e grsecurity-3.0-3.12.4-201312081754.patch
+70e456d21f7e7c0dc2f9bd170f1ae4ee patch-3.12.5.xz
+81ff7554dff7791db8880c67e3983d56 grsecurity-3.0-3.12.5-201312151212.patch
c6a4ae7e8ca6159e1631545515805216 fix-memory-map-for-PIE-applications.patch
1be334b642b74ce8ee6ad499491470b0 kernelconfig.x86
f073b6d53be4460289eb4c9cfa1a3909 kernelconfig.x86_64"
sha256sums="2e120ec7fde19fa51dc6b6cc11c81860a0775defcad5a5bf910ed9a50e845a02 linux-3.12.tar.xz
-b1e21b37e29c7f32f1395356958019ff1ac2f2e75bcc7dda2a60ba79cfffd845 patch-3.12.4.xz
-695fb49d9a8960f5ed8d11b24ce0286346b7dde9876d65a283210a8579b3b09e grsecurity-3.0-3.12.4-201312081754.patch
+bfb519ae2a3662340cb20b5f9433f9b3b8598e612286274f96ec8c8bf6bc09c4 patch-3.12.5.xz
+0fa7f629dea0eb02019f730f1bf216d5c70851de740d57dab74c23eb88749c68 grsecurity-3.0-3.12.5-201312151212.patch
500f3577310be52e87b9fecdc2e9c4ca43210fd97d69089f9005d484563f74c7 fix-memory-map-for-PIE-applications.patch
1e4ea33e1368b50cda84c1da3dac2b1e20057e045f37cd0506a1f0a321630aba kernelconfig.x86
213c1fc7f0694883d4bce982adc2b40756261282e030b9305b0cbccb3b175b32 kernelconfig.x86_64"
sha512sums="4ba5797e0772726d05c9f2eee66dc6dc2a5033c749ef44764c805a83da739ed5d0c6443b76785e38fe1ef74cc7ade787e48144faed0cfcb6f124f05248c700ff linux-3.12.tar.xz
-efbb8e2a343935651101b43e7b977dd8c69aca7871b75d221edb32f93bd8ad83c6d0ae8d6622249019f8448fee8d3d754192bf7947602b8ed435bd786f1bb241 patch-3.12.4.xz
-f9a9bfee3977624fa2e4cc047e35dad42c71fd9eaec1c73f7bd9ad951d23809cc690bb5070c6d56879db9bcf88f04ad8365aa0f8c302fb756dac2cfd720ac88d grsecurity-3.0-3.12.4-201312081754.patch
+4bf50be98a56d1c0721b4d8fded68d750213ac5120b8a57d51af3268adea7b96b795f1cb615219b64a0c8fbb479bc428526a53e257b4a24a19ffbc4c4e598a12 patch-3.12.5.xz
+422576f6b72182a3fdf2310a6e3e2931e3b18ae72f6709408e5dd32920c99407673e4c32f48c74ec841bbf7da04d6049fddfe3221aef16fc23f3736ab4bcee6d grsecurity-3.0-3.12.5-201312151212.patch
4665c56ae1bbac311f9205d64918e84ee8b01d47d6e2396ff6b8adfb10aada7f7254531ce62e31edbb65c2a54a830f09ad05d314dfcd75d6272f4068945ad7c7 fix-memory-map-for-PIE-applications.patch
381b2074adcc336ef0e015d52bc8a98cd7dd25d17aaa3c7c1768dd8244ee1f15c470e61514eb979b1c3e516e54eba6e51eab69b780553d5f3f8a283f2ba91851 kernelconfig.x86
0aec64a170ec14e4a8de7f9b2b28513a95839cacd2b319628f7c9421179cd76b75e528847b10fe15441432d91b3d930605440c2c9442de59c51eeb4ab3f3921d kernelconfig.x86_64"
diff --git a/main/linux-grsec/grsecurity-3.0-3.12.4-201312081754.patch b/main/linux-grsec/grsecurity-3.0-3.12.5-201312151212.patch
index fa9c2c7fdf..81f0265cdc 100644
--- a/main/linux-grsec/grsecurity-3.0-3.12.4-201312081754.patch
+++ b/main/linux-grsec/grsecurity-3.0-3.12.5-201312151212.patch
@@ -281,7 +281,7 @@ index fcbb736..5508d8c 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 3b7165e..9112a63 100644
+index 986f3cd..8691deb 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -1968,7 +1968,7 @@ index 5689c18..eea12f9 100644
#define L_PTE_DIRTY_HIGH (1 << (55 - 32))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
-index be956db..c8f25e2 100644
+index 1571d12..b8a9b43 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -33,6 +33,9 @@
@@ -4123,7 +4123,7 @@ index f123d6e..04bf569 100644
return __arm_ioremap_caller(phys_addr, size, mtype,
__builtin_return_address(0));
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index 0c63562..7128a90 100644
+index 304661d..53a6b19 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -7242,7 +7242,7 @@ index 2a625fb..9908930 100644
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index 5dfd248..64914ac 100644
+index 0d3a9d4..44975d0 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -33,9 +33,11 @@
@@ -7266,29 +7266,26 @@ index 5dfd248..64914ac 100644
return vm_unmapped_area(&info);
}
-@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
- return (unsigned long) mapping >> 8;
+@@ -69,15 +72,17 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
}
--static unsigned long get_shared_area(struct address_space *mapping,
-- unsigned long addr, unsigned long len, unsigned long pgoff)
-+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
-+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ static unsigned long get_shared_area(struct file *filp, unsigned long addr,
+- unsigned long len, unsigned long pgoff)
++ unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
info.flags = 0;
info.length = len;
-@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ info.low_limit = PAGE_ALIGN(addr);
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & (SHMLBA - 1);
- info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+ info.threadstack_offset = offset;
+ info.align_offset = shared_align_offset(filp, pgoff);
return vm_unmapped_area(&info);
}
-
-@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -EINVAL;
return addr;
}
@@ -7303,16 +7300,13 @@ index 5dfd248..64914ac 100644
+
+ }
+
- if (filp) {
-- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
-+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
- } else if(flags & MAP_SHARED) {
-- addr = get_shared_area(NULL, addr, len, pgoff);
-+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
- } else {
+ if (filp || (flags & MAP_SHARED))
+- addr = get_shared_area(filp, addr, len, pgoff);
++ addr = get_shared_area(filp, addr, len, pgoff, flags);
+ else
- addr = get_unshared_area(addr, len);
-+ addr = get_unshared_area(filp, addr, len, flags);
- }
++ addr = get_unshared_area(addr, len, flags);
+
return addr;
}
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
@@ -11748,10 +11742,10 @@ index 78d91af..8ceb94b 100644
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 41250fb..863762e 100644
+index eda00f9..c511701 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
-@@ -46,14 +46,12 @@ ifeq ($(CONFIG_X86_32),y)
+@@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
# CPU-specific tuning. Anything which can be shared with UML should go here.
include $(srctree)/arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
@@ -11767,7 +11761,7 @@ index 41250fb..863762e 100644
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
-@@ -83,6 +81,9 @@ else
+@@ -89,6 +87,9 @@ else
KBUILD_CFLAGS += -maccumulate-outgoing-args
endif
@@ -11777,7 +11771,7 @@ index 41250fb..863762e 100644
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
-@@ -241,3 +242,12 @@ define archhelp
+@@ -247,3 +248,12 @@ define archhelp
echo ' FDINITRD=file initrd for the booted kernel'
echo ' kvmconfig - Enable additional options for guest kernel support'
endef
@@ -12159,7 +12153,7 @@ index 43eda28..5ab5fdb 100644
unsigned int v;
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 9105655..5e37f27 100644
+index 9105655..41779c1 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -8,6 +8,8 @@
@@ -12175,13 +12169,13 @@ index 9105655..5e37f27 100644
je B192; \
leaq 32(r9),r9;
-+#define ret pax_force_retaddr 0, 1; ret
++#define ret pax_force_retaddr; ret
+
#define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
movq r1,r2; \
movq r3,r4; \
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
-index 477e9d7..3ab339f 100644
+index 477e9d7..c92c7d8 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -31,6 +31,7 @@
@@ -12192,19 +12186,240 @@ index 477e9d7..3ab339f 100644
#ifdef __x86_64__
.data
-@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
+@@ -205,7 +206,7 @@ enc: .octa 0x2
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -214,8 +215,8 @@ enc: .octa 0x2
+ .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ movdqa SHUF_MASK(%rip), %xmm14
+@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
+ .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ movdqa SHUF_MASK(%rip), %xmm14
+@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ *
+ *****************************************************************************/
+ ENTRY(aesni_gcm_dec)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
+ */
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13 # %xmm13 = HashKey
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13 # %xmm13 = HashKey
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
+ movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
+ mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+- mov %r13, %r12
+- and $(3<<4), %r12
++ mov %r13, %r15
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_decrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_decrypt
+ je _initial_num_blocks_is_2_decrypt
+ _initial_num_blocks_is_3_decrypt:
+@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+
+ movdqa %xmm1, %xmm2
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm2
+@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_decrypt
+ _multiple_of_16_bytes_decrypt:
+- mov arg8, %r12 # %r13 = aadLen (number of bytes)
+- shl $3, %r12 # convert into number of bits
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r13 = aadLen (number of bytes)
++ shl $3, %r15 # convert into number of bits
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
+ mov %r14, %rsp
pop %r14
pop %r13
- pop %r12
-+ pax_force_retaddr 0, 1
+- pop %r12
++ pop %r15
++ pax_force_retaddr
ret
ENDPROC(aesni_gcm_dec)
-@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
+@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+ ENTRY(aesni_gcm_enc)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
+ #
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
+ movdqa %xmm13, HashKey(%rsp)
+ mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
+ and $-16, %r13
+- mov %r13, %r12
++ mov %r13, %r15
+
+ # Encrypt first few blocks
+
+- and $(3<<4), %r12
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_encrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_encrypt
+ je _initial_num_blocks_is_2_encrypt
+ _initial_num_blocks_is_3_encrypt:
+@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+ movdqa SHUF_MASK(%rip), %xmm10
+@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_encrypt
+ _multiple_of_16_bytes_encrypt:
+- mov arg8, %r12 # %r12 = addLen (number of bytes)
+- shl $3, %r12
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r15 = addLen (number of bytes)
++ shl $3, %r15
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
+ mov %r14, %rsp
pop %r14
pop %r13
- pop %r12
-+ pax_force_retaddr 0, 1
+- pop %r12
++ pop %r15
++ pax_force_retaddr
ret
ENDPROC(aesni_gcm_enc)
@@ -12212,7 +12427,7 @@ index 477e9d7..3ab339f 100644
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_key_expansion_128)
ENDPROC(_key_expansion_256a)
@@ -12220,7 +12435,7 @@ index 477e9d7..3ab339f 100644
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_key_expansion_192a)
@@ -12228,7 +12443,7 @@ index 477e9d7..3ab339f 100644
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_key_expansion_192b)
@@ -12236,7 +12451,7 @@ index 477e9d7..3ab339f 100644
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_key_expansion_256b)
@@ -12244,7 +12459,7 @@ index 477e9d7..3ab339f 100644
#ifndef __x86_64__
popl KEYP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_set_key)
@@ -12252,7 +12467,7 @@ index 477e9d7..3ab339f 100644
popl KLEN
popl KEYP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_enc)
@@ -12260,7 +12475,7 @@ index 477e9d7..3ab339f 100644
AESENC KEY STATE
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_aesni_enc1)
@@ -12268,7 +12483,7 @@ index 477e9d7..3ab339f 100644
AESENCLAST KEY STATE2
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_aesni_enc4)
@@ -12276,7 +12491,7 @@ index 477e9d7..3ab339f 100644
popl KLEN
popl KEYP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_dec)
@@ -12284,7 +12499,7 @@ index 477e9d7..3ab339f 100644
AESDEC KEY STATE
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_aesni_dec1)
@@ -12292,7 +12507,7 @@ index 477e9d7..3ab339f 100644
AESDECLAST KEY STATE2
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_aesni_dec4)
@@ -12300,7 +12515,7 @@ index 477e9d7..3ab339f 100644
popl KEYP
popl LEN
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_ecb_enc)
@@ -12308,7 +12523,7 @@ index 477e9d7..3ab339f 100644
popl KEYP
popl LEN
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_ecb_dec)
@@ -12316,7 +12531,7 @@ index 477e9d7..3ab339f 100644
popl LEN
popl IVP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_cbc_enc)
@@ -12324,7 +12539,7 @@ index 477e9d7..3ab339f 100644
popl LEN
popl IVP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_cbc_dec)
@@ -12332,7 +12547,7 @@ index 477e9d7..3ab339f 100644
mov $1, TCTR_LOW
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_aesni_inc_init)
@@ -12340,7 +12555,7 @@ index 477e9d7..3ab339f 100644
.Linc_low:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
ENDPROC(_aesni_inc)
@@ -12348,7 +12563,7 @@ index 477e9d7..3ab339f 100644
.Lctr_enc_ret:
movups IV, (IVP)
.Lctr_enc_just_ret:
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_ctr_enc)
@@ -12356,12 +12571,12 @@ index 477e9d7..3ab339f 100644
pxor INC, STATE4
movdqu STATE4, 0x70(OUTP)
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(aesni_xts_crypt8)
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-index 246c670..4d1ed00 100644
+index 246c670..466e2d6 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -21,6 +21,7 @@
@@ -12376,11 +12591,11 @@ index 246c670..4d1ed00 100644
jnz .L__enc_xor;
write_block();
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.L__enc_xor:
xor_block();
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__blowfish_enc_blk)
@@ -12388,7 +12603,7 @@ index 246c670..4d1ed00 100644
movq %r11, %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(blowfish_dec_blk)
@@ -12396,7 +12611,7 @@ index 246c670..4d1ed00 100644
popq %rbx;
popq %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.L__enc_xor4:
@@ -12404,7 +12619,7 @@ index 246c670..4d1ed00 100644
popq %rbx;
popq %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__blowfish_enc_blk_4way)
@@ -12412,11 +12627,11 @@ index 246c670..4d1ed00 100644
popq %rbx;
popq %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(blowfish_dec_blk_4way)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-index ce71f92..2dd5b1e 100644
+index ce71f92..1dce7ec 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -16,6 +16,7 @@
@@ -12431,7 +12646,7 @@ index ce71f92..2dd5b1e 100644
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
@@ -12439,7 +12654,7 @@ index ce71f92..2dd5b1e 100644
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
@@ -12447,7 +12662,7 @@ index ce71f92..2dd5b1e 100644
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
.align 8
@@ -12455,7 +12670,7 @@ index ce71f92..2dd5b1e 100644
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
.align 8
@@ -12463,7 +12678,7 @@ index ce71f92..2dd5b1e 100644
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_ecb_enc_16way)
@@ -12471,7 +12686,7 @@ index ce71f92..2dd5b1e 100644
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_ecb_dec_16way)
@@ -12479,7 +12694,7 @@ index ce71f92..2dd5b1e 100644
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_cbc_dec_16way)
@@ -12487,7 +12702,7 @@ index ce71f92..2dd5b1e 100644
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_ctr_16way)
@@ -12495,12 +12710,12 @@ index ce71f92..2dd5b1e 100644
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_xts_crypt_16way)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-index 0e0b886..8fc756a 100644
+index 0e0b886..5a3123c 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -11,6 +11,7 @@
@@ -12515,7 +12730,7 @@ index 0e0b886..8fc756a 100644
roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
%rcx, (%r9));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
@@ -12523,7 +12738,7 @@ index 0e0b886..8fc756a 100644
roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
%ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
%rax, (%r9));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
@@ -12531,7 +12746,7 @@ index 0e0b886..8fc756a 100644
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
.align 8
@@ -12539,7 +12754,7 @@ index 0e0b886..8fc756a 100644
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret;
.align 8
@@ -12547,7 +12762,7 @@ index 0e0b886..8fc756a 100644
vzeroupper;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_ecb_enc_32way)
@@ -12555,7 +12770,7 @@ index 0e0b886..8fc756a 100644
vzeroupper;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_ecb_dec_32way)
@@ -12563,7 +12778,7 @@ index 0e0b886..8fc756a 100644
vzeroupper;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_cbc_dec_32way)
@@ -12571,7 +12786,7 @@ index 0e0b886..8fc756a 100644
vzeroupper;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_ctr_32way)
@@ -12579,12 +12794,12 @@ index 0e0b886..8fc756a 100644
vzeroupper;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_xts_crypt_32way)
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
-index 310319c..ce174a4 100644
+index 310319c..db3d7b5 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -21,6 +21,7 @@
@@ -12599,14 +12814,14 @@ index 310319c..ce174a4 100644
enc_outunpack(mov, RT1);
movq RRBP, %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.L__enc_xor:
enc_outunpack(xor, RT1);
movq RRBP, %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__camellia_enc_blk)
@@ -12614,7 +12829,7 @@ index 310319c..ce174a4 100644
dec_outunpack();
movq RRBP, %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_dec_blk)
@@ -12622,7 +12837,7 @@ index 310319c..ce174a4 100644
movq RRBP, %rbp;
popq %rbx;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.L__enc2_xor:
@@ -12630,7 +12845,7 @@ index 310319c..ce174a4 100644
movq RRBP, %rbp;
popq %rbx;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__camellia_enc_blk_2way)
@@ -12638,11 +12853,11 @@ index 310319c..ce174a4 100644
movq RRBP, %rbp;
movq RXOR, %rbx;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(camellia_dec_blk_2way)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
-index c35fd5d..c1ee236 100644
+index c35fd5d..2d8c7db 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
@@ -12657,7 +12872,7 @@ index c35fd5d..c1ee236 100644
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__cast5_enc_blk16)
@@ -12665,7 +12880,7 @@ index c35fd5d..c1ee236 100644
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.L__skip_dec:
@@ -12685,23 +12900,103 @@ index c35fd5d..c1ee236 100644
ret;
ENDPROC(cast5_ecb_dec_16way)
-@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
+@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
+ * %rdx: src
+ */
- popq %r12;
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ vmovdqu (0*16)(%rdx), RL1;
+ vmovdqu (1*16)(%rdx), RR1;
+@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
+ call __cast5_dec_blk16;
+
+ /* xor with src */
+- vmovq (%r12), RX;
++ vmovq (%r14), RX;
+ vpshufd $0x4f, RX, RX;
+ vpxor RX, RR1, RR1;
+- vpxor 0*16+8(%r12), RL1, RL1;
+- vpxor 1*16+8(%r12), RR2, RR2;
+- vpxor 2*16+8(%r12), RL2, RL2;
+- vpxor 3*16+8(%r12), RR3, RR3;
+- vpxor 4*16+8(%r12), RL3, RL3;
+- vpxor 5*16+8(%r12), RR4, RR4;
+- vpxor 6*16+8(%r12), RL4, RL4;
++ vpxor 0*16+8(%r14), RL1, RL1;
++ vpxor 1*16+8(%r14), RR2, RR2;
++ vpxor 2*16+8(%r14), RL2, RL2;
++ vpxor 3*16+8(%r14), RR3, RR3;
++ vpxor 4*16+8(%r14), RL3, RL3;
++ vpxor 5*16+8(%r14), RR4, RR4;
++ vpxor 6*16+8(%r14), RL4, RL4;
+
+ vmovdqu RR1, (0*16)(%r11);
+ vmovdqu RL1, (1*16)(%r11);
+@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
+ vmovdqu RR4, (6*16)(%r11);
+ vmovdqu RL4, (7*16)(%r11);
+
+- popq %r12;
++ popq %r14;
+ pax_force_retaddr
ret;
ENDPROC(cast5_cbc_dec_16way)
-@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
+@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
+ * %rcx: iv (big endian, 64bit)
+ */
- popq %r12;
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ vpcmpeqd RTMP, RTMP, RTMP;
+ vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
+@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
+ call __cast5_enc_blk16;
+
+ /* dst = src ^ iv */
+- vpxor (0*16)(%r12), RR1, RR1;
+- vpxor (1*16)(%r12), RL1, RL1;
+- vpxor (2*16)(%r12), RR2, RR2;
+- vpxor (3*16)(%r12), RL2, RL2;
+- vpxor (4*16)(%r12), RR3, RR3;
+- vpxor (5*16)(%r12), RL3, RL3;
+- vpxor (6*16)(%r12), RR4, RR4;
+- vpxor (7*16)(%r12), RL4, RL4;
++ vpxor (0*16)(%r14), RR1, RR1;
++ vpxor (1*16)(%r14), RL1, RL1;
++ vpxor (2*16)(%r14), RR2, RR2;
++ vpxor (3*16)(%r14), RL2, RL2;
++ vpxor (4*16)(%r14), RR3, RR3;
++ vpxor (5*16)(%r14), RL3, RL3;
++ vpxor (6*16)(%r14), RR4, RR4;
++ vpxor (7*16)(%r14), RL4, RL4;
+ vmovdqu RR1, (0*16)(%r11);
+ vmovdqu RL1, (1*16)(%r11);
+ vmovdqu RR2, (2*16)(%r11);
+@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
+ vmovdqu RR4, (6*16)(%r11);
+ vmovdqu RL4, (7*16)(%r11);
+
+- popq %r12;
++ popq %r14;
+ pax_force_retaddr
ret;
ENDPROC(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
-index e3531f8..18ded3a 100644
+index e3531f8..e123f35 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
@@ -12716,7 +13011,7 @@ index e3531f8..18ded3a 100644
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__cast6_enc_blk8)
@@ -12724,7 +13019,7 @@ index e3531f8..18ded3a 100644
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__cast6_dec_blk8)
@@ -12744,17 +13039,52 @@ index e3531f8..18ded3a 100644
ret;
ENDPROC(cast6_ecb_dec_8way)
-@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
+@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
+ * %rdx: src
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __cast6_dec_blk8;
- popq %r12;
+- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- popq %r12;
++ popq %r14;
+ pax_force_retaddr
ret;
ENDPROC(cast6_cbc_dec_8way)
-@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
+@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
+ * %rcx: iv (little endian, 128bit)
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
- popq %r12;
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RX, RKR, RKM);
+
+ call __cast6_enc_blk8;
+
+- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- popq %r12;
++ popq %r14;
+ pax_force_retaddr
ret;
@@ -12776,7 +13106,7 @@ index e3531f8..18ded3a 100644
ret;
ENDPROC(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-index dbc4339..3d868c5 100644
+index dbc4339..de6e120 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
@@ -12791,7 +13121,7 @@ index dbc4339..3d868c5 100644
popq %rsi
popq %rdi
popq %rbx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
################################################################
@@ -12839,7 +13169,7 @@ index 586f41a..d02851e 100644
ret
ENDPROC(clmul_ghash_setkey)
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-index 9279e0b..9270820 100644
+index 9279e0b..c4b3d2c 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -1,4 +1,5 @@
@@ -12852,7 +13182,7 @@ index 9279e0b..9270820 100644
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
# bytesatleast65:
._bytesatleast65:
@@ -13056,7 +13386,7 @@ index acc066c..1559cc4 100644
ret;
ENDPROC(serpent_dec_blk_8way)
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
-index a410950..3356d42 100644
+index a410950..9dfe7ad 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -29,6 +29,7 @@
@@ -13067,16 +13397,35 @@ index a410950..3356d42 100644
#define CTX %rdi // arg1
#define BUF %rsi // arg2
-@@ -104,6 +105,7 @@
- pop %r12
+@@ -75,9 +76,9 @@
+
+ push %rbx
+ push %rbp
+- push %r12
++ push %r14
+
+- mov %rsp, %r12
++ mov %rsp, %r14
+ sub $64, %rsp # allocate workspace
+ and $~15, %rsp # align stack
+
+@@ -99,11 +100,12 @@
+ xor %rax, %rax
+ rep stosq
+
+- mov %r12, %rsp # deallocate workspace
++ mov %r14, %rsp # deallocate workspace
+
+- pop %r12
++ pop %r14
pop %rbp
pop %rbx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(\name)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
-index 642f156..4ab07b9 100644
+index 642f156..51a513c 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -49,6 +49,7 @@
@@ -13091,12 +13440,12 @@ index 642f156..4ab07b9 100644
popq %r13
popq %rbp
popq %rbx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(sha256_transform_avx)
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
-index 9e86944..2e7f95a 100644
+index 9e86944..3795e6a 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -50,6 +50,7 @@
@@ -13111,12 +13460,12 @@ index 9e86944..2e7f95a 100644
popq %r12
popq %rbp
popq %rbx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(sha256_transform_rorx)
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
-index f833b74..c36ed14 100644
+index f833b74..8c62a9e 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -47,6 +47,7 @@
@@ -13131,12 +13480,12 @@ index f833b74..c36ed14 100644
popq %rbp
popq %rbx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(sha256_transform_ssse3)
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
-index 974dde9..4533d34 100644
+index 974dde9..a823ff9 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -49,6 +49,7 @@
@@ -13151,12 +13500,12 @@ index 974dde9..4533d34 100644
mov frame_RSPSAVE(%rsp), %rsp
nowork:
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(sha512_transform_avx)
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
-index 568b961..061ef1d 100644
+index 568b961..ed20c37 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -51,6 +51,7 @@
@@ -13171,12 +13520,12 @@ index 568b961..061ef1d 100644
# Restore Stack Pointer
mov frame_RSPSAVE(%rsp), %rsp
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(sha512_transform_rorx)
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
-index fb56855..e23914f 100644
+index fb56855..6edd768 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -48,6 +48,7 @@
@@ -13191,12 +13540,12 @@ index fb56855..e23914f 100644
mov frame_RSPSAVE(%rsp), %rsp
nowork:
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(sha512_transform_ssse3)
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
-index 0505813..63b1d00 100644
+index 0505813..b067311 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
@@ -13211,7 +13560,7 @@ index 0505813..63b1d00 100644
outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__twofish_enc_blk8)
@@ -13219,7 +13568,7 @@ index 0505813..63b1d00 100644
outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__twofish_dec_blk8)
@@ -13227,7 +13576,7 @@ index 0505813..63b1d00 100644
store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(twofish_ecb_enc_8way)
@@ -13235,23 +13584,58 @@ index 0505813..63b1d00 100644
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(twofish_ecb_dec_8way)
-@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
+@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
+ * %rdx: src
+ */
+
+- pushq %r12;
++ pushq %r14;
- popq %r12;
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
-+ pax_force_retaddr 0, 1
+ load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+ call __twofish_dec_blk8;
+
+- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
ret;
ENDPROC(twofish_cbc_dec_8way)
-@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
+@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
+ * %rcx: iv (little endian, 128bit)
+ */
- popq %r12;
+- pushq %r12;
++ pushq %r14;
-+ pax_force_retaddr 0, 1
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RX0, RX1, RY0);
+
+ call __twofish_enc_blk8;
+
+- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
++ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
ret;
ENDPROC(twofish_ctr_8way)
@@ -13259,7 +13643,7 @@ index 0505813..63b1d00 100644
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(twofish_xts_enc_8way)
@@ -13267,11 +13651,11 @@ index 0505813..63b1d00 100644
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(twofish_xts_dec_8way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-index 1c3b7ce..b365c5e 100644
+index 1c3b7ce..02f578d 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -21,6 +21,7 @@
@@ -13286,7 +13670,7 @@ index 1c3b7ce..b365c5e 100644
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.L__enc_xor3:
@@ -13294,7 +13678,7 @@ index 1c3b7ce..b365c5e 100644
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(__twofish_enc_blk_3way)
@@ -13302,11 +13686,11 @@ index 1c3b7ce..b365c5e 100644
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
-index a039d21..29e7615 100644
+index a039d21..524b8b2 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -22,6 +22,7 @@
@@ -13321,7 +13705,7 @@ index a039d21..29e7615 100644
popq R1
movq $1,%rax
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(twofish_enc_blk)
@@ -13329,7 +13713,7 @@ index a039d21..29e7615 100644
popq R1
movq $1,%rax
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
@@ -13399,7 +13783,7 @@ index 665a730..8e7a67a 100644
err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 4299eb0..904b82a 100644
+index 4299eb0..c0687a7 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -15,8 +15,10 @@
@@ -13413,6 +13797,24 @@ index 4299eb0..904b82a 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
+@@ -62,12 +64,12 @@
+ */
+ .macro LOAD_ARGS32 offset, _r9=0
+ .if \_r9
+- movl \offset+16(%rsp),%r9d
++ movl \offset+R9(%rsp),%r9d
+ .endif
+- movl \offset+40(%rsp),%ecx
+- movl \offset+48(%rsp),%edx
+- movl \offset+56(%rsp),%esi
+- movl \offset+64(%rsp),%edi
++ movl \offset+RCX(%rsp),%ecx
++ movl \offset+RDX(%rsp),%edx
++ movl \offset+RSI(%rsp),%esi
++ movl \offset+RDI(%rsp),%edi
+ movl %eax,%eax /* zero extension */
+ .endm
+
@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
ENDPROC(native_irq_enable_sysexit)
#endif
@@ -13514,7 +13916,7 @@ index 4299eb0..904b82a 100644
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,12 +209,15 @@ sysenter_do_call:
+@@ -162,15 +209,18 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -13530,8 +13932,13 @@ index 4299eb0..904b82a 100644
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
/* clear IF, that popfq doesn't enable interrupts early */
- andl $~0x200,EFLAGS-R11(%rsp)
- movl RIP-R11(%rsp),%edx /* User %eip */
+- andl $~0x200,EFLAGS-R11(%rsp)
+- movl RIP-R11(%rsp),%edx /* User %eip */
++ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
++ movl RIP(%rsp),%edx /* User %eip */
+ CFI_REGISTER rip,rdx
+ RESTORE_ARGS 0,24,0,0,0,0
+ xorq %r8,%r8
@@ -193,6 +243,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
@@ -13640,7 +14047,7 @@ index 4299eb0..904b82a 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -319,12 +395,15 @@ cstar_do_call:
+@@ -319,13 +395,16 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -13652,12 +14059,14 @@ index 4299eb0..904b82a 100644
jnz sysretl_audit
sysretl_from_sys_call:
- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+ pax_exit_kernel_user
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
++ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
+ movl EFLAGS-ARGOFFSET(%rsp),%r11d
@@ -352,7 +431,7 @@ sysretl_audit:
cstar_tracesys:
@@ -13747,7 +14156,7 @@ index 8e0ceec..af13504 100644
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index 372231c..a5aa1a1 100644
+index 372231c..51b537d 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -18,6 +18,45 @@
@@ -13773,13 +14182,13 @@ index 372231c..a5aa1a1 100644
+ .if \reload
+ pax_set_fptr_mask
+ .endif
-+ orq %r10,\rip(%rsp)
++ orq %r12,\rip(%rsp)
+ .endm
+ .macro pax_force_fptr ptr
-+ orq %r10,\ptr
++ orq %r12,\ptr
+ .endm
+ .macro pax_set_fptr_mask
-+ movabs $0x8000000000000000,%r10
++ movabs $0x8000000000000000,%r12
+ .endm
+#endif
+#else
@@ -14881,6 +15290,178 @@ index 9863ee3..4a1f8e1 100644
else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
else if (pg_flags == _PGMT_UC_MINUS)
+diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
+index 0fa6750..cb7b2c3 100644
+--- a/arch/x86/include/asm/calling.h
++++ b/arch/x86/include/asm/calling.h
+@@ -80,103 +80,113 @@ For 32-bit we have the following conventions - kernel is built with
+ #define RSP 152
+ #define SS 160
+
+-#define ARGOFFSET R11
+-#define SWFRAME ORIG_RAX
++#define ARGOFFSET R15
+
+ .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
+- subq $9*8+\addskip, %rsp
+- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
+- movq_cfi rdi, 8*8
+- movq_cfi rsi, 7*8
+- movq_cfi rdx, 6*8
++ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
++ movq_cfi rdi, RDI
++ movq_cfi rsi, RSI
++ movq_cfi rdx, RDX
+
+ .if \save_rcx
+- movq_cfi rcx, 5*8
++ movq_cfi rcx, RCX
+ .endif
+
+- movq_cfi rax, 4*8
++ movq_cfi rax, RAX
+
+ .if \save_r891011
+- movq_cfi r8, 3*8
+- movq_cfi r9, 2*8
+- movq_cfi r10, 1*8
+- movq_cfi r11, 0*8
++ movq_cfi r8, R8
++ movq_cfi r9, R9
++ movq_cfi r10, R10
++ movq_cfi r11, R11
+ .endif
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
+ .endm
+
+-#define ARG_SKIP (9*8)
++#define ARG_SKIP ORIG_RAX
+
+ .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
+ rstor_r8910=1, rstor_rdx=1
++
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12, r12
++#endif
++
+ .if \rstor_r11
+- movq_cfi_restore 0*8, r11
++ movq_cfi_restore R11, r11
+ .endif
+
+ .if \rstor_r8910
+- movq_cfi_restore 1*8, r10
+- movq_cfi_restore 2*8, r9
+- movq_cfi_restore 3*8, r8
++ movq_cfi_restore R10, r10
++ movq_cfi_restore R9, r9
++ movq_cfi_restore R8, r8
+ .endif
+
+ .if \rstor_rax
+- movq_cfi_restore 4*8, rax
++ movq_cfi_restore RAX, rax
+ .endif
+
+ .if \rstor_rcx
+- movq_cfi_restore 5*8, rcx
++ movq_cfi_restore RCX, rcx
+ .endif
+
+ .if \rstor_rdx
+- movq_cfi_restore 6*8, rdx
++ movq_cfi_restore RDX, rdx
+ .endif
+
+- movq_cfi_restore 7*8, rsi
+- movq_cfi_restore 8*8, rdi
++ movq_cfi_restore RSI, rsi
++ movq_cfi_restore RDI, rdi
+
+- .if ARG_SKIP+\addskip > 0
+- addq $ARG_SKIP+\addskip, %rsp
+- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
++ .if ORIG_RAX+\addskip > 0
++ addq $ORIG_RAX+\addskip, %rsp
++ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
+ .endif
+ .endm
+
+- .macro LOAD_ARGS offset, skiprax=0
+- movq \offset(%rsp), %r11
+- movq \offset+8(%rsp), %r10
+- movq \offset+16(%rsp), %r9
+- movq \offset+24(%rsp), %r8
+- movq \offset+40(%rsp), %rcx
+- movq \offset+48(%rsp), %rdx
+- movq \offset+56(%rsp), %rsi
+- movq \offset+64(%rsp), %rdi
++ .macro LOAD_ARGS skiprax=0
++ movq R11(%rsp), %r11
++ movq R10(%rsp), %r10
++ movq R9(%rsp), %r9
++ movq R8(%rsp), %r8
++ movq RCX(%rsp), %rcx
++ movq RDX(%rsp), %rdx
++ movq RSI(%rsp), %rsi
++ movq RDI(%rsp), %rdi
+ .if \skiprax
+ .else
+- movq \offset+72(%rsp), %rax
++ movq RAX(%rsp), %rax
+ .endif
+ .endm
+
+-#define REST_SKIP (6*8)
+-
+ .macro SAVE_REST
+- subq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET REST_SKIP
+- movq_cfi rbx, 5*8
+- movq_cfi rbp, 4*8
+- movq_cfi r12, 3*8
+- movq_cfi r13, 2*8
+- movq_cfi r14, 1*8
+- movq_cfi r15, 0*8
++ movq_cfi rbx, RBX
++ movq_cfi rbp, RBP
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
++ movq_cfi r13, R13
++ movq_cfi r14, R14
++ movq_cfi r15, R15
+ .endm
+
+ .macro RESTORE_REST
+- movq_cfi_restore 0*8, r15
+- movq_cfi_restore 1*8, r14
+- movq_cfi_restore 2*8, r13
+- movq_cfi_restore 3*8, r12
+- movq_cfi_restore 4*8, rbp
+- movq_cfi_restore 5*8, rbx
+- addq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
++ movq_cfi_restore R15, r15
++ movq_cfi_restore R14, r14
++ movq_cfi_restore R13, r13
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12, r12
++#endif
++
++ movq_cfi_restore RBP, rbp
++ movq_cfi_restore RBX, rbx
+ .endm
+
+ .macro SAVE_ALL
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index f50de69..2b0a458 100644
--- a/arch/x86/include/asm/checksum_32.h
@@ -18961,6 +19542,18 @@ index bbae024..e1528f9 100644
#define BIOS_END 0x00100000
#define BIOS_ROM_BASE 0xffe00000
+diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
+index 7b0a55a..ad115bf 100644
+--- a/arch/x86/include/uapi/asm/ptrace-abi.h
++++ b/arch/x86/include/uapi/asm/ptrace-abi.h
+@@ -49,7 +49,6 @@
+ #define EFLAGS 144
+ #define RSP 152
+ #define SS 160
+-#define ARGOFFSET R11
+ #endif /* __ASSEMBLY__ */
+
+ /* top of stack page */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a5408b9..5133813 100644
--- a/arch/x86/kernel/Makefile
@@ -21406,7 +21999,7 @@ index f0dcb0c..9f39b80 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index b077f4c..feb26c1 100644
+index b077f4c..8e0df9f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -59,6 +59,8 @@
@@ -21924,27 +22517,84 @@ index b077f4c..feb26c1 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
+@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
+- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
++ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
+ jnc 1f
+ TRACE_IRQS_ON_DEBUG
+ 1:
+@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
+ movq \tmp,R11+\offset(%rsp)
.endm
- .macro UNFAKE_STACK_FRAME
+- .macro FAKE_STACK_FRAME child_rip
+- /* push in order ss, rsp, eflags, cs, rip */
+- xorl %eax, %eax
+- pushq_cfi $__KERNEL_DS /* ss */
+- /*CFI_REL_OFFSET ss,0*/
+- pushq_cfi %rax /* rsp */
+- CFI_REL_OFFSET rsp,0
+- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
+- /*CFI_REL_OFFSET rflags,0*/
+- pushq_cfi $__KERNEL_CS /* cs */
+- /*CFI_REL_OFFSET cs,0*/
+- pushq_cfi \child_rip /* rip */
+- CFI_REL_OFFSET rip,0
+- pushq_cfi %rax /* orig rax */
+- .endm
+-
+- .macro UNFAKE_STACK_FRAME
- addq $8*6, %rsp
- CFI_ADJUST_CFA_OFFSET -(6*8)
-+ addq $8*6 + ARG_SKIP, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
- .endm
-
+- .endm
+-
/*
-@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
+ * initial frame state for interrupts (and exceptions without error code)
+ */
+@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
+ /* save partial stack frame */
+ .macro SAVE_ARGS_IRQ
+ cld
+- /* start from rbp in pt_regs and jump over */
+- movq_cfi rdi, (RDI-RBP)
+- movq_cfi rsi, (RSI-RBP)
+- movq_cfi rdx, (RDX-RBP)
+- movq_cfi rcx, (RCX-RBP)
+- movq_cfi rax, (RAX-RBP)
+- movq_cfi r8, (R8-RBP)
+- movq_cfi r9, (R9-RBP)
+- movq_cfi r10, (R10-RBP)
+- movq_cfi r11, (R11-RBP)
++ /* start from r15 in pt_regs and jump over */
++ movq_cfi rdi, RDI
++ movq_cfi rsi, RSI
++ movq_cfi rdx, RDX
++ movq_cfi rcx, RCX
++ movq_cfi rax, RAX
++ movq_cfi r8, R8
++ movq_cfi r9, R9
++ movq_cfi r10, R10
++ movq_cfi r11, R11
++ movq_cfi r12, R12
+
+ /* Save rbp so that we can unwind from get_irq_regs() */
+- movq_cfi rbp, 0
++ movq_cfi rbp, RBP
+
+ /* Save previous stack value */
movq %rsp, %rsi
- leaq -RBP(%rsp),%rdi /* arg1 for handler */
+- leaq -RBP(%rsp),%rdi /* arg1 for handler */
- testl $3, CS-RBP(%rsi)
-+ testb $3, CS-RBP(%rsi)
++ movq %rsp,%rdi /* arg1 for handler */
++ testb $3, CS(%rsi)
je 1f
SWAPGS
/*
-@@ -514,9 +947,10 @@ ENTRY(save_paranoid)
+@@ -514,9 +927,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -21957,7 +22607,7 @@ index b077f4c..feb26c1 100644
.popsection
/*
-@@ -538,7 +972,7 @@ ENTRY(ret_from_fork)
+@@ -538,7 +952,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -21966,7 +22616,15 @@ index b077f4c..feb26c1 100644
jz 1f
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -556,7 +990,7 @@ ENTRY(ret_from_fork)
+@@ -548,15 +962,13 @@ ENTRY(ret_from_fork)
+ jmp ret_from_sys_call # go to the SYSRET fastpath
+
+ 1:
+- subq $REST_SKIP, %rsp # leave space for volatiles
+- CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rbp, %rdi
+ call *%rbx
+ movl $0, RAX(%rsp)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -21975,7 +22633,7 @@ index b077f4c..feb26c1 100644
/*
* System call entry. Up to 6 arguments in registers are supported.
-@@ -593,7 +1027,7 @@ END(ret_from_fork)
+@@ -593,7 +1005,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -21984,7 +22642,7 @@ index b077f4c..feb26c1 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -606,16 +1040,23 @@ GLOBAL(system_call_after_swapgs)
+@@ -606,16 +1018,23 @@ GLOBAL(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -22010,16 +22668,7 @@ index b077f4c..feb26c1 100644
jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
-@@ -625,7 +1066,7 @@ system_call_fastpath:
- cmpl $__NR_syscall_max,%eax
- #endif
- ja badsys
-- movq %r10,%rcx
-+ movq R10-ARGOFFSET(%rsp),%rcx
- call *sys_call_table(,%rax,8) # XXX: rip relative
- movq %rax,RAX-ARGOFFSET(%rsp)
- /*
-@@ -639,10 +1080,13 @@ sysret_check:
+@@ -639,10 +1058,13 @@ sysret_check:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -22034,15 +22683,7 @@ index b077f4c..feb26c1 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -694,14 +1138,18 @@ badsys:
- * jump back to the normal fast path.
- */
- auditsys:
-- movq %r10,%r9 /* 6th arg: 4th syscall arg */
-+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
- movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
- movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
- movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
+@@ -701,6 +1123,9 @@ auditsys:
movq %rax,%rsi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
call __audit_syscall_entry
@@ -22050,11 +22691,9 @@ index b077f4c..feb26c1 100644
+ pax_erase_kstack
+
LOAD_ARGS 0 /* reload call-clobbered registers */
-+ pax_set_fptr_mask
jmp system_call_fastpath
- /*
-@@ -722,7 +1170,7 @@ sysret_audit:
+@@ -722,7 +1147,7 @@ sysret_audit:
/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -22063,7 +22702,7 @@ index b077f4c..feb26c1 100644
jz auditsys
#endif
SAVE_REST
-@@ -730,12 +1178,16 @@ tracesys:
+@@ -730,12 +1155,15 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -22075,21 +22714,12 @@ index b077f4c..feb26c1 100644
* We don't reload %rax because syscall_trace_enter() returned
* the value it wants us to use in the table lookup.
*/
- LOAD_ARGS ARGOFFSET, 1
-+ pax_set_fptr_mask
+- LOAD_ARGS ARGOFFSET, 1
++ LOAD_ARGS 1
RESTORE_REST
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax
-@@ -744,7 +1196,7 @@ tracesys:
- cmpl $__NR_syscall_max,%eax
- #endif
- ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
-- movq %r10,%rcx /* fixup for C */
-+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
- call *sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
- /* Use IRET because user could have changed frame */
-@@ -765,7 +1217,9 @@ GLOBAL(int_with_check)
+@@ -765,7 +1193,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
@@ -22100,7 +22730,7 @@ index b077f4c..feb26c1 100644
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
-@@ -811,7 +1265,7 @@ int_restore_rest:
+@@ -811,7 +1241,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -22109,19 +22739,20 @@ index b077f4c..feb26c1 100644
.macro FORK_LIKE func
ENTRY(stub_\func)
-@@ -824,9 +1278,10 @@ ENTRY(stub_\func)
+@@ -824,9 +1254,10 @@ ENTRY(stub_\func)
DEFAULT_FRAME 0 8 /* offset 8: return address */
call sys_\func
RESTORE_TOP_OF_STACK %r11, 8
+- ret $REST_SKIP /* pop extended registers */
+ pax_force_retaddr
- ret $REST_SKIP /* pop extended registers */
++ ret
CFI_ENDPROC
-END(stub_\func)
+ENDPROC(stub_\func)
.endm
.macro FIXED_FRAME label,func
-@@ -836,9 +1291,10 @@ ENTRY(\label)
+@@ -836,9 +1267,10 @@ ENTRY(\label)
FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
call \func
RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
@@ -22133,19 +22764,27 @@ index b077f4c..feb26c1 100644
.endm
FORK_LIKE clone
-@@ -855,9 +1311,10 @@ ENTRY(ptregscall_common)
- movq_cfi_restore R12+8, r12
- movq_cfi_restore RBP+8, rbp
- movq_cfi_restore RBX+8, rbx
-+ pax_force_retaddr
- ret $REST_SKIP /* pop extended registers */
- CFI_ENDPROC
+@@ -846,19 +1278,6 @@ END(\label)
+ FORK_LIKE vfork
+ FIXED_FRAME stub_iopl, sys_iopl
+
+-ENTRY(ptregscall_common)
+- DEFAULT_FRAME 1 8 /* offset 8: return address */
+- RESTORE_TOP_OF_STACK %r11, 8
+- movq_cfi_restore R15+8, r15
+- movq_cfi_restore R14+8, r14
+- movq_cfi_restore R13+8, r13
+- movq_cfi_restore R12+8, r12
+- movq_cfi_restore RBP+8, rbp
+- movq_cfi_restore RBX+8, rbx
+- ret $REST_SKIP /* pop extended registers */
+- CFI_ENDPROC
-END(ptregscall_common)
-+ENDPROC(ptregscall_common)
-
+-
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -870,7 +1327,7 @@ ENTRY(stub_execve)
+ addq $8, %rsp
+@@ -870,7 +1289,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -22154,7 +22793,7 @@ index b077f4c..feb26c1 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -887,7 +1344,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -887,7 +1306,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -22163,7 +22802,7 @@ index b077f4c..feb26c1 100644
#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
-@@ -901,7 +1358,7 @@ ENTRY(stub_x32_rt_sigreturn)
+@@ -901,7 +1320,7 @@ ENTRY(stub_x32_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -22172,7 +22811,7 @@ index b077f4c..feb26c1 100644
ENTRY(stub_x32_execve)
CFI_STARTPROC
-@@ -915,7 +1372,7 @@ ENTRY(stub_x32_execve)
+@@ -915,7 +1334,7 @@ ENTRY(stub_x32_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -22181,7 +22820,7 @@ index b077f4c..feb26c1 100644
#endif
-@@ -952,7 +1409,7 @@ vector=vector+1
+@@ -952,7 +1371,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -22190,9 +22829,14 @@ index b077f4c..feb26c1 100644
.previous
END(interrupt)
-@@ -972,6 +1429,16 @@ END(interrupt)
- subq $ORIG_RAX-RBP, %rsp
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+@@ -969,9 +1388,19 @@ END(interrupt)
+ /* 0(%rsp): ~(interrupt number) */
+ .macro interrupt func
+ /* reserve pt_regs for scratch regs and rbp */
+- subq $ORIG_RAX-RBP, %rsp
+- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
++ subq $ORIG_RAX, %rsp
++ CFI_ADJUST_CFA_OFFSET ORIG_RAX
SAVE_ARGS_IRQ
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ testb $3, CS(%rdi)
@@ -22207,7 +22851,17 @@ index b077f4c..feb26c1 100644
call \func
.endm
-@@ -1004,7 +1471,7 @@ ret_from_intr:
+@@ -997,14 +1426,14 @@ ret_from_intr:
+
+ /* Restore saved previous stack */
+ popq %rsi
+- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
+- leaq ARGOFFSET-RBP(%rsi), %rsp
++ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
++ movq %rsi, %rsp
+ CFI_DEF_CFA_REGISTER rsp
+- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
++ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -22216,7 +22870,7 @@ index b077f4c..feb26c1 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -1026,12 +1493,16 @@ retint_swapgs: /* return to user-space */
+@@ -1026,12 +1455,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -22233,7 +22887,7 @@ index b077f4c..feb26c1 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -1114,7 +1585,7 @@ ENTRY(retint_kernel)
+@@ -1114,7 +1547,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -22242,7 +22896,7 @@ index b077f4c..feb26c1 100644
/*
* End of kprobes section
*/
-@@ -1132,7 +1603,7 @@ ENTRY(\sym)
+@@ -1132,7 +1565,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -22251,7 +22905,7 @@ index b077f4c..feb26c1 100644
.endm
#ifdef CONFIG_TRACING
-@@ -1215,12 +1686,22 @@ ENTRY(\sym)
+@@ -1215,12 +1648,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -22275,7 +22929,7 @@ index b077f4c..feb26c1 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1233,15 +1714,25 @@ ENTRY(\sym)
+@@ -1233,15 +1676,25 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -22299,11 +22953,11 @@ index b077f4c..feb26c1 100644
.endm
-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
-+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1252,14 +1743,30 @@ ENTRY(\sym)
+@@ -1252,14 +1705,30 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF_DEBUG
@@ -22320,10 +22974,10 @@ index b077f4c..feb26c1 100644
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
+#ifdef CONFIG_SMP
-+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
-+ lea init_tss(%r12), %r12
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
++ lea init_tss(%r13), %r13
+#else
-+ lea init_tss(%rip), %r12
++ lea init_tss(%rip), %r13
+#endif
subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
call \do_sym
@@ -22335,7 +22989,7 @@ index b077f4c..feb26c1 100644
.endm
.macro errorentry sym do_sym
-@@ -1271,13 +1778,23 @@ ENTRY(\sym)
+@@ -1271,13 +1740,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -22360,7 +23014,7 @@ index b077f4c..feb26c1 100644
.endm
/* error code is on the stack already */
-@@ -1291,13 +1808,23 @@ ENTRY(\sym)
+@@ -1291,13 +1770,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -22385,7 +23039,7 @@ index b077f4c..feb26c1 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1327,9 +1854,10 @@ gs_change:
+@@ -1327,9 +1816,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -22397,7 +23051,7 @@ index b077f4c..feb26c1 100644
_ASM_EXTABLE(gs_change,bad_gs)
.section .fixup,"ax"
-@@ -1357,9 +1885,10 @@ ENTRY(call_softirq)
+@@ -1357,9 +1847,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -22409,7 +23063,7 @@ index b077f4c..feb26c1 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1397,7 +1926,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1397,7 +1888,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -22418,7 +23072,7 @@ index b077f4c..feb26c1 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1456,7 +1985,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1456,7 +1947,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -22427,7 +23081,7 @@ index b077f4c..feb26c1 100644
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1508,18 +2037,33 @@ ENTRY(paranoid_exit)
+@@ -1508,18 +1999,33 @@ ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
@@ -22463,7 +23117,7 @@ index b077f4c..feb26c1 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1548,7 +2092,7 @@ paranoid_schedule:
+@@ -1548,7 +2054,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -22472,7 +23126,7 @@ index b077f4c..feb26c1 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1575,12 +2119,13 @@ ENTRY(error_entry)
+@@ -1575,12 +2081,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -22487,7 +23141,7 @@ index b077f4c..feb26c1 100644
ret
/*
-@@ -1607,7 +2152,7 @@ bstep_iret:
+@@ -1607,7 +2114,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -22496,7 +23150,7 @@ index b077f4c..feb26c1 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1618,7 +2163,7 @@ ENTRY(error_exit)
+@@ -1618,7 +2125,7 @@ ENTRY(error_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
@@ -22505,7 +23159,7 @@ index b077f4c..feb26c1 100644
jne retint_kernel
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
-@@ -1627,7 +2172,7 @@ ENTRY(error_exit)
+@@ -1627,7 +2134,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -22514,7 +23168,7 @@ index b077f4c..feb26c1 100644
/*
* Test if a given stack is an NMI stack or not.
-@@ -1685,9 +2230,11 @@ ENTRY(nmi)
+@@ -1685,9 +2192,11 @@ ENTRY(nmi)
* If %cs was not the kernel segment, then the NMI triggered in user
* space, which means it is definitely not nested.
*/
@@ -22527,7 +23181,7 @@ index b077f4c..feb26c1 100644
/*
* Check the special variable on the stack to see if NMIs are
* executing.
-@@ -1721,8 +2268,7 @@ nested_nmi:
+@@ -1721,8 +2230,7 @@ nested_nmi:
1:
/* Set up the interrupted NMIs stack to jump to repeat_nmi */
@@ -22537,7 +23191,7 @@ index b077f4c..feb26c1 100644
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS
-@@ -1740,6 +2286,7 @@ nested_nmi_out:
+@@ -1740,6 +2248,7 @@ nested_nmi_out:
CFI_RESTORE rdx
/* No need to check faults here */
@@ -22545,17 +23199,29 @@ index b077f4c..feb26c1 100644
INTERRUPT_RETURN
CFI_RESTORE_STATE
-@@ -1856,6 +2403,8 @@ end_repeat_nmi:
+@@ -1852,9 +2361,11 @@ end_repeat_nmi:
+ * NMI itself takes a page fault, the page fault that was preempted
+ * will read the information from the NMI page fault and not the
+ * origin fault. Save it off and restore it if it changes.
+- * Use the r12 callee-saved register.
++ * Use the r13 callee-saved register.
*/
- movq %cr2, %r12
-
-+ pax_enter_kernel_nmi
+- movq %cr2, %r12
++ movq %cr2, %r13
+
++ pax_enter_kernel_nmi
+
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
- movq $-1,%rsi
-@@ -1868,26 +2417,31 @@ end_repeat_nmi:
- movq %r12, %cr2
+@@ -1863,31 +2374,36 @@ end_repeat_nmi:
+
+ /* Did the NMI take a page fault? Restore cr2 if it did */
+ movq %cr2, %rcx
+- cmpq %rcx, %r12
++ cmpq %rcx, %r13
+ je 1f
+- movq %r12, %cr2
++ movq %r13, %cr2
1:
- testl %ebx,%ebx /* swapgs needed? */
@@ -26670,7 +27336,7 @@ index b110fe6..d9c19f2 100644
out:
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 5439117..d08f3d4 100644
+index 5439117..f4d21f7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -55,7 +55,7 @@
@@ -26682,6 +27348,117 @@ index 5439117..d08f3d4 100644
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
+@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
+ return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
+ }
+
++#define KVM_X2APIC_CID_BITS 0
++
+ static void recalculate_apic_map(struct kvm *kvm)
+ {
+ struct kvm_apic_map *new, *old = NULL;
+@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
+ if (apic_x2apic_mode(apic)) {
+ new->ldr_bits = 32;
+ new->cid_shift = 16;
+- new->cid_mask = new->lid_mask = 0xffff;
++ new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
++ new->lid_mask = 0xffff;
+ } else if (kvm_apic_sw_enabled(apic) &&
+ !new->cid_mask /* flat mode */ &&
+ kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
+@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
+ ASSERT(apic != NULL);
+
+ /* if initial count is 0, current count should also be 0 */
+- if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
++ if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
++ apic->lapic_timer.period == 0)
+ return 0;
+
+ remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+ {
+ u32 data;
+- void *vapic;
+
+ if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
+ apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
+@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+ if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
+ return;
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
+- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
+- kunmap_atomic(vapic);
++ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+
+ apic_set_tpr(vcpu->arch.apic, data & 0xff);
+ }
+@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ u32 data, tpr;
+ int max_irr, max_isr;
+ struct kvm_lapic *apic = vcpu->arch.apic;
+- void *vapic;
+
+ apic_sync_pv_eoi_to_guest(vcpu, apic);
+
+@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ max_isr = 0;
+ data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
+- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
+- kunmap_atomic(vapic);
++ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+ }
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+ {
+- vcpu->arch.apic->vapic_addr = vapic_addr;
+- if (vapic_addr)
++ if (vapic_addr) {
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.apic->vapic_cache,
++ vapic_addr, sizeof(u32)))
++ return -EINVAL;
+ __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+- else
++ } else {
+ __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
++ }
++
++ vcpu->arch.apic->vapic_addr = vapic_addr;
++ return 0;
+ }
+
+ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index c730ac9..c8b0d0d 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -34,7 +34,7 @@ struct kvm_lapic {
+ */
+ void *regs;
+ gpa_t vapic_addr;
+- struct page *vapic_page;
++ struct gfn_to_hva_cache vapic_cache;
+ unsigned long pending_events;
+ unsigned int sipi_vector;
+ };
+@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
+ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad75d77..a679d32 100644
--- a/arch/x86/kvm/paging_tmpl.h
@@ -26883,7 +27660,7 @@ index 2b2fce1..da76be4 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index e5ca72a..83d5177 100644
+index e5ca72a..0f30b12 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1779,8 +1779,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -26906,7 +27683,17 @@ index e5ca72a..83d5177 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -5462,7 +5464,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -3192,8 +3194,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
+- r = 0;
+- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
+@@ -5462,7 +5463,7 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
@@ -26915,6 +27702,64 @@ index e5ca72a..83d5177 100644
{
int r;
struct kvm_x86_ops *ops = opaque;
+@@ -5718,36 +5719,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
+ !kvm_event_needs_reinjection(vcpu);
+ }
+
+-static int vapic_enter(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- struct page *page;
+-
+- if (!apic || !apic->vapic_addr)
+- return 0;
+-
+- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+- if (is_error_page(page))
+- return -EFAULT;
+-
+- vcpu->arch.apic->vapic_page = page;
+- return 0;
+-}
+-
+-static void vapic_exit(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- int idx;
+-
+- if (!apic || !apic->vapic_addr)
+- return;
+-
+- idx = srcu_read_lock(&vcpu->kvm->srcu);
+- kvm_release_page_dirty(apic->vapic_page);
+- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+-}
+-
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ {
+ int max_irr, tpr;
+@@ -6047,11 +6018,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+ struct kvm *kvm = vcpu->kvm;
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+- r = vapic_enter(vcpu);
+- if (r) {
+- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+- return r;
+- }
+
+ r = 1;
+ while (r > 0) {
+@@ -6110,8 +6076,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+
+- vapic_exit(vcpu);
+-
+ return r;
+ }
+
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index bdf8532..f63c587 100644
--- a/arch/x86/lguest/boot.c
@@ -27719,7 +28564,7 @@ index 1e572c5..2a162cd 100644
CFI_ENDPROC
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
-index 176cca6..1166c50 100644
+index 176cca6..e0d658e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -9,6 +9,7 @@ copy_page_rep:
@@ -27730,74 +28575,68 @@ index 176cca6..1166c50 100644
ret
CFI_ENDPROC
ENDPROC(copy_page_rep)
-@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
-
- ENTRY(copy_page)
- CFI_STARTPROC
-- subq $2*8, %rsp
-- CFI_ADJUST_CFA_OFFSET 2*8
-+ subq $3*8, %rsp
-+ CFI_ADJUST_CFA_OFFSET 3*8
+@@ -24,8 +25,8 @@ ENTRY(copy_page)
+ CFI_ADJUST_CFA_OFFSET 2*8
movq %rbx, (%rsp)
CFI_REL_OFFSET rbx, 0
- movq %r12, 1*8(%rsp)
- CFI_REL_OFFSET r12, 1*8
-+ movq %r13, 2*8(%rsp)
-+ CFI_REL_OFFSET r13, 2*8
+- movq %r12, 1*8(%rsp)
+- CFI_REL_OFFSET r12, 1*8
++ movq %r13, 1*8(%rsp)
++ CFI_REL_OFFSET r13, 1*8
movl $(4096/64)-5, %ecx
.p2align 4
-@@ -36,7 +39,7 @@ ENTRY(copy_page)
- movq 0x8*2(%rsi), %rdx
- movq 0x8*3(%rsi), %r8
+@@ -38,7 +39,7 @@ ENTRY(copy_page)
movq 0x8*4(%rsi), %r9
-- movq 0x8*5(%rsi), %r10
-+ movq 0x8*5(%rsi), %r13
+ movq 0x8*5(%rsi), %r10
movq 0x8*6(%rsi), %r11
- movq 0x8*7(%rsi), %r12
+- movq 0x8*7(%rsi), %r12
++ movq 0x8*7(%rsi), %r13
-@@ -47,7 +50,7 @@ ENTRY(copy_page)
- movq %rdx, 0x8*2(%rdi)
- movq %r8, 0x8*3(%rdi)
+ prefetcht0 5*64(%rsi)
+
+@@ -49,7 +50,7 @@ ENTRY(copy_page)
movq %r9, 0x8*4(%rdi)
-- movq %r10, 0x8*5(%rdi)
-+ movq %r13, 0x8*5(%rdi)
+ movq %r10, 0x8*5(%rdi)
movq %r11, 0x8*6(%rdi)
- movq %r12, 0x8*7(%rdi)
+- movq %r12, 0x8*7(%rdi)
++ movq %r13, 0x8*7(%rdi)
-@@ -66,7 +69,7 @@ ENTRY(copy_page)
- movq 0x8*2(%rsi), %rdx
- movq 0x8*3(%rsi), %r8
+ leaq 64 (%rsi), %rsi
+ leaq 64 (%rdi), %rdi
+@@ -68,7 +69,7 @@ ENTRY(copy_page)
movq 0x8*4(%rsi), %r9
-- movq 0x8*5(%rsi), %r10
-+ movq 0x8*5(%rsi), %r13
+ movq 0x8*5(%rsi), %r10
movq 0x8*6(%rsi), %r11
- movq 0x8*7(%rsi), %r12
+- movq 0x8*7(%rsi), %r12
++ movq 0x8*7(%rsi), %r13
-@@ -75,7 +78,7 @@ ENTRY(copy_page)
- movq %rdx, 0x8*2(%rdi)
- movq %r8, 0x8*3(%rdi)
+ movq %rax, 0x8*0(%rdi)
+ movq %rbx, 0x8*1(%rdi)
+@@ -77,7 +78,7 @@ ENTRY(copy_page)
movq %r9, 0x8*4(%rdi)
-- movq %r10, 0x8*5(%rdi)
-+ movq %r13, 0x8*5(%rdi)
+ movq %r10, 0x8*5(%rdi)
movq %r11, 0x8*6(%rdi)
- movq %r12, 0x8*7(%rdi)
+- movq %r12, 0x8*7(%rdi)
++ movq %r13, 0x8*7(%rdi)
+
+ leaq 64(%rdi), %rdi
+ leaq 64(%rsi), %rsi
+@@ -85,10 +86,11 @@ ENTRY(copy_page)
-@@ -87,8 +90,11 @@ ENTRY(copy_page)
+ movq (%rsp), %rbx
CFI_RESTORE rbx
- movq 1*8(%rsp), %r12
- CFI_RESTORE r12
-- addq $2*8, %rsp
-- CFI_ADJUST_CFA_OFFSET -2*8
-+ movq 2*8(%rsp), %r13
+- movq 1*8(%rsp), %r12
+- CFI_RESTORE r12
++ movq 1*8(%rsp), %r13
+ CFI_RESTORE r13
-+ addq $3*8, %rsp
-+ CFI_ADJUST_CFA_OFFSET -3*8
+ addq $2*8, %rsp
+ CFI_ADJUST_CFA_OFFSET -2*8
+ pax_force_retaddr
ret
.Lcopy_page_end:
CFI_ENDPROC
-@@ -99,7 +105,7 @@ ENDPROC(copy_page)
+@@ -99,7 +101,7 @@ ENDPROC(copy_page)
#include <asm/cpufeature.h>
@@ -27807,7 +28646,7 @@ index 176cca6..1166c50 100644
.byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
2:
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
-index a30ca15..6b3f4e1 100644
+index a30ca15..407412b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -18,31 +18,7 @@
@@ -27904,30 +28743,6 @@ index a30ca15..6b3f4e1 100644
ASM_STAC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
-@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
- jz 17f
- 1: movq (%rsi),%r8
- 2: movq 1*8(%rsi),%r9
--3: movq 2*8(%rsi),%r10
-+3: movq 2*8(%rsi),%rax
- 4: movq 3*8(%rsi),%r11
- 5: movq %r8,(%rdi)
- 6: movq %r9,1*8(%rdi)
--7: movq %r10,2*8(%rdi)
-+7: movq %rax,2*8(%rdi)
- 8: movq %r11,3*8(%rdi)
- 9: movq 4*8(%rsi),%r8
- 10: movq 5*8(%rsi),%r9
--11: movq 6*8(%rsi),%r10
-+11: movq 6*8(%rsi),%rax
- 12: movq 7*8(%rsi),%r11
- 13: movq %r8,4*8(%rdi)
- 14: movq %r9,5*8(%rdi)
--15: movq %r10,6*8(%rdi)
-+15: movq %rax,6*8(%rdi)
- 16: movq %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
jnz 21b
23: xor %eax,%eax
@@ -27972,7 +28787,7 @@ index a30ca15..6b3f4e1 100644
.section .fixup,"ax"
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
-index 6a4f43c..55d26f2 100644
+index 6a4f43c..c70fb52 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -8,6 +8,7 @@
@@ -28008,30 +28823,6 @@ index 6a4f43c..55d26f2 100644
ASM_STAC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
-@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
- jz 17f
- 1: movq (%rsi),%r8
- 2: movq 1*8(%rsi),%r9
--3: movq 2*8(%rsi),%r10
-+3: movq 2*8(%rsi),%rax
- 4: movq 3*8(%rsi),%r11
- 5: movnti %r8,(%rdi)
- 6: movnti %r9,1*8(%rdi)
--7: movnti %r10,2*8(%rdi)
-+7: movnti %rax,2*8(%rdi)
- 8: movnti %r11,3*8(%rdi)
- 9: movq 4*8(%rsi),%r8
- 10: movq 5*8(%rsi),%r9
--11: movq 6*8(%rsi),%r10
-+11: movq 6*8(%rsi),%rax
- 12: movq 7*8(%rsi),%r11
- 13: movnti %r8,4*8(%rdi)
- 14: movnti %r9,5*8(%rdi)
--15: movnti %r10,6*8(%rdi)
-+15: movnti %rax,6*8(%rdi)
- 16: movnti %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
jnz 21b
23: xorl %eax,%eax
@@ -28043,7 +28834,7 @@ index 6a4f43c..55d26f2 100644
.section .fixup,"ax"
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
-index 2419d5f..953ee51 100644
+index 2419d5f..fe52d0e 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -9,6 +9,7 @@
@@ -28054,11 +28845,62 @@ index 2419d5f..953ee51 100644
/*
* Checksum copy with exception handling.
+@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
+ CFI_ADJUST_CFA_OFFSET 7*8
+ movq %rbx, 2*8(%rsp)
+ CFI_REL_OFFSET rbx, 2*8
+- movq %r12, 3*8(%rsp)
+- CFI_REL_OFFSET r12, 3*8
++ movq %r15, 3*8(%rsp)
++ CFI_REL_OFFSET r15, 3*8
+ movq %r14, 4*8(%rsp)
+ CFI_REL_OFFSET r14, 4*8
+ movq %r13, 5*8(%rsp)
+@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
+ movl %edx, %ecx
+
+ xorl %r9d, %r9d
+- movq %rcx, %r12
++ movq %rcx, %r15
+
+- shrq $6, %r12
++ shrq $6, %r15
+ jz .Lhandle_tail /* < 64 */
+
+ clc
+
+ /* main loop. clear in 64 byte blocks */
+ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
+- /* r11: temp3, rdx: temp4, r12 loopcnt */
++ /* r11: temp3, rdx: temp4, r15 loopcnt */
+ /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ .p2align 4
+ .Lloop:
+@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
+ adcq %r14, %rax
+ adcq %r13, %rax
+
+- decl %r12d
++ decl %r15d
+
+ dest
+ movq %rbx, (%rsi)
+@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
+ .Lende:
+ movq 2*8(%rsp), %rbx
+ CFI_RESTORE rbx
+- movq 3*8(%rsp), %r12
+- CFI_RESTORE r12
++ movq 3*8(%rsp), %r15
++ CFI_RESTORE r15
+ movq 4*8(%rsp), %r14
+ CFI_RESTORE r14
+ movq 5*8(%rsp), %r13
@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
CFI_RESTORE rbp
addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
CFI_RESTORE_STATE
@@ -28298,7 +29140,7 @@ index 05a95e7..326f2fa 100644
CFI_ENDPROC
ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
-index 56313a3..9b59269 100644
+index 56313a3..0db417e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -24,7 +24,7 @@
@@ -28332,48 +29174,9 @@ index 56313a3..9b59269 100644
ret
.Lmemcpy_e_e:
.previous
-@@ -76,13 +78,13 @@ ENTRY(memcpy)
- */
- movq 0*8(%rsi), %r8
- movq 1*8(%rsi), %r9
-- movq 2*8(%rsi), %r10
-+ movq 2*8(%rsi), %rcx
- movq 3*8(%rsi), %r11
- leaq 4*8(%rsi), %rsi
-
- movq %r8, 0*8(%rdi)
- movq %r9, 1*8(%rdi)
-- movq %r10, 2*8(%rdi)
-+ movq %rcx, 2*8(%rdi)
- movq %r11, 3*8(%rdi)
- leaq 4*8(%rdi), %rdi
- jae .Lcopy_forward_loop
-@@ -105,12 +107,12 @@ ENTRY(memcpy)
- subq $0x20, %rdx
- movq -1*8(%rsi), %r8
- movq -2*8(%rsi), %r9
-- movq -3*8(%rsi), %r10
-+ movq -3*8(%rsi), %rcx
- movq -4*8(%rsi), %r11
- leaq -4*8(%rsi), %rsi
- movq %r8, -1*8(%rdi)
- movq %r9, -2*8(%rdi)
-- movq %r10, -3*8(%rdi)
-+ movq %rcx, -3*8(%rdi)
- movq %r11, -4*8(%rdi)
- leaq -4*8(%rdi), %rdi
- jae .Lcopy_backward_loop
-@@ -130,12 +132,13 @@ ENTRY(memcpy)
- */
- movq 0*8(%rsi), %r8
- movq 1*8(%rsi), %r9
-- movq -2*8(%rsi, %rdx), %r10
-+ movq -2*8(%rsi, %rdx), %rcx
- movq -1*8(%rsi, %rdx), %r11
- movq %r8, 0*8(%rdi)
+@@ -136,6 +138,7 @@ ENTRY(memcpy)
movq %r9, 1*8(%rdi)
-- movq %r10, -2*8(%rdi, %rdx)
-+ movq %rcx, -2*8(%rdi, %rdx)
+ movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
+ pax_force_retaddr
retq
@@ -28404,121 +29207,9 @@ index 56313a3..9b59269 100644
CFI_ENDPROC
ENDPROC(memcpy)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
-index 65268a6..5aa7815 100644
+index 65268a6..dd1de11 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
-@@ -61,13 +61,13 @@ ENTRY(memmove)
- 5:
- sub $0x20, %rdx
- movq 0*8(%rsi), %r11
-- movq 1*8(%rsi), %r10
-+ movq 1*8(%rsi), %rcx
- movq 2*8(%rsi), %r9
- movq 3*8(%rsi), %r8
- leaq 4*8(%rsi), %rsi
-
- movq %r11, 0*8(%rdi)
-- movq %r10, 1*8(%rdi)
-+ movq %rcx, 1*8(%rdi)
- movq %r9, 2*8(%rdi)
- movq %r8, 3*8(%rdi)
- leaq 4*8(%rdi), %rdi
-@@ -81,10 +81,10 @@ ENTRY(memmove)
- 4:
- movq %rdx, %rcx
- movq -8(%rsi, %rdx), %r11
-- lea -8(%rdi, %rdx), %r10
-+ lea -8(%rdi, %rdx), %r9
- shrq $3, %rcx
- rep movsq
-- movq %r11, (%r10)
-+ movq %r11, (%r9)
- jmp 13f
- .Lmemmove_end_forward:
-
-@@ -95,14 +95,14 @@ ENTRY(memmove)
- 7:
- movq %rdx, %rcx
- movq (%rsi), %r11
-- movq %rdi, %r10
-+ movq %rdi, %r9
- leaq -8(%rsi, %rdx), %rsi
- leaq -8(%rdi, %rdx), %rdi
- shrq $3, %rcx
- std
- rep movsq
- cld
-- movq %r11, (%r10)
-+ movq %r11, (%r9)
- jmp 13f
-
- /*
-@@ -127,13 +127,13 @@ ENTRY(memmove)
- 8:
- subq $0x20, %rdx
- movq -1*8(%rsi), %r11
-- movq -2*8(%rsi), %r10
-+ movq -2*8(%rsi), %rcx
- movq -3*8(%rsi), %r9
- movq -4*8(%rsi), %r8
- leaq -4*8(%rsi), %rsi
-
- movq %r11, -1*8(%rdi)
-- movq %r10, -2*8(%rdi)
-+ movq %rcx, -2*8(%rdi)
- movq %r9, -3*8(%rdi)
- movq %r8, -4*8(%rdi)
- leaq -4*8(%rdi), %rdi
-@@ -151,11 +151,11 @@ ENTRY(memmove)
- * Move data from 16 bytes to 31 bytes.
- */
- movq 0*8(%rsi), %r11
-- movq 1*8(%rsi), %r10
-+ movq 1*8(%rsi), %rcx
- movq -2*8(%rsi, %rdx), %r9
- movq -1*8(%rsi, %rdx), %r8
- movq %r11, 0*8(%rdi)
-- movq %r10, 1*8(%rdi)
-+ movq %rcx, 1*8(%rdi)
- movq %r9, -2*8(%rdi, %rdx)
- movq %r8, -1*8(%rdi, %rdx)
- jmp 13f
-@@ -167,9 +167,9 @@ ENTRY(memmove)
- * Move data from 8 bytes to 15 bytes.
- */
- movq 0*8(%rsi), %r11
-- movq -1*8(%rsi, %rdx), %r10
-+ movq -1*8(%rsi, %rdx), %r9
- movq %r11, 0*8(%rdi)
-- movq %r10, -1*8(%rdi, %rdx)
-+ movq %r9, -1*8(%rdi, %rdx)
- jmp 13f
- 10:
- cmpq $4, %rdx
-@@ -178,9 +178,9 @@ ENTRY(memmove)
- * Move data from 4 bytes to 7 bytes.
- */
- movl (%rsi), %r11d
-- movl -4(%rsi, %rdx), %r10d
-+ movl -4(%rsi, %rdx), %r9d
- movl %r11d, (%rdi)
-- movl %r10d, -4(%rdi, %rdx)
-+ movl %r9d, -4(%rdi, %rdx)
- jmp 13f
- 11:
- cmp $2, %rdx
-@@ -189,9 +189,9 @@ ENTRY(memmove)
- * Move data from 2 bytes to 3 bytes.
- */
- movw (%rsi), %r11w
-- movw -2(%rsi, %rdx), %r10w
-+ movw -2(%rsi, %rdx), %r9w
- movw %r11w, (%rdi)
-- movw %r10w, -2(%rdi, %rdx)
-+ movw %r9w, -2(%rdi, %rdx)
- jmp 13f
- 12:
- cmp $1, %rdx
@@ -202,14 +202,16 @@ ENTRY(memmove)
movb (%rsi), %r11b
movb %r11b, (%rdi)
@@ -28538,7 +29229,7 @@ index 65268a6..5aa7815 100644
.Lmemmove_end_forward_efs:
.previous
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
-index 2dcb380..50a78bc 100644
+index 2dcb380..2eb79fe 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -16,7 +16,7 @@
@@ -28574,21 +29265,10 @@ index 2dcb380..50a78bc 100644
ret
.Lmemset_e_e:
.previous
-@@ -59,7 +61,7 @@
- ENTRY(memset)
- ENTRY(__memset)
- CFI_STARTPROC
-- movq %rdi,%r10
-+ movq %rdi,%r11
-
- /* expand byte value */
- movzbl %sil,%ecx
-@@ -117,7 +119,8 @@ ENTRY(__memset)
- jnz .Lloop_1
+@@ -118,6 +120,7 @@ ENTRY(__memset)
.Lende:
-- movq %r10,%rax
-+ movq %r11,%rax
+ movq %r10,%rax
+ pax_force_retaddr
ret
@@ -28913,7 +29593,7 @@ index c9f2d9b..e7fd2c0 100644
from += 64;
to += 64;
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
-index f6d13ee..aca5f0b 100644
+index f6d13ee..d789440 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -3,6 +3,7 @@
@@ -28924,34 +29604,8 @@ index f6d13ee..aca5f0b 100644
#ifdef CONFIG_X86_64
/*
-@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
- CFI_STARTPROC
- pushq_cfi %rbx
- pushq_cfi %rbp
-- movq %rdi, %r10 /* Save pointer */
-+ movq %rdi, %r9 /* Save pointer */
- xorl %r11d, %r11d /* Return value */
- movl (%rdi), %eax
- movl 4(%rdi), %ecx
-@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
- movl 28(%rdi), %edi
- CFI_REMEMBER_STATE
- 1: \op
--2: movl %eax, (%r10)
-+2: movl %eax, (%r9)
- movl %r11d, %eax /* Return value */
-- movl %ecx, 4(%r10)
-- movl %edx, 8(%r10)
-- movl %ebx, 12(%r10)
-- movl %ebp, 20(%r10)
-- movl %esi, 24(%r10)
-- movl %edi, 28(%r10)
-+ movl %ecx, 4(%r9)
-+ movl %edx, 8(%r9)
-+ movl %ebx, 12(%r9)
-+ movl %ebp, 20(%r9)
-+ movl %esi, 24(%r9)
-+ movl %edi, 28(%r9)
+@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
+ movl %edi, 28(%r10)
popq_cfi %rbp
popq_cfi %rbx
+ pax_force_retaddr
@@ -29221,7 +29875,7 @@ index 5dff5f0..cadebf4 100644
CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
-index a63efd6..ccecad8 100644
+index a63efd6..8149fbe 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -8,6 +8,7 @@
@@ -29232,10 +29886,30 @@ index a63efd6..ccecad8 100644
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
-@@ -41,5 +42,6 @@
- SAVE_ARGS
+@@ -15,11 +16,11 @@
+ \name:
+ CFI_STARTPROC
+
+- /* this one pushes 9 elems, the next one would be %rIP */
+- SAVE_ARGS
++ /* this one pushes 15+1 elems, the next one would be %rIP */
++ SAVE_ARGS 8
+
+ .if \put_ret_addr_in_rdi
+- movq_cfi_restore 9*8, rdi
++ movq_cfi_restore RIP, rdi
+ .endif
+
+ call \func
+@@ -38,8 +39,9 @@
+
+ /* SAVE_ARGS below is used only for the .cfi directives it contains. */
+ CFI_STARTPROC
+- SAVE_ARGS
++ SAVE_ARGS 8
restore:
- RESTORE_ARGS
+- RESTORE_ARGS
++ RESTORE_ARGS 1,8
+ pax_force_retaddr
ret
CFI_ENDPROC
@@ -30698,7 +31372,7 @@ index 3aaeffc..42ea9fb 100644
+ return ret ? -EFAULT : 0;
+}
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
-index dd74e46..7d26398 100644
+index dd74e46..0970b01 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
@@ -30710,6 +31384,17 @@ index dd74e46..7d26398 100644
(void __user *)start, len)))
return 0;
+@@ -331,6 +331,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ goto slow_irqon;
+ #endif
+
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ (void __user *)start, len)))
++ return 0;
++
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
+ * needs some instrumenting to determine the common sizes used by
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 4500142..53a363c 100644
--- a/arch/x86/mm/highmem_32.c
@@ -34858,10 +35543,10 @@ index 81a94a3..b711c74 100644
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
-index db6dfcf..770d1f0 100644
+index ab58556..ed19dd2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
-@@ -4113,7 +4113,7 @@ int ata_sas_port_init(struct ata_port *ap)
+@@ -4114,7 +4114,7 @@ int ata_sas_port_init(struct ata_port *ap)
if (rc)
return rc;
@@ -37581,7 +38266,7 @@ index f897d51..15da295 100644
if (policy->cpu != 0)
return -ENODEV;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
-index d75040d..4738ca5 100644
+index 22c07fb..9dff5ac 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -252,7 +252,7 @@ static int poll_idle(struct cpuidle_device *dev,
@@ -37981,10 +38666,10 @@ index 5145fa3..0d3babd 100644
return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
-index 8a7432a..28fb839 100644
+index 8c5a61a..cf07bd0 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
-@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
static int
create_efivars_bin_attributes(void)
{
@@ -45864,7 +46549,7 @@ index c9382d6..6619864 100644
error = bus_register(&fcoe_bus_type);
if (error)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
-index df0c3c7..b00e1d0 100644
+index 3cafe0d..f1e87f8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -42,7 +42,7 @@
@@ -45886,10 +46571,10 @@ index df0c3c7..b00e1d0 100644
/* These three are default values which can be overridden */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
-index 891c86b..dd1224a0 100644
+index 0eb0940..3ca9b79 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
-@@ -578,7 +578,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
+@@ -579,7 +579,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
unsigned long flags;
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
@@ -45898,7 +46583,7 @@ index 891c86b..dd1224a0 100644
if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
a = rq->head[rq->current_entry];
-@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
+@@ -3445,7 +3445,7 @@ static void start_io(struct ctlr_info *h)
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
@@ -45907,7 +46592,7 @@ index 891c86b..dd1224a0 100644
dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
-@@ -3466,7 +3466,7 @@ static void start_io(struct ctlr_info *h)
+@@ -3467,7 +3467,7 @@ static void start_io(struct ctlr_info *h)
/* Tell the controller execute command */
spin_unlock_irqrestore(&h->lock, flags);
@@ -45916,7 +46601,7 @@ index 891c86b..dd1224a0 100644
spin_lock_irqsave(&h->lock, flags);
}
spin_unlock_irqrestore(&h->lock, flags);
-@@ -3474,17 +3474,17 @@ static void start_io(struct ctlr_info *h)
+@@ -3475,17 +3475,17 @@ static void start_io(struct ctlr_info *h)
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
@@ -45937,7 +46622,7 @@ index 891c86b..dd1224a0 100644
(h->interrupts_enabled == 0);
}
-@@ -4386,7 +4386,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
+@@ -4387,7 +4387,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
if (prod_index < 0)
return -ENODEV;
h->product_name = products[prod_index].product_name;
@@ -45946,7 +46631,7 @@ index 891c86b..dd1224a0 100644
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
-@@ -4668,7 +4668,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
+@@ -4669,7 +4669,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
assert_spin_locked(&lockup_detector_lock);
remove_ctlr_from_lockup_detector_list(h);
@@ -45955,7 +46640,7 @@ index 891c86b..dd1224a0 100644
spin_lock_irqsave(&h->lock, flags);
h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
spin_unlock_irqrestore(&h->lock, flags);
-@@ -4845,7 +4845,7 @@ reinit_after_soft_reset:
+@@ -4846,7 +4846,7 @@ reinit_after_soft_reset:
}
/* make sure the board interrupts are off */
@@ -45964,7 +46649,7 @@ index 891c86b..dd1224a0 100644
if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
goto clean2;
-@@ -4879,7 +4879,7 @@ reinit_after_soft_reset:
+@@ -4880,7 +4880,7 @@ reinit_after_soft_reset:
* fake ones to scoop up any residual completions.
*/
spin_lock_irqsave(&h->lock, flags);
@@ -45973,7 +46658,7 @@ index 891c86b..dd1224a0 100644
spin_unlock_irqrestore(&h->lock, flags);
free_irqs(h);
rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
-@@ -4898,9 +4898,9 @@ reinit_after_soft_reset:
+@@ -4899,9 +4899,9 @@ reinit_after_soft_reset:
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
"Waiting for stale completions to drain.\n");
@@ -45985,7 +46670,7 @@ index 891c86b..dd1224a0 100644
rc = controller_reset_failed(h->cfgtable);
if (rc)
-@@ -4921,7 +4921,7 @@ reinit_after_soft_reset:
+@@ -4922,7 +4922,7 @@ reinit_after_soft_reset:
}
/* Turn the interrupts on so we can service requests */
@@ -45994,7 +46679,7 @@ index 891c86b..dd1224a0 100644
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
-@@ -4976,7 +4976,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+@@ -4977,7 +4977,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
* To write all data in the battery backed cache to disks
*/
hpsa_flush_cache(h);
@@ -46003,7 +46688,7 @@ index 891c86b..dd1224a0 100644
hpsa_free_irqs_and_disable_msix(h);
}
-@@ -5144,7 +5144,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+@@ -5145,7 +5145,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
return;
}
/* Change the access methods to the performant access methods */
@@ -46171,7 +46856,7 @@ index 5879929..32b241d 100644
}
EXPORT_SYMBOL(fc_exch_update_stats);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
-index 161c98e..6d563b3 100644
+index d289583..b745eec 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
@@ -46412,7 +47097,7 @@ index 7f0af4f..193ac3e 100644
unsigned long flags;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
-index 1eb7b028..b2a6080 100644
+index a38f71b..f3bc572 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
@@ -46456,7 +47141,7 @@ index 1eb7b028..b2a6080 100644
pinstance->num_hrrq;
if (request_size) {
-@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
+@@ -4484,7 +4484,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
pinstance = container_of(workp, struct pmcraid_instance, worker_q);
/* add resources only after host is added into system */
@@ -46465,7 +47150,7 @@ index 1eb7b028..b2a6080 100644
return;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
-@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
+@@ -5311,8 +5311,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
init_waitqueue_head(&pinstance->reset_wait_q);
atomic_set(&pinstance->outstanding_cmds, 0);
@@ -46476,7 +47161,7 @@ index 1eb7b028..b2a6080 100644
INIT_LIST_HEAD(&pinstance->free_res_q);
INIT_LIST_HEAD(&pinstance->used_res_q);
-@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
+@@ -6025,7 +6025,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
/* Schedule worker thread to handle CCN and take care of adding and
* removing devices to OS
*/
@@ -46780,10 +47465,10 @@ index f379c7f..e8fc69c 100644
transport_setup_device(&rport->dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 5693f6d7..b0bf05a 100644
+index 2634d69..fcf7a81 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
-@@ -2934,7 +2934,7 @@ static int sd_probe(struct device *dev)
+@@ -2940,7 +2940,7 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
@@ -47728,10 +48413,10 @@ index c0f76da..d974c32 100644
dlci_get(dlci->gsm->dlci[0]);
mux_get(dlci->gsm);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index ff58293..71c87bc 100644
+index 4d6f430..0810fa9 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
-@@ -2502,6 +2502,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+@@ -2504,6 +2504,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = tty_ldisc_N_TTY;
ops->owner = NULL;
@@ -52899,7 +53584,7 @@ index 89dec7f..361b0d75 100644
fd_offset + ex.a_text);
if (error != N_DATADDR(ex)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 4c94a79..f428019 100644
+index 4c94a79..2610454 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -34,6 +34,7 @@
@@ -53415,15 +54100,20 @@ index 4c94a79..f428019 100644
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
-+ unsigned long pax_task_size = TASK_SIZE;
++ unsigned long pax_task_size;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -723,11 +1068,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -723,11 +1068,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out_free_dentry;
/* OK, This is the point of no return */
- current->mm->def_flags = def_flags;
++ current->mm->def_flags = 0;
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ current->mm->pax_flags = 0UL;
@@ -53442,8 +54132,6 @@ index 4c94a79..f428019 100644
+ current->mm->delta_stack = 0UL;
+#endif
+
-+ current->mm->def_flags = 0;
-+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+ send_sig(SIGKILL, current, 0);
@@ -53471,19 +54159,17 @@ index 4c94a79..f428019 100644
+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
+ pax_task_size = SEGMEXEC_TASK_SIZE;
+ current->mm->def_flags |= VM_NOHUGEPAGE;
-+ }
++ } else
+#endif
+
++ pax_task_size = TASK_SIZE;
++
+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
+ put_cpu();
+ }
+#endif
-
- /* Do this immediately, since STACK_TOP as used in setup_arg_pages
- may depend on the personality. */
- SET_PERSONALITY(loc->elf_ex);
+
+#ifdef CONFIG_PAX_ASLR
+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
@@ -53502,7 +54188,7 @@ index 4c94a79..f428019 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -817,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -817,6 +1233,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
@@ -53523,7 +54209,7 @@ index 4c94a79..f428019 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -849,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -849,9 +1279,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -53536,7 +54222,7 @@ index 4c94a79..f428019 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -890,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -890,17 +1320,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -53588,7 +54274,7 @@ index 4c94a79..f428019 100644
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
-@@ -1122,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+@@ -1122,7 +1580,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -53597,7 +54283,7 @@ index 4c94a79..f428019 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1160,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1160,7 +1618,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -53606,7 +54292,7 @@ index 4c94a79..f428019 100644
goto whole;
/*
-@@ -1385,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1385,9 +1843,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -53618,7 +54304,7 @@ index 4c94a79..f428019 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1396,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+@@ -1396,7 +1854,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -53627,7 +54313,7 @@ index 4c94a79..f428019 100644
set_fs(old_fs);
fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
-@@ -2023,14 +2480,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -2023,14 +2481,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -53644,7 +54330,7 @@ index 4c94a79..f428019 100644
return size;
}
-@@ -2123,7 +2580,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2123,7 +2581,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -53653,7 +54339,7 @@ index 4c94a79..f428019 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -2137,10 +2594,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2137,10 +2595,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -53666,7 +54352,7 @@ index 4c94a79..f428019 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -2154,7 +2613,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2154,7 +2614,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -53675,7 +54361,7 @@ index 4c94a79..f428019 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2165,6 +2624,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2165,6 +2625,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -53683,7 +54369,7 @@ index 4c94a79..f428019 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2189,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2189,7 +2650,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -53692,7 +54378,7 @@ index 4c94a79..f428019 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2198,6 +2658,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2198,6 +2659,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -53700,7 +54386,7 @@ index 4c94a79..f428019 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2215,6 +2676,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2215,6 +2677,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -53708,7 +54394,7 @@ index 4c94a79..f428019 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2235,6 +2697,167 @@ out:
+@@ -2235,6 +2698,167 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -54070,6 +54756,19 @@ index e913328..a34fb36 100644
/* Wake up anybody who may be waiting on this transaction */
wake_up(&root->fs_info->transaction_wait);
wake_up(&root->fs_info->transaction_blocked_wait);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 79f057c..e14e1f7 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3375,7 +3375,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG,
+ &token);
+- if (em->block_start == 0)
++ if (em->block_start == EXTENT_MAP_HOLE)
+ skip_csum = true;
+ }
+
diff --git a/fs/buffer.c b/fs/buffer.c
index 6024877..7bd000a 100644
--- a/fs/buffer.c
@@ -54433,6 +55132,28 @@ index c8e03f8..75362f6 100644
#endif
GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 7ddddf2..2e12dbc 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
+ } else {
+- index = wbc->range_start >> PAGE_CACHE_SHIFT;
+- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
++ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+ range_whole = true;
++ index = 0;
++ end = ULONG_MAX;
++ } else {
++ index = wbc->range_start >> PAGE_CACHE_SHIFT;
++ end = wbc->range_end >> PAGE_CACHE_SHIFT;
++ }
+ scanned = true;
+ }
+ retry:
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 7e36ceb..109252f 100644
--- a/fs/cifs/link.c
@@ -55069,9 +55790,18 @@ index 9bdeca1..2a9b08d 100644
EXPORT_SYMBOL(dump_write);
diff --git a/fs/dcache.c b/fs/dcache.c
-index 89f9671..5977a84 100644
+index 89f9671..d2dce57 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
+@@ -1570,7 +1570,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+ */
+ dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
+ if (name->len > DNAME_INLINE_LEN-1) {
+- dname = kmalloc(name->len + 1, GFP_KERNEL);
++ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
+ if (!dname) {
+ kmem_cache_free(dentry_cache, dentry);
+ return NULL;
@@ -2893,6 +2893,7 @@ static int prepend_path(const struct path *path,
restart:
bptr = *buffer;
@@ -59289,7 +60019,7 @@ index d420331..2dbb3fd 100644
}
putname(tmp);
diff --git a/fs/pipe.c b/fs/pipe.c
-index d2c45e1..009fe1c 100644
+index 0e0752e..7cfdd50 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
@@ -59370,7 +60100,16 @@ index d2c45e1..009fe1c 100644
mask |= POLLERR;
}
-@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
+@@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
+ int kill = 0;
+
+ spin_lock(&inode->i_lock);
+- if (!--pipe->files) {
++ if (atomic_dec_and_test(&pipe->files)) {
+ inode->i_pipe = NULL;
+ kill = 1;
+ }
+@@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
__pipe_lock(pipe);
if (file->f_mode & FMODE_READ)
@@ -59385,14 +60124,7 @@ index d2c45e1..009fe1c 100644
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
- }
- spin_lock(&inode->i_lock);
-- if (!--pipe->files) {
-+ if (atomic_dec_and_test(&pipe->files)) {
- inode->i_pipe = NULL;
- kill = 1;
- }
-@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
+@@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
kfree(pipe);
}
@@ -59401,7 +60133,7 @@ index d2c45e1..009fe1c 100644
/*
* pipefs_dname() is called from d_path().
-@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
+@@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
goto fail_iput;
inode->i_pipe = pipe;
@@ -59413,7 +60145,7 @@ index d2c45e1..009fe1c 100644
inode->i_fop = &pipefifo_fops;
/*
-@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
spin_lock(&inode->i_lock);
if (inode->i_pipe) {
pipe = inode->i_pipe;
@@ -59434,7 +60166,7 @@ index d2c45e1..009fe1c 100644
spin_unlock(&inode->i_lock);
free_pipe_info(pipe);
pipe = inode->i_pipe;
-@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
* opened, even when there is no process writing the FIFO.
*/
pipe->r_counter++;
@@ -59447,7 +60179,7 @@ index d2c45e1..009fe1c 100644
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
* seen a writer */
-@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
* errno=ENXIO when there is no process reading the FIFO.
*/
ret = -ENXIO;
@@ -59465,7 +60197,7 @@ index d2c45e1..009fe1c 100644
if (wait_for_partner(pipe, &pipe->r_counter))
goto err_wr;
}
-@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
* the process can at least talk to itself.
*/
@@ -59480,7 +60212,7 @@ index d2c45e1..009fe1c 100644
wake_up_partner(pipe);
break;
-@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
return 0;
err_rd:
@@ -59496,14 +60228,6 @@ index d2c45e1..009fe1c 100644
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
-
- err:
- spin_lock(&inode->i_lock);
-- if (!--pipe->files) {
-+ if (atomic_dec_and_test(&pipe->files)) {
- inode->i_pipe = NULL;
- kill = 1;
- }
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 15af622..0e9f4467 100644
--- a/fs/proc/Kconfig
@@ -67113,10 +67837,10 @@ index 0000000..25f54ef
+};
diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
new file mode 100644
-index 0000000..36e293f
+index 0000000..361a099
--- /dev/null
+++ b/grsecurity/gracl_policy.c
-@@ -0,0 +1,1777 @@
+@@ -0,0 +1,1782 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -67576,12 +68300,12 @@ index 0000000..36e293f
+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
+#endif
+
-+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
++ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
+ if (fakefs_obj_rw == NULL)
+ return 1;
+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
+
-+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
++ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
+ if (fakefs_obj_rwx == NULL)
+ return 1;
+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
@@ -67659,6 +68383,11 @@ index 0000000..36e293f
+ } while_each_thread(task2, task);
+ read_unlock(&tasklist_lock);
+
++ kfree(fakefs_obj_rw);
++ fakefs_obj_rw = NULL;
++ kfree(fakefs_obj_rwx);
++ fakefs_obj_rwx = NULL;
++
+ /* release the reference to the real root dentry and vfsmount */
+ path_put(&gr_real_root);
+ memset(&gr_real_root, 0, sizeof(gr_real_root));
@@ -73989,7 +74718,7 @@ index 0bc7275..4ccbf11 100644
unsigned int offset, size_t len);
diff --git a/include/linux/efi.h b/include/linux/efi.h
-index 5f8f176..62a0556 100644
+index 094ddd0..f1dfcd3 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -745,6 +745,7 @@ struct efivar_operations {
@@ -74001,7 +74730,7 @@ index 5f8f176..62a0556 100644
struct efivars {
/*
diff --git a/include/linux/elf.h b/include/linux/elf.h
-index 40a3c0e..4c45a38 100644
+index 40a3c0e0..4c45a38 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
@@ -77195,7 +77924,7 @@ index cc7494a..1e27036 100644
extern bool qid_valid(struct kqid qid);
diff --git a/include/linux/random.h b/include/linux/random.h
-index bf9085e..128eade 100644
+index bf9085e..1e8bbcf 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -10,9 +10,19 @@
@@ -77220,11 +77949,23 @@ index bf9085e..128eade 100644
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
-@@ -33,6 +43,11 @@ void prandom_seed(u32 seed);
+@@ -23,16 +33,21 @@ extern int random_int_secret_init(void);
+ extern const struct file_operations random_fops, urandom_fops;
+ #endif
+
+-unsigned int get_random_int(void);
++unsigned int __intentional_overflow(-1) get_random_int(void);
+ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+
+-u32 prandom_u32(void);
++u32 prandom_u32(void) __intentional_overflow(-1);
+ void prandom_bytes(void *buf, int nbytes);
+ void prandom_seed(u32 seed);
+
u32 prandom_u32_state(struct rnd_state *);
void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
-+static inline unsigned long pax_get_random_long(void)
++static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
+{
+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
+}
@@ -81824,7 +82565,7 @@ index 086fe73..72c1122 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index c3a1a55..e32b4a98 100644
+index c3a1a55..1b8cfce 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -81847,6 +82588,15 @@ index c3a1a55..e32b4a98 100644
/*
* The futex address must be "naturally" aligned.
*/
+@@ -288,7 +294,7 @@ again:
+ put_page(page);
+ /* serialize against __split_huge_page_splitting() */
+ local_irq_disable();
+- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
++ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
+ page_head = compound_head(page);
+ /*
+ * page_head is valid pointer but we must pin
@@ -441,7 +447,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
static int get_futex_value_locked(u32 *dest, u32 __user *from)
@@ -85664,7 +86414,7 @@ index 88c9c65..7497ebc 100644
.clock_get = alarm_clock_get,
.timer_create = alarm_timer_create,
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 947ba25..20cbade 100644
+index 5cf6c70..ac341b0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -15,6 +15,7 @@
@@ -87232,7 +87982,7 @@ index ae4846f..b0acebe 100644
send_sig(SIGXFSZ, current, 0);
return -EFBIG;
diff --git a/mm/fremap.c b/mm/fremap.c
-index 5bff081..d8189a9 100644
+index 5bff081..bfa6e93 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
@@ -87247,6 +87997,36 @@ index 5bff081..d8189a9 100644
/*
* Make sure the vma is shared, that it supports prefaulting,
* and that the remapped range is valid and fully within
+@@ -208,9 +213,10 @@ get_write_lock:
+ if (mapping_cap_account_dirty(mapping)) {
+ unsigned long addr;
+ struct file *file = get_file(vma->vm_file);
++ /* mmap_region may free vma; grab the info now */
++ vm_flags = ACCESS_ONCE(vma->vm_flags);
+
+- addr = mmap_region(file, start, size,
+- vma->vm_flags, pgoff);
++ addr = mmap_region(file, start, size, vm_flags, pgoff);
+ fput(file);
+ if (IS_ERR_VALUE(addr)) {
+ err = addr;
+@@ -218,7 +224,7 @@ get_write_lock:
+ BUG_ON(addr != start);
+ err = 0;
+ }
+- goto out;
++ goto out_freed;
+ }
+ mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+@@ -253,6 +259,7 @@ get_write_lock:
+ out:
+ if (vma)
+ vm_flags = vma->vm_flags;
++out_freed:
+ if (likely(!has_write_lock))
+ up_read(&mm->mmap_sem);
+ else
diff --git a/mm/highmem.c b/mm/highmem.c
index b32b70c..e512eb0 100644
--- a/mm/highmem.c
@@ -94303,7 +95083,7 @@ index 4b85e6f..22f9ac9 100644
syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 5e2c2f1..6473c22 100644
+index f60b1ee..40b401c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -87,6 +87,7 @@
@@ -94354,7 +95134,7 @@ index 5e2c2f1..6473c22 100644
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
-@@ -1141,7 +1158,7 @@ static unsigned int first_packet_length(struct sock *sk)
+@@ -1144,7 +1161,7 @@ static unsigned int first_packet_length(struct sock *sk)
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
@@ -94363,7 +95143,7 @@ index 5e2c2f1..6473c22 100644
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
}
-@@ -1221,6 +1238,10 @@ try_again:
+@@ -1224,6 +1241,10 @@ try_again:
if (!skb)
goto out;
@@ -94374,7 +95154,7 @@ index 5e2c2f1..6473c22 100644
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
-@@ -1254,7 +1275,7 @@ try_again:
+@@ -1257,7 +1278,7 @@ try_again:
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
@@ -94383,7 +95163,7 @@ index 5e2c2f1..6473c22 100644
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
-@@ -1542,7 +1563,7 @@ csum_error:
+@@ -1545,7 +1566,7 @@ csum_error:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -94392,7 +95172,7 @@ index 5e2c2f1..6473c22 100644
kfree_skb(skb);
return -1;
}
-@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+@@ -1564,7 +1585,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
@@ -94401,7 +95181,7 @@ index 5e2c2f1..6473c22 100644
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-@@ -1733,6 +1754,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -1736,6 +1757,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -94411,7 +95191,7 @@ index 5e2c2f1..6473c22 100644
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
-@@ -2165,7 +2189,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -2168,7 +2192,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -96851,7 +97631,7 @@ index 6b36561..4f21064 100644
table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
diff --git a/net/socket.c b/net/socket.c
-index e83c416..17afbfa 100644
+index e83c416..9169305 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -88,6 +88,7 @@
@@ -97053,6 +97833,15 @@ index e83c416..17afbfa 100644
/* user mode address pointers */
struct sockaddr __user *uaddr;
+@@ -2227,7 +2293,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ /* Save the user-mode address (verify_iovec will change the
+ * kernel msghdr to use the kernel address space)
+ */
+- uaddr = (__force void __user *)msg_sys->msg_name;
++ uaddr = (void __force_user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+ if (MSG_CMSG_COMPAT & flags)
+ err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
@@ -2985,7 +3051,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -101544,10 +102333,10 @@ index 0000000..568b360
+}
diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
new file mode 100644
-index 0000000..698da67
+index 0000000..a25306b
--- /dev/null
+++ b/tools/gcc/kernexec_plugin.c
-@@ -0,0 +1,471 @@
+@@ -0,0 +1,474 @@
+/*
+ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -101693,21 +102482,21 @@ index 0000000..698da67
+}
+
+/*
-+ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
++ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
+ */
+static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
+{
+ gimple asm_movabs_stmt;
+
-+ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
-+ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
++ // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
+ gimple_asm_set_volatile(asm_movabs_stmt, true);
+ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
+ update_stmt(asm_movabs_stmt);
+}
+
+/*
-+ * find all asm() stmts that clobber r10 and add a reload of r10
++ * find all asm() stmts that clobber r12 and add a reload of r12
+ */
+static unsigned int execute_kernexec_reload(void)
+{
@@ -101718,7 +102507,7 @@ index 0000000..698da67
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ // gimple match: __asm__ ("" : : : "r10");
++ // gimple match: __asm__ ("" : : : "r12");
+ gimple asm_stmt;
+ size_t nclobbers;
+
@@ -101727,11 +102516,11 @@ index 0000000..698da67
+ if (gimple_code(asm_stmt) != GIMPLE_ASM)
+ continue;
+
-+ // ... clobbering r10
++ // ... clobbering r12
+ nclobbers = gimple_asm_nclobbers(asm_stmt);
+ while (nclobbers--) {
+ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
-+ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
+ continue;
+ kernexec_reload_fptr_mask(&gsi);
+//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
@@ -101814,7 +102603,7 @@ index 0000000..698da67
+#endif
+ new_fptr = make_ssa_name(new_fptr, NULL);
+
-+ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
++ // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
+ input = build_tree_list(NULL_TREE, build_string(1, "0"));
+ input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
+ output = build_tree_list(NULL_TREE, build_string(2, "=r"));
@@ -101826,7 +102615,7 @@ index 0000000..698da67
+ vec_safe_push(inputs, input);
+ vec_safe_push(outputs, output);
+#endif
-+ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
++ asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
+ SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
+ gimple_asm_set_volatile(asm_or_stmt, true);
+ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
@@ -101906,19 +102695,19 @@ index 0000000..698da67
+ emit_insn_before(btsq, insn);
+}
+
-+// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
++// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
+static void kernexec_instrument_retaddr_or(rtx insn)
+{
+ rtx orq;
+ rtvec argvec, constraintvec, labelvec;
+ int line;
+
-+ // create asm volatile("orq %%r10,(%%rsp)":::)
++ // create asm volatile("orq %%r12,(%%rsp)":::)
+ argvec = rtvec_alloc(0);
+ constraintvec = rtvec_alloc(0);
+ labelvec = rtvec_alloc(0);
+ line = expand_location(RTL_LOCATION(insn)).line;
-+ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
+ MEM_VOLATILE_P(orq) = 1;
+// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
+ emit_insn_before(orq, insn);
@@ -101931,6 +102720,9 @@ index 0000000..698da67
+{
+ rtx insn;
+
++// if (stack_realign_drap)
++// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
+ // 1. find function returns
+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
+ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
@@ -102002,7 +102794,7 @@ index 0000000..698da67
+ } else if (!strcmp(argv[i].value, "or")) {
+ kernexec_instrument_fptr = kernexec_instrument_fptr_or;
+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
-+ fix_register("r10", 1, 1);
++ fix_register("r12", 1, 1);
+ } else
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
+ continue;
@@ -102362,10 +103154,10 @@ index 0000000..679b9ef
+}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
-index 0000000..3a5b4b5
+index 0000000..7dad2cd
--- /dev/null
+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,7687 @@
+@@ -0,0 +1,7690 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+batadv_orig_node_del_if_4 batadv_orig_node_del_if 2 4 NULL
@@ -102380,8 +103172,8 @@ index 0000000..3a5b4b5
+snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
+load_msg_95 load_msg 2 95 NULL
+device_flush_iotlb_115 device_flush_iotlb 2-3 115 NULL
-+ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL nohasharray
-+write_all_supers_117 write_all_supers 0 117 &ipath_verbs_send_117
++write_all_supers_117 write_all_supers 0 117 NULL nohasharray
++ipath_verbs_send_117 ipath_verbs_send 5-3 117 &write_all_supers_117
+init_q_132 init_q 4 132 NULL
+ocfs2_local_alloc_slide_window_134 ocfs2_local_alloc_slide_window 0 134 NULL
+memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
@@ -103375,8 +104167,8 @@ index 0000000..3a5b4b5
+usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
+qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
+venus_mkdir_8967 venus_mkdir 4 8967 NULL
-+seq_open_net_8968 seq_open_net 4 8968 NULL nohasharray
-+vol_cdev_read_8968 vol_cdev_read 3 8968 &seq_open_net_8968
++vol_cdev_read_8968 vol_cdev_read 3 8968 NULL nohasharray
++seq_open_net_8968 seq_open_net 4 8968 &vol_cdev_read_8968
+bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
+btrfs_alloc_free_block_8986 btrfs_alloc_free_block 3-8 8986 NULL
+jbd2_journal_blocks_per_page_9004 jbd2_journal_blocks_per_page 0 9004 NULL
@@ -103789,8 +104581,8 @@ index 0000000..3a5b4b5
+shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
+add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
+note_last_dentry_12285 note_last_dentry 3 12285 NULL
-+il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 NULL nohasharray
-+roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 &il_dbgfs_nvm_read_12288
++roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL nohasharray
++il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 &roundup_to_multiple_of_64_12288
+wrap_min_12303 wrap_min 0-1-2 12303 NULL
+bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
+pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
@@ -104049,8 +104841,8 @@ index 0000000..3a5b4b5
+efx_mdio_check_mmds_14486 efx_mdio_check_mmds 2 14486 NULL nohasharray
+ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 &efx_mdio_check_mmds_14486
+ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
-+ep0_write_14536 ep0_write 3 14536 NULL nohasharray
-+dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 &ep0_write_14536
++dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 NULL nohasharray
++ep0_write_14536 ep0_write 3 14536 &dataflash_read_user_otp_14536
+register_trace_sched_switch_14545 register_trace_sched_switch 0 14545 NULL
+picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
+drm_vmalloc_dma_14550 drm_vmalloc_dma 1 14550 NULL
@@ -104086,8 +104878,8 @@ index 0000000..3a5b4b5
+keys_proc_write_14792 keys_proc_write 3 14792 NULL
+ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
+__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
-+hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
-+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
++snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 NULL nohasharray
++hpet_readl_14801 hpet_readl 0 14801 &snd_als300_gcr_read_14801
+changed_cb_14819 changed_cb 0 14819 NULL
+do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
+mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
@@ -104778,8 +105570,8 @@ index 0000000..3a5b4b5
+cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
+oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
+oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
-+snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL nohasharray
-+btrfs_qgroup_reserve_20676 btrfs_qgroup_reserve 0 20676 &snd_hdsp_playback_copy_20676
++btrfs_qgroup_reserve_20676 btrfs_qgroup_reserve 0 20676 NULL nohasharray
++snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 &btrfs_qgroup_reserve_20676
+get_user_page_nowait_20682 get_user_page_nowait 3 20682 NULL nohasharray
+dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 &get_user_page_nowait_20682
+cpumask_size_20683 cpumask_size 0 20683 NULL
@@ -105444,8 +106236,8 @@ index 0000000..3a5b4b5
+read_sb_page_26119 read_sb_page 5 26119 NULL
+__fswab64_26155 __fswab64 0 26155 NULL
+copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL
-+gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray
-+ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166
++ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 NULL nohasharray
++gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 &ath6kl_roam_table_read_26166
+disk_devt_26180 disk_devt 0 26180 NULL
+cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
+ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
@@ -105771,8 +106563,8 @@ index 0000000..3a5b4b5
+xz_dec_init_29029 xz_dec_init 2 29029 NULL
+i915_gem_object_bind_to_vm_29035 i915_gem_object_bind_to_vm 0 29035 NULL
+ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
-+ProcessGetHostMibs_29049 ProcessGetHostMibs 0 29049 NULL nohasharray
-+rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 &ProcessGetHostMibs_29049
++rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL nohasharray
++ProcessGetHostMibs_29049 ProcessGetHostMibs 0 29049 &rxrpc_sendmsg_29049
+btrfs_root_bytenr_29058 btrfs_root_bytenr 0 29058 NULL
+iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
+roundup_64_29066 roundup_64 2-0-1 29066 NULL
@@ -105875,7 +106667,8 @@ index 0000000..3a5b4b5
+lov_ost_pool_extend_29914 lov_ost_pool_extend 2 29914 NULL
+write_file_queue_29922 write_file_queue 3 29922 NULL
+ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
-+__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL
++__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
++ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947
+diva_os_get_context_size_29983 diva_os_get_context_size 0 29983 NULL
+arch_setup_dmar_msi_29992 arch_setup_dmar_msi 1 29992 NULL
+vmci_host_setup_notify_30002 vmci_host_setup_notify 2 30002 NULL
@@ -106382,8 +107175,8 @@ index 0000000..3a5b4b5
+av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
+usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
+read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
-+iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
-+ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
++ivtv_read_pos_34400 ivtv_read_pos 3 34400 NULL nohasharray
++iwl_calib_set_34400 iwl_calib_set 3 34400 &ivtv_read_pos_34400
+wd_exp_mode_write_34407 wd_exp_mode_write 3 34407 NULL
+nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
+security_socket_create_34439 security_socket_create 0 34439 NULL
@@ -106393,13 +107186,13 @@ index 0000000..3a5b4b5
+i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
+ocfs2_block_group_clear_bits_34484 ocfs2_block_group_clear_bits 0 34484 NULL
+security_inode_permission_34488 security_inode_permission 0 34488 NULL
-+SyS_pwritev_34494 SyS_pwritev 3 34494 NULL nohasharray
-+__ffs64_34494 __ffs64 1-0 34494 &SyS_pwritev_34494
++__ffs64_34494 __ffs64 1-0 34494 NULL nohasharray
++SyS_pwritev_34494 SyS_pwritev 3 34494 &__ffs64_34494
+qp_alloc_res_34496 qp_alloc_res 5 34496 NULL
+lu_buf_check_and_alloc_34505 lu_buf_check_and_alloc 2 34505 NULL
+snd_pcm_hw_param_value_34525 snd_pcm_hw_param_value 0 34525 NULL
-+ext4_fallocate_34537 ext4_fallocate 4-3 34537 NULL nohasharray
-+tracing_stats_read_34537 tracing_stats_read 3 34537 &ext4_fallocate_34537
++tracing_stats_read_34537 tracing_stats_read 3 34537 NULL nohasharray
++ext4_fallocate_34537 ext4_fallocate 4-3 34537 &tracing_stats_read_34537
+hugetlbfs_read_actor_34547 hugetlbfs_read_actor 4-5-2-0 34547 NULL
+dbBackSplit_34561 dbBackSplit 0 34561 NULL
+alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL nohasharray
@@ -106412,8 +107205,8 @@ index 0000000..3a5b4b5
+cw1200_queue_init_34599 cw1200_queue_init 4 34599 &ceph_msgpool_init_34599
+__add_prelim_ref_34600 __add_prelim_ref 0 34600 NULL
+brcmf_cfg80211_mgmt_tx_34608 brcmf_cfg80211_mgmt_tx 7 34608 NULL
-+__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL nohasharray
-+mtd_write_34609 mtd_write 0 34609 &__jffs2_ref_totlen_34609
++mtd_write_34609 mtd_write 0 34609 NULL nohasharray
++__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 &mtd_write_34609
+apei_get_nvs_resources_34616 apei_get_nvs_resources 0 34616 NULL
+__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
+cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
@@ -106876,11 +107669,11 @@ index 0000000..3a5b4b5
+snd_pcm_playback_rewind_38249 snd_pcm_playback_rewind 0-2 38249 NULL
+from_dblock_38256 from_dblock 0-1 38256 NULL
+vmci_qp_broker_set_page_store_38260 vmci_qp_broker_set_page_store 3-2 38260 NULL
-+SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 NULL nohasharray
-+ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &SYSC_msgrcv_38268 nohasharray
-+mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &ieee80211_if_read_auto_open_plinks_38268
-+xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 NULL nohasharray
-+xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 &xfs_bmdr_to_bmbt_38275
++ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 NULL nohasharray
++SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 &ieee80211_if_read_auto_open_plinks_38268 nohasharray
++mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &SYSC_msgrcv_38268
++xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
++xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
+ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
+gpa_to_gfn_38291 gpa_to_gfn 0-1 38291 NULL
+ucma_query_path_38305 ucma_query_path 3 38305 NULL
@@ -106953,8 +107746,8 @@ index 0000000..3a5b4b5
+ext3_trim_all_free_38929 ext3_trim_all_free 4-3-2 38929 NULL
+il_dbgfs_sram_write_38942 il_dbgfs_sram_write 3 38942 NULL
+__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
-+C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 NULL nohasharray
-+usb_maxpacket_38977 usb_maxpacket 0 38977 &C_SYSC_preadv64_38977
++usb_maxpacket_38977 usb_maxpacket 0 38977 NULL nohasharray
++C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 &usb_maxpacket_38977
+OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
+lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
+udf_new_block_38999 udf_new_block 4 38999 NULL
@@ -107019,8 +107812,8 @@ index 0000000..3a5b4b5
+ext_depth_39607 ext_depth 0 39607 NULL
+nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
+sdio_readb_39618 sdio_readb 0 39618 NULL
-+set_dev_class_39645 set_dev_class 4 39645 NULL nohasharray
-+dm_exception_table_init_39645 dm_exception_table_init 2 39645 &set_dev_class_39645
++dm_exception_table_init_39645 dm_exception_table_init 2 39645 NULL nohasharray
++set_dev_class_39645 set_dev_class 4 39645 &dm_exception_table_init_39645
+snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
+tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL
+kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
@@ -107463,8 +108256,8 @@ index 0000000..3a5b4b5
+usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
+ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
+ocfs2_rotate_tree_left_43442 ocfs2_rotate_tree_left 0 43442 NULL
-+usemap_size_43443 usemap_size 0-2-1 43443 NULL nohasharray
-+usb_string_43443 usb_string 0 43443 &usemap_size_43443
++usb_string_43443 usb_string 0 43443 NULL nohasharray
++usemap_size_43443 usemap_size 0-2-1 43443 &usb_string_43443
+get_vm_area_size_43444 get_vm_area_size 0 43444 NULL
+nvme_trans_device_id_page_43466 nvme_trans_device_id_page 4 43466 NULL
+calculate_discard_block_size_43480 calculate_discard_block_size 0 43480 NULL nohasharray
@@ -107534,6 +108327,7 @@ index 0000000..3a5b4b5
+xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
+skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL
+tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
++hwif_to_node_44127 hwif_to_node 0 44127 NULL
+SyS_process_vm_writev_44129 SyS_process_vm_writev 3-5 44129 NULL
+vmw_gmr_bind_44130 vmw_gmr_bind 3 44130 NULL
+lookup_extent_data_ref_44136 lookup_extent_data_ref 0 44136 NULL
@@ -107579,7 +108373,6 @@ index 0000000..3a5b4b5
+osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
+check_user_page_hwpoison_44412 check_user_page_hwpoison 1 44412 NULL
+ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
-+prandom_u32_state_44445 prandom_u32_state 0 44445 NULL
+iwl_dbgfs_bf_params_write_44450 iwl_dbgfs_bf_params_write 3 44450 NULL
+write_file_debug_44476 write_file_debug 3 44476 NULL
+btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
@@ -107743,6 +108536,7 @@ index 0000000..3a5b4b5
+ll_max_readahead_mb_seq_write_45815 ll_max_readahead_mb_seq_write 3 45815 NULL
+fm_v4l2_init_video_device_45821 fm_v4l2_init_video_device 2 45821 NULL
+memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL
++ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL
+task_state_char_45839 task_state_char 1 45839 NULL
+__ip_select_ident_45851 __ip_select_ident 3 45851 NULL
+x509_process_extension_45854 x509_process_extension 5 45854 NULL
@@ -107771,8 +108565,8 @@ index 0000000..3a5b4b5
+dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
+sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
+arizona_set_irq_wake_46101 arizona_set_irq_wake 2 46101 NULL
-+memcg_update_array_size_46111 memcg_update_array_size 1 46111 NULL nohasharray
-+il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 &memcg_update_array_size_46111
++il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 NULL nohasharray
++memcg_update_array_size_46111 memcg_update_array_size 1 46111 &il3945_ucode_general_stats_read_46111
+C_SYSC_writev_46113 C_SYSC_writev 3 46113 NULL
+mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
+paging32_walk_addr_nested_46121 paging32_walk_addr_nested 3 46121 NULL
@@ -107792,8 +108586,8 @@ index 0000000..3a5b4b5
+mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
+ReadReg_46277 ReadReg 0 46277 NULL
+sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL
-+__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL nohasharray
-+compat_SyS_readv_46328 compat_SyS_readv 3 46328 &__hwahc_dev_set_key_46328
++compat_SyS_readv_46328 compat_SyS_readv 3 46328 NULL nohasharray
++__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 &compat_SyS_readv_46328
+iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
+smk_write_direct_46363 smk_write_direct 3 46363 NULL
+__iommu_calculate_agaw_46366 __iommu_calculate_agaw 2 46366 NULL
@@ -108011,8 +108805,8 @@ index 0000000..3a5b4b5
+set_discoverable_48141 set_discoverable 4 48141 NULL
+dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
+get_cur_inode_state_48149 get_cur_inode_state 0 48149 NULL
-+_add_to_r4w_48152 _add_to_r4w 4 48152 NULL nohasharray
-+bitmap_onto_48152 bitmap_onto 4 48152 &_add_to_r4w_48152
++bitmap_onto_48152 bitmap_onto 4 48152 NULL nohasharray
++_add_to_r4w_48152 _add_to_r4w 4 48152 &bitmap_onto_48152
+isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
+c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
+ocfs2_find_next_zero_bit_unaligned_48170 ocfs2_find_next_zero_bit_unaligned 2-3 48170 NULL nohasharray
@@ -108093,8 +108887,8 @@ index 0000000..3a5b4b5
+vc_do_resize_48842 vc_do_resize 4-3 48842 NULL
+comedi_buf_write_alloc_48846 comedi_buf_write_alloc 0-2 48846 NULL
+suspend_dtim_interval_write_48854 suspend_dtim_interval_write 3 48854 NULL
-+C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 NULL nohasharray
-+viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 &C_SYSC_pwritev64_48864
++viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL nohasharray
++C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 &viafb_dvp1_proc_write_48864
+__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
+ext2_alloc_branch_48889 ext2_alloc_branch 4 48889 NULL
+crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
@@ -108349,8 +109143,8 @@ index 0000000..3a5b4b5
+dpcm_show_state_50827 dpcm_show_state 0 50827 NULL
+acpi_ev_install_gpe_block_50829 acpi_ev_install_gpe_block 2 50829 NULL
+SetArea_50835 SetArea 4 50835 NULL nohasharray
-+create_mem_extents_50835 create_mem_extents 0 50835 &SetArea_50835 nohasharray
-+mask_from_50835 mask_from 0-1-2 50835 &create_mem_extents_50835
++mask_from_50835 mask_from 0-1-2 50835 &SetArea_50835 nohasharray
++create_mem_extents_50835 create_mem_extents 0 50835 &mask_from_50835
+videobuf_dma_init_user_50839 videobuf_dma_init_user 3-4 50839 NULL
+btrfs_search_slot_for_read_50843 btrfs_search_slot_for_read 0 50843 NULL
+self_check_write_50856 self_check_write 0-5 50856 NULL
@@ -108441,8 +109235,8 @@ index 0000000..3a5b4b5
+load_pdptrs_51541 load_pdptrs 3 51541 NULL
+__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
+ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
-+get_cur_path_51589 get_cur_path 0 51589 NULL nohasharray
-+snd_interval_refine_first_51589 snd_interval_refine_first 0 51589 &get_cur_path_51589
++snd_interval_refine_first_51589 snd_interval_refine_first 0 51589 NULL nohasharray
++get_cur_path_51589 get_cur_path 0 51589 &snd_interval_refine_first_51589
+aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
+table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
+extent_fiemap_51621 extent_fiemap 3 51621 NULL
@@ -108478,8 +109272,8 @@ index 0000000..3a5b4b5
+get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
+user_read_51881 user_read 3 51881 NULL
+dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
-+SyS_mq_timedsend_51896 SyS_mq_timedsend 3 51896 NULL nohasharray
-+virt_to_phys_51896 virt_to_phys 0 51896 &SyS_mq_timedsend_51896
++virt_to_phys_51896 virt_to_phys 0 51896 NULL nohasharray
++SyS_mq_timedsend_51896 SyS_mq_timedsend 3 51896 &virt_to_phys_51896
+commit_fs_roots_51898 commit_fs_roots 0 51898 NULL
+wmi_set_ie_51919 wmi_set_ie 3 51919 NULL
+dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
@@ -108689,8 +109483,8 @@ index 0000000..3a5b4b5
+nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
+fuse_fill_write_pages_53682 fuse_fill_write_pages 0-4 53682 NULL
+v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
-+bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL nohasharray
-+igb_alloc_q_vector_53690 igb_alloc_q_vector 6-4 53690 &bdev_logical_block_size_53690
++igb_alloc_q_vector_53690 igb_alloc_q_vector 6-4 53690 NULL nohasharray
++bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 &igb_alloc_q_vector_53690
+find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
+bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
+__ocfs2_resv_find_window_53721 __ocfs2_resv_find_window 3 53721 NULL
@@ -108891,8 +109685,8 @@ index 0000000..3a5b4b5
+lov_get_stripecnt_55297 lov_get_stripecnt 0-3 55297 NULL
+gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
+wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
-+__get_vm_area_node_55305 __get_vm_area_node 6 55305 NULL nohasharray
-+qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 &__get_vm_area_node_55305
++qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 NULL nohasharray
++__get_vm_area_node_55305 __get_vm_area_node 6 55305 &qp_alloc_guest_work_55305
+__vxge_hw_vpath_initialize_55328 __vxge_hw_vpath_initialize 2 55328 NULL
+do_shmat_55336 do_shmat 5 55336 NULL
+vme_user_read_55338 vme_user_read 3 55338 NULL
@@ -109075,8 +109869,8 @@ index 0000000..3a5b4b5
+__bitmap_clear_bits_56912 __bitmap_clear_bits 3 56912 NULL
+strcspn_56913 strcspn 0 56913 NULL
+__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
-+CopyBufferToControlPacket_56933 CopyBufferToControlPacket 0 56933 NULL nohasharray
-+journal_init_revoke_56933 journal_init_revoke 2 56933 &CopyBufferToControlPacket_56933
++journal_init_revoke_56933 journal_init_revoke 2 56933 NULL nohasharray
++CopyBufferToControlPacket_56933 CopyBufferToControlPacket 0 56933 &journal_init_revoke_56933
+nouveau_xtensa_create__56952 nouveau_xtensa_create_ 8 56952 NULL
+diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
+nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
@@ -109192,8 +109986,8 @@ index 0000000..3a5b4b5
+kiblnd_create_tx_pool_57846 kiblnd_create_tx_pool 2 57846 NULL
+process_all_new_xattrs_57881 process_all_new_xattrs 0 57881 NULL
+xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
-+iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL nohasharray
-+atomic_add_return_unchecked_57910 atomic_add_return_unchecked 0-1 57910 &iio_read_first_n_kfifo_57910
++atomic_add_return_unchecked_57910 atomic_add_return_unchecked 0-1 57910 NULL nohasharray
++iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 &atomic_add_return_unchecked_57910
+memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL
+twl_i2c_write_57923 twl_i2c_write 3-4 57923 NULL
+__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
@@ -109432,8 +110226,8 @@ index 0000000..3a5b4b5
+xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
+bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
+do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
-+vcs_size_60050 vcs_size 0 60050 NULL nohasharray
-+pin_2_irq_60050 pin_2_irq 0-3 60050 &vcs_size_60050
++pin_2_irq_60050 pin_2_irq 0-3 60050 NULL nohasharray
++vcs_size_60050 vcs_size 0 60050 &pin_2_irq_60050
+gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 NULL
+open_cur_inode_file_60057 open_cur_inode_file 0 60057 NULL
+compat_writev_60063 compat_writev 3 60063 NULL
@@ -109662,6 +110456,7 @@ index 0000000..3a5b4b5
+ipath_user_sdma_pin_pages_62100 ipath_user_sdma_pin_pages 3-5-4 62100 NULL
+jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
+btrfs_direct_IO_62114 btrfs_direct_IO 4 62114 NULL
++ip_recv_error_62117 ip_recv_error 3 62117 NULL
+generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL
+llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
+qib_diag_write_62133 qib_diag_write 3 62133 NULL nohasharray
@@ -110055,10 +110850,10 @@ index 0000000..3a5b4b5
+nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..a3f9702
+index 0000000..5515dcb
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,3870 @@
+@@ -0,0 +1,3927 @@
+/*
+ * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -110119,6 +110914,10 @@ index 0000000..a3f9702
+#define MIN_CHECK true
+#define MAX_CHECK false
+
++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF\n\t"
++#define YES_ASM_STR "# size_overflow MARK_YES\n\t"
++#define OK_ASM_STR "# size_overflow\n\t"
++
+#if BUILDING_GCC_VERSION == 4005
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
+#endif
@@ -110184,7 +110983,7 @@ index 0000000..a3f9702
+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20131203beta",
++ .version = "20131214beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -111398,11 +112197,16 @@ index 0000000..a3f9702
+
+ cast_rhs_type = TREE_TYPE(cast_rhs);
+ type_max_type = TREE_TYPE(type_max);
-+ type_min_type = TREE_TYPE(type_min);
+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
-+ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+
+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
++
++ // special case: get_size_overflow_type(), 32, u64->s
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
++ return;
++
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
@@ -111670,7 +112474,7 @@ index 0000000..a3f9702
+ break;
+ case DImode:
+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ new_type = intDI_type_node;
++ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
+ else
+ new_type = intTI_type_node;
+ break;
@@ -112252,36 +113056,43 @@ index 0000000..a3f9702
+ return false;
+}
+
++static const char *get_asm_string(const_gimple stmt)
++{
++ if (!stmt)
++ return NULL;
++ if (gimple_code(stmt) != GIMPLE_ASM)
++ return NULL;
++
++ return gimple_asm_string(stmt);
++}
++
+static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
+{
+ const char *str;
+
-+ if (!stmt)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ str = gimple_asm_string(stmt);
-+ return !strcmp(str, "# size_overflow MARK_TURN_OFF\n\t");
++ return !strcmp(str, TURN_OFF_ASM_STR);
+}
+
+static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
+{
+ const char *str;
+
-+ if (!stmt)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ str = gimple_asm_string(stmt);
-+ return !strcmp(str, "# size_overflow MARK_YES\n\t");
++ return !strcmp(str, YES_ASM_STR);
+}
+
+static bool is_size_overflow_asm(const_gimple stmt)
+{
+ const char *str;
+
-+ if (!stmt)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ str = gimple_asm_string(stmt);
+ return !strncmp(str, "# size_overflow", 15);
+}
+
@@ -112370,8 +113181,6 @@ index 0000000..a3f9702
+ */
+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
+{
-+ const_tree input, output;
-+
+ if (!cur_node->intentional_mark_from_gimple)
+ return false;
+
@@ -112383,10 +113192,6 @@ index 0000000..a3f9702
+ // skip param decls
+ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
+ return true;
-+ input = gimple_asm_input_op(cur_node->intentional_mark_from_gimple, 0);
-+ output = gimple_asm_output_op(cur_node->intentional_mark_from_gimple, 0);
-+
-+ replace_size_overflow_asm_with_assign(cur_node->intentional_mark_from_gimple, TREE_VALUE(output), TREE_VALUE(input));
+ return true;
+}
+
@@ -112399,6 +113204,9 @@ index 0000000..a3f9702
+{
+ const_tree fndecl;
+
++ if (is_intentional_attribute_from_gimple(cur_node))
++ return;
++
+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
+ return;
@@ -112423,9 +113231,6 @@ index 0000000..a3f9702
+ else if (is_yes_intentional_attr(fndecl, cur_node->num))
+ cur_node->intentional_attr_decl = MARK_YES;
+
-+ if (is_intentional_attribute_from_gimple(cur_node))
-+ return;
-+
+ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
+ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
+}
@@ -112511,13 +113316,8 @@ index 0000000..a3f9702
+// a size_overflow asm stmt in the control flow doesn't stop the recursion
+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
+{
-+ const_tree asm_lhs;
-+
+ if (!is_size_overflow_asm(stmt))
-+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+
-+ asm_lhs = gimple_asm_input_op(stmt, 0);
-+ walk_use_def(visited, cur_node, TREE_VALUE(asm_lhs));
++ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+}
+
+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
@@ -112578,39 +113378,58 @@ index 0000000..a3f9702
+ pointer_set_destroy(visited);
+}
+
-+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
-+ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
-+ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
-+ * If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
++enum precond {
++ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
+ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
+ * Stmt duplication is unnecessary if there are no binary/ternary assignements or if the unary assignment isn't a cast.
+ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assigments then we assume that it is some kind of error code.
+ */
-+static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++static enum precond check_preconditions(struct interesting_node *cur_node)
+{
-+ struct pointer_set_t *visited;
+ bool interesting_conditions[3] = {false, false, false};
-+ tree new_node, orig_node = cur_node->node;
+
+ set_last_nodes(cur_node);
+
+ check_intentional_attribute_ipa(cur_node);
+ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
-+ return cnodes;
++ return NO_ATTRIBUTE_SEARCH;
+
+ search_interesting_conditions(cur_node, interesting_conditions);
+
+ // error code
+ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
-+ return cnodes;
++ return NO_ATTRIBUTE_SEARCH;
+
-+ cnodes = search_overflow_attribute(cnodes, cur_node);
++ // unnecessary overflow check
++ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ return NO_CHECK_INSERT;
+
+ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
++ return NO_CHECK_INSERT;
++
++ return NONE;
++}
++
++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
++ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
++ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++ enum precond ret;
++ struct pointer_set_t *visited;
++ tree new_node, orig_node = cur_node->node;
++
++ ret = check_preconditions(cur_node);
++ if (ret == NO_ATTRIBUTE_SEARCH)
+ return cnodes;
+
-+ // unnecessary overflow check
-+ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ cnodes = search_overflow_attribute(cnodes, cur_node);
++
++ if (ret == NO_CHECK_INSERT)
+ return cnodes;
+
+ visited = pointer_set_create();
@@ -112822,9 +113641,6 @@ index 0000000..a3f9702
+ imm_use_iterator imm_iter;
+ unsigned int argnum;
+
-+ if (is_size_overflow_intentional_asm_turn_off(intentional_asm))
-+ return head;
-+
+ gcc_assert(TREE_CODE(node) == SSA_NAME);
+
+ if (pointer_set_insert(visited, node))
@@ -112879,8 +113695,6 @@ index 0000000..a3f9702
+ gimple_stmt_iterator gsi;
+ tree input, output;
+
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ return;
+ if (!is_size_overflow_asm(stmt))
+ return;
+
@@ -112913,13 +113727,19 @@ index 0000000..a3f9702
+
+ gcc_assert(gimple_asm_ninputs(stmt) == 1);
+
++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
+ if (gimple_asm_noutputs(stmt) == 0) {
-+ const_tree input = gimple_asm_input_op(stmt, 0);
++ const_tree input;
++
++ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
+
++ input = gimple_asm_input_op(stmt, 0);
+ remove_size_overflow_asm(stmt);
+ if (is_gimple_constant(TREE_VALUE(input)))
+ return head;
-+
+ visited = pointer_set_create();
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
+ pointer_set_destroy(visited);
@@ -113326,6 +114146,9 @@ index 0000000..a3f9702
+ case GIMPLE_NOP:
+ return search_intentional(visited, SSA_NAME_VAR(lhs));
+ case GIMPLE_ASM:
++ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ return MARK_TURN_OFF;
++ return MARK_NO;
+ case GIMPLE_CALL:
+ return MARK_NO;
+ case GIMPLE_PHI:
@@ -113347,10 +114170,9 @@ index 0000000..a3f9702
+}
+
+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
-+static const char *check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
+{
+ const_tree fndecl;
-+ const char *asm_str;
+ struct pointer_set_t *visited;
+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
+
@@ -113360,7 +114182,7 @@ index 0000000..a3f9702
+ else if (is_yes_intentional_attr(fndecl, argnum))
+ decl_attr = MARK_YES;
+ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ return "# size_overflow MARK_TURN_OFF\n\t";
++ return MARK_TURN_OFF;
+ }
+
+ visited = pointer_set_create();
@@ -113369,18 +114191,13 @@ index 0000000..a3f9702
+
+ switch (cur_fndecl_attr) {
+ case MARK_NO:
-+ asm_str = "# size_overflow\n\t";
-+ break;
++ return MARK_NO;
+ case MARK_TURN_OFF:
-+ asm_str = "# size_overflow MARK_TURN_OFF\n\t";
-+ break;
++ return MARK_TURN_OFF;
+ default:
-+ asm_str = "# size_overflow MARK_YES\n\t";
+ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
-+ break;
++ return MARK_YES;
+ }
-+
-+ return asm_str;
+}
+
+static void check_missing_size_overflow_attribute(tree var)
@@ -113516,6 +114333,21 @@ index 0000000..a3f9702
+ update_stmt(stmt);
+}
+
++static const char *convert_mark_to_str(enum mark mark)
++{
++ switch (mark) {
++ case MARK_NO:
++ return OK_ASM_STR;
++ case MARK_YES:
++ case MARK_NOT_INTENTIONAL:
++ return YES_ASM_STR;
++ case MARK_TURN_OFF:
++ return TURN_OFF_ASM_STR;
++ }
++
++ gcc_unreachable();
++}
++
+/* Create the input of the size_overflow asm stmt.
+ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
@@ -113529,6 +114361,8 @@ index 0000000..a3f9702
+ return;
+ }
+
++ gcc_assert(!is_size_overflow_intentional_asm_turn_off(asm_data->def_stmt));
++
+ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
+ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
+
@@ -113541,7 +114375,11 @@ index 0000000..a3f9702
+ create_output_from_phi(stmt, argnum, asm_data);
+ break;
+ case GIMPLE_NOP: {
-+ const char *str = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++ enum mark mark;
++ const char *str;
++
++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++ str = convert_mark_to_str(mark);
+
+ asm_data->input = asm_data->output;
+ asm_data->output = NULL;
@@ -113571,19 +114409,24 @@ index 0000000..a3f9702
+{
+ struct asm_data asm_data;
+ const char *str;
++ enum mark mark;
+
+ if (is_gimple_constant(output_node))
+ return;
+
++ asm_data.output = output_node;
++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ if (mark == MARK_TURN_OFF)
++ return;
++
+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
+
-+ asm_data.output = output_node;
+ asm_data.def_stmt = get_def_stmt(asm_data.output);
+ create_asm_input(stmt, argnum, &asm_data);
+ if (asm_data.input == NULL_TREE)
+ return;
+
-+ str = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ str = convert_mark_to_str(mark);
+ create_asm_stmt(str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
+}
+
@@ -113680,16 +114523,22 @@ index 0000000..a3f9702
+ if (mark != MARK_TURN_OFF)
+ return false;
+
-+ asm_data.input = gimple_call_lhs(stmt);
-+ if (asm_data.input == NULL_TREE) {
++ asm_data.def_stmt = stmt;
++ asm_data.output = gimple_call_lhs(stmt);
++
++ if (asm_data.output == NULL_TREE) {
+ asm_data.input = gimple_call_arg(stmt, 0);
+ if (is_gimple_constant(asm_data.input))
+ return false;
++ asm_data.output = NULL;
++ create_asm_stmt(TURN_OFF_ASM_STR, build_string(2, "rm"), NULL, &asm_data);
++ return true;
+ }
+
-+ asm_data.output = NULL;
-+ asm_data.def_stmt = stmt;
-+ create_asm_stmt("# size_overflow MARK_TURN_OFF\n\t", build_string(2, "rm"), NULL, &asm_data);
++ create_asm_input(stmt, 0, &asm_data);
++ gcc_assert(asm_data.input != NULL_TREE);
++
++ create_asm_stmt(TURN_OFF_ASM_STR, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
+ return true;
+}
+
@@ -113739,6 +114588,9 @@ index 0000000..a3f9702
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gimple stmt = gsi_stmt(gsi);
+
++ if (is_size_overflow_asm(stmt))
++ continue;
++
+ if (is_gimple_call(stmt))
+ handle_interesting_function(stmt);
+ else if (gimple_code(stmt) == GIMPLE_RETURN)
@@ -114588,7 +115440,7 @@ index 96b919d..c49bb74 100644
+
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 1cf9ccb..b9236e2 100644
+index 1cf9ccb..4a8abb5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
@@ -114629,7 +115481,17 @@ index 1cf9ccb..b9236e2 100644
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_COMPAT
-@@ -2550,7 +2555,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
+@@ -1893,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ int r;
+ struct kvm_vcpu *vcpu, *v;
+
++ if (id >= KVM_MAX_VCPUS)
++ return -EINVAL;
++
+ vcpu = kvm_arch_vcpu_create(kvm, id);
+ if (IS_ERR(vcpu))
+ return PTR_ERR(vcpu);
+@@ -2550,7 +2558,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
@@ -114638,7 +115500,7 @@ index 1cf9ccb..b9236e2 100644
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
-@@ -2651,7 +2656,7 @@ out:
+@@ -2651,7 +2659,7 @@ out:
return r;
}
@@ -114647,7 +115509,7 @@ index 1cf9ccb..b9236e2 100644
.unlocked_ioctl = kvm_dev_ioctl,
.compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
-@@ -2677,7 +2682,7 @@ static void hardware_enable_nolock(void *junk)
+@@ -2677,7 +2685,7 @@ static void hardware_enable_nolock(void *junk)
if (r) {
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -114656,7 +115518,7 @@ index 1cf9ccb..b9236e2 100644
printk(KERN_INFO "kvm: enabling virtualization on "
"CPU%d failed\n", cpu);
}
-@@ -2731,10 +2736,10 @@ static int hardware_enable_all(void)
+@@ -2731,10 +2739,10 @@ static int hardware_enable_all(void)
kvm_usage_count++;
if (kvm_usage_count == 1) {
@@ -114669,7 +115531,7 @@ index 1cf9ccb..b9236e2 100644
hardware_disable_all_nolock();
r = -EBUSY;
}
-@@ -3168,7 +3173,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+@@ -3168,7 +3176,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
@@ -114678,7 +115540,7 @@ index 1cf9ccb..b9236e2 100644
struct module *module)
{
int r;
-@@ -3215,7 +3220,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3215,7 +3223,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
@@ -114687,7 +115549,7 @@ index 1cf9ccb..b9236e2 100644
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto out_free_3;
-@@ -3225,9 +3230,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3225,9 +3233,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
if (r)
goto out_free;
@@ -114699,7 +115561,7 @@ index 1cf9ccb..b9236e2 100644
r = misc_register(&kvm_dev);
if (r) {
-@@ -3237,9 +3244,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3237,9 +3247,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
register_syscore_ops(&kvm_syscore_ops);