Diffstat (limited to 'main/linux-grsec/patch-3.6.11-al4.patch')
-rw-r--r--   main/linux-grsec/patch-3.6.11-al4.patch   16533
1 file changed, 16533 insertions, 0 deletions
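
Among the fixes carried by this patch is a new s390 helper, tod_to_ns() (arch/s390/include/asm/timex.h below), which converts a 64-bit TOD clock value to nanoseconds by multiplying by 125 and shifting right by 9, with the multiplication split into high and low 32-bit halves to avoid overflow. The following standalone C sketch is not part of the patch — the main() driver and the test value are illustrative assumptions — it only reproduces that arithmetic so it can be sanity-checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the tod_to_ns() helper added by the patch:
 * ns = todval / 4.096, computed as (todval * 125) >> 9, with the
 * multiplication split so the upper 32 bits cannot overflow. */
static uint64_t tod_to_ns(uint64_t todval)
{
	uint64_t ns;

	ns  = ((todval >> 32) << 23) * 125;           /* upper 32 bits */
	ns += ((todval & 0xffffffffULL) * 125) >> 9;  /* lower 32 bits */
	return ns;
}

int main(void)
{
	/* 4096 TOD units correspond to one microsecond, i.e. 1000 ns. */
	uint64_t tod = 4096;

	printf("%llu TOD units = %llu ns\n",
	       (unsigned long long)tod, (unsigned long long)tod_to_ns(tod));
	return 0;
}

Built with any C compiler, this prints 1000 ns for 4096 TOD units, matching the divide-by-4.096 relation described in the comment the patch adds.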
diff --git a/main/linux-grsec/patch-3.6.11-al4.patch b/main/linux-grsec/patch-3.6.11-al4.patch
new file mode 100644
index 000000000..70802f681
--- /dev/null
+++ b/main/linux-grsec/patch-3.6.11-al4.patch
@@ -0,0 +1,16533 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 9a6c4da..61ed6f7 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2697,7 +2697,7 @@ S: Maintained
+ F: drivers/net/ethernet/i825xx/eexpress.*
+
+ ETHERNET BRIDGE
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: bridge@lists.linux-foundation.org
+ L: netdev@vger.kernel.org
+ W: http://www.linuxfoundation.org/en/Net:Bridge
+@@ -4420,7 +4420,7 @@ S: Maintained
+
+ MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
+ M: Mirko Lindner <mlindner@marvell.com>
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/ethernet/marvell/sk*
+@@ -4664,7 +4664,7 @@ S: Supported
+ F: drivers/infiniband/hw/nes/
+
+ NETEM NETWORK EMULATOR
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: netem@lists.linux-foundation.org
+ S: Maintained
+ F: net/sched/sch_netem.c
+diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
+index f451539..63bc22c 100644
+--- a/arch/arm/kernel/sched_clock.c
++++ b/arch/arm/kernel/sched_clock.c
+@@ -89,11 +89,11 @@ static void notrace update_sched_clock(void)
+ * detectable in cyc_to_fixed_sched_clock().
+ */
+ raw_local_irq_save(flags);
+- cd.epoch_cyc = cyc;
++ cd.epoch_cyc_copy = cyc;
+ smp_wmb();
+ cd.epoch_ns = ns;
+ smp_wmb();
+- cd.epoch_cyc_copy = cyc;
++ cd.epoch_cyc = cyc;
+ raw_local_irq_restore(flags);
+ }
+
+diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
+index df74518..ab1017b 100644
+--- a/arch/arm/kernel/swp_emulate.c
++++ b/arch/arm/kernel/swp_emulate.c
+@@ -109,10 +109,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
+ {
+ siginfo_t info;
+
++ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, addr) == NULL)
+ info.si_code = SEGV_MAPERR;
+ else
+ info.si_code = SEGV_ACCERR;
++ up_read(&current->mm->mmap_sem);
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
+index bd0e88c..c2ff99c 100644
+--- a/arch/arm/mach-at91/setup.c
++++ b/arch/arm/mach-at91/setup.c
+@@ -104,6 +104,8 @@ static void __init soc_detect(u32 dbgu_base)
+ switch (socid) {
+ case ARCH_ID_AT91RM9200:
+ at91_soc_initdata.type = AT91_SOC_RM9200;
++ if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE)
++ at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
+ at91_boot_soc = at91rm9200_soc;
+ break;
+
+diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+index a611ad3..b6132aa 100644
+--- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
++++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+@@ -463,6 +463,9 @@
+ GPIO76_LCD_PCLK, \
+ GPIO77_LCD_BIAS
+
++/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
++#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
++#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
+
+ extern int keypad_set_wake(unsigned int on);
+ #endif /* __ASM_ARCH_MFP_PXA27X_H */
+diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h
+index b7de471..b802f28 100644
+--- a/arch/arm/mach-pxa/include/mach/smemc.h
++++ b/arch/arm/mach-pxa/include/mach/smemc.h
+@@ -37,6 +37,7 @@
+ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */
+ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */
+ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */
++#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */
+
+ /*
+ * More handy macros for PCMCIA
+diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
+index 4726c24..a2fe795 100644
+--- a/arch/arm/mach-pxa/pxa27x.c
++++ b/arch/arm/mach-pxa/pxa27x.c
+@@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
+ EXPORT_SYMBOL(pxa27x_clear_otgph);
+
+ static unsigned long ac97_reset_config[] = {
+- GPIO113_GPIO,
++ GPIO113_AC97_nRESET_GPIO_HIGH,
+ GPIO113_AC97_nRESET,
+- GPIO95_GPIO,
++ GPIO95_AC97_nRESET_GPIO_HIGH,
+ GPIO95_AC97_nRESET,
+ };
+
+diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
+index 7992305..f38aa89 100644
+--- a/arch/arm/mach-pxa/smemc.c
++++ b/arch/arm/mach-pxa/smemc.c
+@@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void)
+ __raw_writel(csadrcfg[1], CSADRCFG1);
+ __raw_writel(csadrcfg[2], CSADRCFG2);
+ __raw_writel(csadrcfg[3], CSADRCFG3);
++ /* CSMSADRCFG wakes up in its default state (0), so we need to set it */
++ __raw_writel(0x2, CSMSADRCFG);
+ }
+
+ static struct syscore_ops smemc_syscore_ops = {
+@@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = {
+
+ static int __init smemc_init(void)
+ {
+- if (cpu_is_pxa3xx())
++ if (cpu_is_pxa3xx()) {
++ /*
++ * The only documentation we have on the
++ * Chip Select Configuration Register (CSMSADRCFG) is that
++ * it must be programmed to 0x2.
++ * Moreover, in the bit definitions, the second bit
++ * (CSMSADRCFG[1]) is called "SETALWAYS".
++ * Other bits are reserved in this register.
++ */
++ __raw_writel(0x2, CSMSADRCFG);
++
+ register_syscore_ops(&smemc_syscore_ops);
++ }
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-realview/include/mach/board-eb.h b/arch/arm/mach-realview/include/mach/board-eb.h
+index 124bce6..a301e61 100644
+--- a/arch/arm/mach-realview/include/mach/board-eb.h
++++ b/arch/arm/mach-realview/include/mach/board-eb.h
+@@ -47,7 +47,7 @@
+ #define REALVIEW_EB_USB_BASE 0x4F000000 /* USB */
+
+ #ifdef CONFIG_REALVIEW_EB_ARM11MP_REVB
+-#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x1F000000
++#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x10100000
+ #define REALVIEW_EB11MP_L220_BASE 0x10102000 /* L220 registers */
+ #define REALVIEW_EB11MP_SYS_PLD_CTRL1 0xD8 /* Register offset for MPCore sysctl */
+ #else
+diff --git a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
+index 4135de8..13ed33c 100644
+--- a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
++++ b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
+@@ -40,17 +40,17 @@
+ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ bic \rd, \rd, #0xff000
+- ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++ ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ and \rd, \rd, #0x00ff0000
+ teq \rd, #0x00440000 @ is it 2440?
+ 1004:
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ moveq \rd, \rd, lsr #SHIFT_2440TXF
+ tst \rd, #S3C2410_UFSTAT_TXFULL
+ .endm
+
+ .macro fifo_full_s3c2410 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S3C2410_UFSTAT_TXFULL
+ .endm
+
+@@ -68,18 +68,18 @@
+ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ bic \rd, \rd, #0xff000
+- ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++ ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ and \rd, \rd, #0x00ff0000
+ teq \rd, #0x00440000 @ is it 2440?
+
+ 10000:
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ andne \rd, \rd, #S3C2410_UFSTAT_TXMASK
+ andeq \rd, \rd, #S3C2440_UFSTAT_TXMASK
+ .endm
+
+ .macro fifo_level_s3c2410 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S3C2410_UFSTAT_TXMASK
+ .endm
+
+diff --git a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
+index 7615a14..6a21bee 100644
+--- a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
++++ b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
+@@ -31,10 +31,10 @@
+
+ @@ try the interrupt offset register, since it is there
+
+- ldr \irqstat, [ \base, #INTPND ]
++ ldr \irqstat, [\base, #INTPND ]
+ teq \irqstat, #0
+ beq 1002f
+- ldr \irqnr, [ \base, #INTOFFSET ]
++ ldr \irqnr, [\base, #INTOFFSET ]
+ mov \tmp, #1
+ tst \irqstat, \tmp, lsl \irqnr
+ bne 1001f
+diff --git a/arch/arm/mach-s3c24xx/pm-h1940.S b/arch/arm/mach-s3c24xx/pm-h1940.S
+index c93bf2d..6183a68 100644
+--- a/arch/arm/mach-s3c24xx/pm-h1940.S
++++ b/arch/arm/mach-s3c24xx/pm-h1940.S
+@@ -30,4 +30,4 @@
+
+ h1940_pm_return:
+ mov r0, #S3C2410_PA_GPIO
+- ldr pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ]
++ ldr pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO]
+diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+index dd5b638..65200ae 100644
+--- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S
++++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+@@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend)
+ ldr r4, =S3C2410_REFRESH
+ ldr r5, =S3C24XX_MISCCR
+ ldr r6, =S3C2410_CLKCON
+- ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB)
+- ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB)
+- ldr r9, [ r6 ] @ get CLKCON (and ensure in TLB)
++ ldr r7, [r4] @ get REFRESH (and ensure in TLB)
++ ldr r8, [r5] @ get MISCCR (and ensure in TLB)
++ ldr r9, [r6] @ get CLKCON (and ensure in TLB)
+
+ orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command
+ orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
+@@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend)
+ @@ align next bit of code to cache line
+ .align 5
+ s3c2410_do_sleep:
+- streq r7, [ r4 ] @ SDRAM sleep command
+- streq r8, [ r5 ] @ SDRAM power-down config
+- streq r9, [ r6 ] @ CPU sleep
++ streq r7, [r4] @ SDRAM sleep command
++ streq r8, [r5] @ SDRAM power-down config
++ streq r9, [r6] @ CPU sleep
+ 1: beq 1b
+ mov pc, r14
+diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+index c82418e..5adaceb 100644
+--- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S
++++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+@@ -57,12 +57,12 @@ s3c2412_sleep_enter1:
+ * retry, as simply returning causes the system to lock.
+ */
+
+- ldrne r9, [ r1 ]
+- strne r9, [ r1 ]
+- ldrne r9, [ r2 ]
+- strne r9, [ r2 ]
+- ldrne r9, [ r3 ]
+- strne r9, [ r3 ]
++ ldrne r9, [r1]
++ strne r9, [r1]
++ ldrne r9, [r2]
++ strne r9, [r2]
++ ldrne r9, [r3]
++ strne r9, [r3]
+ bne s3c2412_sleep_enter1
+
+ mov pc, r14
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 13f555d..357fc03 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -729,25 +729,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
+ {
++ unsigned long pfn;
++ size_t left = size;
++
++ pfn = page_to_pfn(page) + offset / PAGE_SIZE;
++ offset %= PAGE_SIZE;
++
+ /*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+- size_t left = size;
+ do {
+ size_t len = left;
+ void *vaddr;
+
++ page = pfn_to_page(pfn);
++
+ if (PageHighMem(page)) {
+- if (len + offset > PAGE_SIZE) {
+- if (offset >= PAGE_SIZE) {
+- page += offset / PAGE_SIZE;
+- offset %= PAGE_SIZE;
+- }
++ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+- }
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ vaddr += offset;
+@@ -764,7 +766,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ op(vaddr, len, dir);
+ }
+ offset = 0;
+- page++;
++ pfn++;
+ left -= len;
+ } while (left);
+ }
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index c2fa21d..b68b531 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -498,7 +498,7 @@ static void __init build_mem_type_table(void)
+ #endif
+
+ for (i = 0; i < 16; i++) {
+- unsigned long v = pgprot_val(protection_map[i]);
++ pteval_t v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | user_pgprot);
+ }
+
+diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
+index 207e275..f3a9cff 100644
+--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
++++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
+@@ -14,12 +14,12 @@
+ /* The S5PV210/S5PC110 implementations are as belows. */
+
+ .macro fifo_level_s5pv210 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S5PV210_UFSTAT_TXMASK
+ .endm
+
+ .macro fifo_full_s5pv210 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S5PV210_UFSTAT_TXFULL
+ .endm
+
+@@ -27,7 +27,7 @@
+ * most widely re-used */
+
+ .macro fifo_level_s3c2440 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S3C2440_UFSTAT_TXMASK
+ .endm
+
+@@ -36,7 +36,7 @@
+ #endif
+
+ .macro fifo_full_s3c2440 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S3C2440_UFSTAT_TXFULL
+ .endm
+
+@@ -45,11 +45,11 @@
+ #endif
+
+ .macro senduart,rd,rx
+- strb \rd, [\rx, # S3C2410_UTXH ]
++ strb \rd, [\rx, # S3C2410_UTXH]
+ .endm
+
+ .macro busyuart, rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFCON ]
++ ldr \rd, [\rx, # S3C2410_UFCON]
+ tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
+ beq 1001f @
+ @ FIFO enabled...
+@@ -60,7 +60,7 @@
+
+ 1001:
+ @ busy waiting for non fifo
+- ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
++ ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ tst \rd, #S3C2410_UTRSTAT_TXFE
+ beq 1001b
+
+@@ -68,7 +68,7 @@
+ .endm
+
+ .macro waituart,rd,rx
+- ldr \rd, [ \rx, # S3C2410_UFCON ]
++ ldr \rd, [\rx, # S3C2410_UFCON]
+ tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
+ beq 1001f @
+ @ FIFO enabled...
+@@ -79,7 +79,7 @@
+ b 1002f
+ 1001:
+ @ idle waiting for non fifo
+- ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
++ ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ tst \rd, #S3C2410_UTRSTAT_TXFE
+ beq 1001b
+
+diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
+index cc926c9..323ce1a 100644
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -22,7 +22,7 @@
+ @ IRQs disabled.
+ @
+ ENTRY(do_vfp)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ add r11, r4, #1 @ increment it
+ str r11, [r10, #TI_PREEMPT]
+@@ -35,7 +35,7 @@ ENTRY(do_vfp)
+ ENDPROC(do_vfp)
+
+ ENTRY(vfp_null_entry)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
+
+ __INIT
+ ENTRY(vfp_testing_entry)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
+index ea0349f..dd5e56f 100644
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -168,7 +168,7 @@ vfp_hw_state_valid:
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
+ @ not recognised by VFP
+
+ DBGSTR "not VFP"
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
+index 32567bc..ac12ae2 100644
+--- a/arch/cris/include/asm/io.h
++++ b/arch/cris/include/asm/io.h
+@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
+ #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
+ #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
+ #define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
+-#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)
+-#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)
+-#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)
+-#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)
+-#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)
+-#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)
++static inline void outb(unsigned char data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 1, 1);
++}
++static inline void outw(unsigned short data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 2, 1);
++}
++static inline void outl(unsigned int data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 4, 1);
++}
++static inline void outsb(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 1, count);
++}
++static inline void outsw(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 2, count);
++}
++static inline void outsl(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 4, count);
++}
+
+ /*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index e9a5fd7..69b17a9 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
+ }
+ }
+ #ifdef CONFIG_HOTPLUG_CPU
+- if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
+- (system_state == SYSTEM_RUNNING ||
+- system_state == SYSTEM_BOOTING))
++ if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
+ play_dead();
+ #endif
+ rcu_idle_exit();
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index ee99f23..7df49fa 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -12,11 +12,10 @@
+
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
++#include <linux/mm_types.h>
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+
+-struct vm_area_struct;
+-
+ /*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+@@ -40,7 +39,14 @@ struct vm_area_struct;
+ do{ \
+ *(pteptr) = (pteval); \
+ } while(0)
+-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
++
++extern void purge_tlb_entries(struct mm_struct *, unsigned long);
++
++#define set_pte_at(mm, addr, ptep, pteval) \
++ do { \
++ set_pte(ptep, pteval); \
++ purge_tlb_entries(mm, addr); \
++ } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+
+@@ -466,6 +472,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ old = pte_val(*ptep);
+ new = pte_val(pte_wrprotect(__pte (old)));
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old);
++ purge_tlb_entries(mm, addr);
+ #else
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 9d18189..fa21463 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -420,6 +420,24 @@ void kunmap_parisc(void *addr)
+ EXPORT_SYMBOL(kunmap_parisc);
+ #endif
+
++void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++ unsigned long flags;
++
++ /* Note: purge_tlb_entries can be called at startup with
++ no context. */
++
++ /* Disable preemption while we play with %sr1. */
++ preempt_disable();
++ mtsp(mm->context, 1);
++ purge_tlb_start(flags);
++ pdtlb(addr);
++ pitlb(addr);
++ purge_tlb_end(flags);
++ preempt_enable();
++}
++EXPORT_SYMBOL(purge_tlb_entries);
++
+ void __flush_tlb_range(unsigned long sid, unsigned long start,
+ unsigned long end)
+ {
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 58bddee..9e07bd0 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -422,7 +422,7 @@ _STATIC(__after_prom_start)
+ tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
+ #endif
+
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_RELOCATABLE
+ /*
+ * Check if the kernel has to be running as relocatable kernel based on the
+ * variable __run_at_load, if it is set the kernel is treated as relocatable
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
+index d7f6090..39833e0 100644
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0;
+ static void kexec_smp_down(void *arg)
+ {
+ local_irq_disable();
++ hard_irq_disable();
++
+ mb(); /* make sure our irqs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ while(kexec_all_irq_disabled == 0)
+@@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void)
+ wake_offline_cpus();
+ smp_call_function(kexec_smp_down, NULL, /* wait */0);
+ local_irq_disable();
++ hard_irq_disable();
++
+ mb(); /* make sure IRQs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+@@ -281,6 +285,7 @@ static void kexec_prepare_cpus(void)
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(0, 0);
+ local_irq_disable();
++ hard_irq_disable();
+ }
+
+ #endif /* SMP */
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index e49e931..5395666 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -759,13 +759,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+
+ void update_vsyscall_tz(void)
+ {
+- /* Make userspace gettimeofday spin until we're done. */
+- ++vdso_data->tb_update_count;
+- smp_mb();
+ vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+- smp_mb();
+- ++vdso_data->tb_update_count;
+ }
+
+ static void __init clocksource_init(void)
+diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
+index c8c6157..c39cd0b 100644
+--- a/arch/powerpc/kvm/44x_emulate.c
++++ b/arch/powerpc/kvm/44x_emulate.c
+@@ -76,6 +76,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
++ vcpu->arch.dcr_is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+@@ -94,6 +95,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = kvmppc_get_gpr(vcpu, rs);
+ run->dcr.is_write = 1;
++ vcpu->arch.dcr_is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ emulated = EMULATE_DO_DCR;
+diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
+index 9761206..f0eee75 100644
+--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
++++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
+@@ -57,7 +57,8 @@ static const char *board[] __initdata = {
+ "amcc,makalu",
+ "apm,klondike",
+ "est,hotfoot",
+- "plathome,obs600"
++ "plathome,obs600",
++ NULL
+ };
+
+ static int __init ppc40x_probe(void)
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index fba4d66..4c060bb 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -128,4 +128,32 @@ static inline unsigned long long get_clock_monotonic(void)
+ return get_clock_xt() - sched_clock_base_cc;
+ }
+
++/**
++ * tod_to_ns - convert a TOD format value to nanoseconds
++ * @todval: to be converted TOD format value
++ * Returns: number of nanoseconds that correspond to the TOD format value
++ *
++ * Converting a 64 Bit TOD format value to nanoseconds means that the value
++ * must be divided by 4.096. In order to achieve that we multiply with 125
++ * and divide by 512:
++ *
++ * ns = (todval * 125) >> 9;
++ *
++ * In order to avoid an overflow with the multiplication we can rewrite this.
++ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
++ * we end up with
++ *
++ * ns = ((2^32 * th + tl) * 125 ) >> 9;
++ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
++ *
++ */
++static inline unsigned long long tod_to_ns(unsigned long long todval)
++{
++ unsigned long long ns;
++
++ ns = ((todval >> 32) << 23) * 125;
++ ns += ((todval & 0xffffffff) * 125) >> 9;
++ return ns;
++}
++
+ #endif
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index dcec960..18c39ad 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
+ */
+ unsigned long long notrace __kprobes sched_clock(void)
+ {
+- return (get_clock_monotonic() * 125) >> 9;
++ return tod_to_ns(get_clock_monotonic());
+ }
+
+ /*
+@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
+ nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+ do_div(nsecs, 125);
+ S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
++ /* Program the maximum value if we have an overflow (== year 2042) */
++ if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
++ S390_lowcore.clock_comparator = -1ULL;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return 0;
+ }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index b7bc1aa..a80b585 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -390,7 +390,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+- sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
++ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+ hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+ VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index d470ccb..ef49b98 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -753,6 +753,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ } else
+ prefix = 0;
+
++ /*
++ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
++ * copying in vcpu load/put. Lets update our copies before we save
++ * it into the save area
++ */
++ save_fp_regs(&vcpu->arch.guest_fpregs);
++ save_access_regs(vcpu->run->s.regs.acrs);
++
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
+ vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ return -EFAULT;
+diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
+index f38112b..978b7fd 100644
+--- a/arch/sh/include/asm/elf.h
++++ b/arch/sh/include/asm/elf.h
+@@ -202,9 +202,9 @@ extern void __kernel_vsyscall;
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+ else \
+- NEW_AUX_ENT(AT_IGNORE, 0);
++ NEW_AUX_ENT(AT_IGNORE, 0)
+ #else
+-#define VSYSCALL_AUX_ENT
++#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
+ #endif /* CONFIG_VSYSCALL */
+
+ #ifdef CONFIG_SH_FPU
+diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
+index 1770610..f368cef 100644
+--- a/arch/sparc/include/asm/hugetlb.h
++++ b/arch/sparc/include/asm/hugetlb.h
+@@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
+ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+ {
+- ptep_set_wrprotect(mm, addr, ptep);
++ pte_t old_pte = *ptep;
++ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ }
+
+ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+ {
+- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
++ int changed = !pte_same(*ptep, pte);
++ if (changed) {
++ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
++ flush_tlb_page(vma, addr);
++ }
++ return changed;
+ }
+
+ static inline pte_t huge_ptep_get(pte_t *ptep)
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 50a1d1f..01897ac 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1246,10 +1246,6 @@ config NODES_SHIFT
+ Specify the maximum number of NUMA Nodes available on the target
+ system. Increases memory reserved to accommodate various tables.
+
+-config HAVE_ARCH_ALLOC_REMAP
+- def_bool y
+- depends on X86_32 && NUMA
+-
+ config ARCH_HAVE_MEMORY_PRESENT
+ def_bool y
+ depends on X86_32 && DISCONTIGMEM
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 20e5f7b..f6d477a 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -204,7 +204,7 @@ sysexit_from_sys_call:
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+- sti
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%esi /* second arg, syscall return value */
+ cmpl $-MAX_ERRNO,%eax /* is it an error ? */
+ jbe 1f
+@@ -214,7 +214,7 @@ sysexit_from_sys_call:
+ call __audit_syscall_exit
+ movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+- cli
++ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+ testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ jz \exit
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 029189d..da37433 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+ #endif /* CONFIG_X86_32 */
+
+ extern int add_efi_memmap;
++extern unsigned long x86_efi_facility;
+ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
+ extern int efi_memblock_x86_reserve_range(void);
+ extern void efi_call_phys_prelog(void);
+diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
+index eb05fb3..8a9b3e2 100644
+--- a/arch/x86/include/asm/mmzone_32.h
++++ b/arch/x86/include/asm/mmzone_32.h
+@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
+
+ #include <asm/numaq.h>
+
+-extern void resume_map_numa_kva(pgd_t *pgd);
+-
+-#else /* !CONFIG_NUMA */
+-
+-static inline void resume_map_numa_kva(pgd_t *pgd) {}
+-
+ #endif /* CONFIG_NUMA */
+
+ #ifdef CONFIG_DISCONTIGMEM
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index c3520d7..3f3dd52 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index e03a1e1..562a76d 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
+ }
+ early_param("x2apic_phys", set_x2apic_phys_mode);
+
+-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
++static bool x2apic_fadt_phys(void)
+ {
+- if (x2apic_phys)
+- return x2apic_enabled();
+- else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+- (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+- x2apic_enabled()) {
++ if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
++ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
+ printk(KERN_DEBUG "System requires x2apic physical mode\n");
+- return 1;
++ return true;
+ }
+- else
+- return 0;
++ return false;
++}
++
++static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
++{
++ return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
+ }
+
+ static void
+@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
+
+ static int x2apic_phys_probe(void)
+ {
+- if (x2apic_mode && x2apic_phys)
++ if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+ return 1;
+
+ return apic == &apic_x2apic_phys;
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 0a630dd..646d192 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void)
+ printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
+
+- clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
++ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
++ clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+ }
+
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 8f8e8ee..2a6919e 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback)
+ lea 16(%esp),%esp
+ CFI_ADJUST_CFA_OFFSET -16
+ jz 5f
+- addl $16,%esp
+ jmp iret_exc
+ 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index eb11369..8563b64 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file)
+ unsigned int cpu;
+ struct cpuinfo_x86 *c;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ cpu = iminor(file->f_path.dentry->d_inode);
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -ENXIO; /* No such CPU */
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 52190a9..3f20ab4 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -592,7 +592,7 @@ static void native_machine_emergency_restart(void)
+ break;
+
+ case BOOT_EFI:
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi.reset_system(reboot_mode ?
+ EFI_RESET_WARM :
+ EFI_RESET_COLD,
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 5cee802..771ff4d 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -613,6 +613,83 @@ static __init void reserve_ibft_region(void)
+
+ static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
+
++static bool __init snb_gfx_workaround_needed(void)
++{
++#ifdef CONFIG_PCI
++ int i;
++ u16 vendor, devid;
++ static const u16 snb_ids[] = {
++ 0x0102,
++ 0x0112,
++ 0x0122,
++ 0x0106,
++ 0x0116,
++ 0x0126,
++ 0x010a,
++ };
++
++ /* Assume no if something weird is going on with PCI */
++ if (!early_pci_allowed())
++ return false;
++
++ vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
++ if (vendor != 0x8086)
++ return false;
++
++ devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
++ for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
++ if (devid == snb_ids[i])
++ return true;
++#endif
++
++ return false;
++}
++
++/*
++ * Sandy Bridge graphics has trouble with certain ranges, exclude
++ * them from allocation.
++ */
++static void __init trim_snb_memory(void)
++{
++ static const unsigned long bad_pages[] = {
++ 0x20050000,
++ 0x20110000,
++ 0x20130000,
++ 0x20138000,
++ 0x40004000,
++ };
++ int i;
++
++ if (!snb_gfx_workaround_needed())
++ return;
++
++ printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
++
++ /*
++ * Reserve all memory below the 1 MB mark that has not
++ * already been reserved.
++ */
++ memblock_reserve(0, 1<<20);
++
++ for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
++ if (memblock_reserve(bad_pages[i], PAGE_SIZE))
++ printk(KERN_WARNING "failed to reserve 0x%08lx\n",
++ bad_pages[i]);
++ }
++}
++
++/*
++ * Here we put platform-specific memory range workarounds, i.e.
++ * memory known to be corrupt or otherwise in need to be reserved on
++ * specific platforms.
++ *
++ * If this gets used more widely it could use a real dispatch mechanism.
++ */
++static void __init trim_platform_memory_ranges(void)
++{
++ trim_snb_memory();
++}
++
+ static void __init trim_bios_range(void)
+ {
+ /*
+@@ -633,6 +710,7 @@ static void __init trim_bios_range(void)
+ * take them out.
+ */
+ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+
+@@ -732,15 +810,15 @@ void __init setup_arch(char **cmdline_p)
+ #ifdef CONFIG_EFI
+ if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+ "EL32", 4)) {
+- efi_enabled = 1;
+- efi_64bit = false;
++ set_bit(EFI_BOOT, &x86_efi_facility);
+ } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+ "EL64", 4)) {
+- efi_enabled = 1;
+- efi_64bit = true;
++ set_bit(EFI_BOOT, &x86_efi_facility);
++ set_bit(EFI_64BIT, &x86_efi_facility);
+ }
+- if (efi_enabled && efi_memblock_x86_reserve_range())
+- efi_enabled = 0;
++
++ if (efi_enabled(EFI_BOOT))
++ efi_memblock_x86_reserve_range();
+ #endif
+
+ x86_init.oem.arch_setup();
+@@ -813,7 +891,7 @@ void __init setup_arch(char **cmdline_p)
+
+ finish_e820_parsing();
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_BOOT))
+ efi_init();
+
+ dmi_scan_machine();
+@@ -896,7 +974,7 @@ void __init setup_arch(char **cmdline_p)
+ * The EFI specification says that boot service code won't be called
+ * after ExitBootServices(). This is, in fact, a lie.
+ */
+- if (efi_enabled)
++ if (efi_enabled(EFI_MEMMAP))
+ efi_reserve_boot_services();
+
+ /* preallocate 4k for mptable mpc */
+@@ -911,6 +989,8 @@ void __init setup_arch(char **cmdline_p)
+
+ setup_real_mode();
+
++ trim_platform_memory_ranges();
++
+ init_gbpages();
+
+ /* max_pfn_mapped is updated here */
+@@ -1035,7 +1115,7 @@ void __init setup_arch(char **cmdline_p)
+
+ #ifdef CONFIG_VT
+ #if defined(CONFIG_VGA_CONSOLE)
+- if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ conswitchp = &vga_con;
+ #elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+@@ -1050,14 +1130,14 @@ void __init setup_arch(char **cmdline_p)
+ arch_init_ideal_nops();
+
+ #ifdef CONFIG_EFI
+- /* Once setup is done above, disable efi_enabled on mismatched
+- * firmware/kernel archtectures since there is no support for
+- * runtime services.
++ /* Once setup is done above, unmap the EFI memory map on
++ * mismatched firmware/kernel archtectures since there is no
++ * support for runtime services.
+ */
+- if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
++ if (efi_enabled(EFI_BOOT) &&
++ IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
+ pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+ efi_unmap_memmap();
+- efi_enabled = 0;
+ }
+ #endif
+ }
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 76dcd9d..c6b10e2 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -747,13 +747,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ return;
+ }
+ #endif
++ /* Kernel addresses are always protection faults: */
++ if (address >= TASK_SIZE)
++ error_code |= PF_PROT;
+
+- if (unlikely(show_unhandled_signals))
++ if (likely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
+
+- /* Kernel addresses are always protection faults: */
+ tsk->thread.cr2 = address;
+- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = X86_TRAP_PF;
+
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 3baff25..ce42da7 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -829,6 +829,9 @@ int kern_addr_valid(unsigned long addr)
+ if (pud_none(*pud))
+ return 0;
+
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 2d125be..8504f36 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -193,7 +193,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
+ static void __init setup_node_data(int nid, u64 start, u64 end)
+ {
+ const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
+- bool remapped = false;
+ u64 nd_pa;
+ void *nd;
+ int tnid;
+@@ -205,37 +204,28 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
+ if (end && (end - start) < NODE_MIN_SIZE)
+ return;
+
+- /* initialize remap allocator before aligning to ZONE_ALIGN */
+- init_alloc_remap(nid, start, end);
+-
+ start = roundup(start, ZONE_ALIGN);
+
+ printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
+
+ /*
+- * Allocate node data. Try remap allocator first, node-local
+- * memory and then any node. Never allocate in DMA zone.
++ * Allocate node data. Try node-local memory and then any node.
++ * Never allocate in DMA zone.
+ */
+- nd = alloc_remap(nid, nd_size);
+- if (nd) {
+- nd_pa = __pa(nd);
+- remapped = true;
+- } else {
+- nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+- if (!nd_pa) {
+- pr_err("Cannot find %zu bytes in node %d\n",
+- nd_size, nid);
+- return;
+- }
+- nd = __va(nd_pa);
++ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
++ if (!nd_pa) {
++ pr_err("Cannot find %zu bytes in node %d\n",
++ nd_size, nid);
++ return;
+ }
++ nd = __va(nd_pa);
+
+ /* report and initialize */
+- printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
+- nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
++ printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]\n",
++ nd_pa, nd_pa + nd_size - 1);
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+- if (!remapped && tnid != nid)
++ if (tnid != nid)
+ printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);
+
+ node_data[nid] = nd;
+diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
+index 534255a..73a6d73 100644
+--- a/arch/x86/mm/numa_32.c
++++ b/arch/x86/mm/numa_32.c
+@@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
+
+ extern unsigned long highend_pfn, highstart_pfn;
+
+-#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+-
+-static void *node_remap_start_vaddr[MAX_NUMNODES];
+-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+-
+-/*
+- * Remap memory allocator
+- */
+-static unsigned long node_remap_start_pfn[MAX_NUMNODES];
+-static void *node_remap_end_vaddr[MAX_NUMNODES];
+-static void *node_remap_alloc_vaddr[MAX_NUMNODES];
+-
+-/**
+- * alloc_remap - Allocate remapped memory
+- * @nid: NUMA node to allocate memory from
+- * @size: The size of allocation
+- *
+- * Allocate @size bytes from the remap area of NUMA node @nid. The
+- * size of the remap area is predetermined by init_alloc_remap() and
+- * only the callers considered there should call this function. For
+- * more info, please read the comment on top of init_alloc_remap().
+- *
+- * The caller must be ready to handle allocation failure from this
+- * function and fall back to regular memory allocator in such cases.
+- *
+- * CONTEXT:
+- * Single CPU early boot context.
+- *
+- * RETURNS:
+- * Pointer to the allocated memory on success, %NULL on failure.
+- */
+-void *alloc_remap(int nid, unsigned long size)
+-{
+- void *allocation = node_remap_alloc_vaddr[nid];
+-
+- size = ALIGN(size, L1_CACHE_BYTES);
+-
+- if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
+- return NULL;
+-
+- node_remap_alloc_vaddr[nid] += size;
+- memset(allocation, 0, size);
+-
+- return allocation;
+-}
+-
+-#ifdef CONFIG_HIBERNATION
+-/**
+- * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+- * during resume from hibernation
+- * @pgd_base - temporary resume page directory
+- */
+-void resume_map_numa_kva(pgd_t *pgd_base)
+-{
+- int node;
+-
+- for_each_online_node(node) {
+- unsigned long start_va, start_pfn, nr_pages, pfn;
+-
+- start_va = (unsigned long)node_remap_start_vaddr[node];
+- start_pfn = node_remap_start_pfn[node];
+- nr_pages = (node_remap_end_vaddr[node] -
+- node_remap_start_vaddr[node]) >> PAGE_SHIFT;
+-
+- printk(KERN_DEBUG "%s: node %d\n", __func__, node);
+-
+- for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
+- unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+- pgd_t *pgd = pgd_base + pgd_index(vaddr);
+- pud_t *pud = pud_offset(pgd, vaddr);
+- pmd_t *pmd = pmd_offset(pud, vaddr);
+-
+- set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+- PAGE_KERNEL_LARGE_EXEC));
+-
+- printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+- __func__, vaddr, start_pfn + pfn);
+- }
+- }
+-}
+-#endif
+-
+-/**
+- * init_alloc_remap - Initialize remap allocator for a NUMA node
+- * @nid: NUMA node to initizlie remap allocator for
+- *
+- * NUMA nodes may end up without any lowmem. As allocating pgdat and
+- * memmap on a different node with lowmem is inefficient, a special
+- * remap allocator is implemented which can be used by alloc_remap().
+- *
+- * For each node, the amount of memory which will be necessary for
+- * pgdat and memmap is calculated and two memory areas of the size are
+- * allocated - one in the node and the other in lowmem; then, the area
+- * in the node is remapped to the lowmem area.
+- *
+- * As pgdat and memmap must be allocated in lowmem anyway, this
+- * doesn't waste lowmem address space; however, the actual lowmem
+- * which gets remapped over is wasted. The amount shouldn't be
+- * problematic on machines this feature will be used.
+- *
+- * Initialization failure isn't fatal. alloc_remap() is used
+- * opportunistically and the callers will fall back to other memory
+- * allocation mechanisms on failure.
+- */
+-void __init init_alloc_remap(int nid, u64 start, u64 end)
+-{
+- unsigned long start_pfn = start >> PAGE_SHIFT;
+- unsigned long end_pfn = end >> PAGE_SHIFT;
+- unsigned long size, pfn;
+- u64 node_pa, remap_pa;
+- void *remap_va;
+-
+- /*
+- * The acpi/srat node info can show hot-add memroy zones where
+- * memory could be added but not currently present.
+- */
+- printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
+- nid, start_pfn, end_pfn);
+-
+- /* calculate the necessary space aligned to large page size */
+- size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
+- size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+- size = ALIGN(size, LARGE_PAGE_BYTES);
+-
+- /* allocate node memory and the lowmem remap area */
+- node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
+- if (!node_pa) {
+- pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
+- size, nid);
+- return;
+- }
+- memblock_reserve(node_pa, size);
+-
+- remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+- max_low_pfn << PAGE_SHIFT,
+- size, LARGE_PAGE_BYTES);
+- if (!remap_pa) {
+- pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
+- size, nid);
+- memblock_free(node_pa, size);
+- return;
+- }
+- memblock_reserve(remap_pa, size);
+- remap_va = phys_to_virt(remap_pa);
+-
+- /* perform actual remap */
+- for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
+- set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
+- (node_pa >> PAGE_SHIFT) + pfn,
+- PAGE_KERNEL_LARGE);
+-
+- /* initialize remap allocator parameters */
+- node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
+- node_remap_start_vaddr[nid] = remap_va;
+- node_remap_end_vaddr[nid] = remap_va + size;
+- node_remap_alloc_vaddr[nid] = remap_va;
+-
+- printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
+- nid, node_pa, node_pa + size, remap_va, remap_va + size);
+-}
+-
+ void __init initmem_init(void)
+ {
+ x86_numa_init();
+diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
+index 7178c3a..ad86ec9 100644
+--- a/arch/x86/mm/numa_internal.h
++++ b/arch/x86/mm/numa_internal.h
+@@ -21,12 +21,6 @@ void __init numa_reset_distance(void);
+
+ void __init x86_numa_init(void);
+
+-#ifdef CONFIG_X86_64
+-static inline void init_alloc_remap(int nid, u64 start, u64 end) { }
+-#else
+-void __init init_alloc_remap(int nid, u64 start, u64 end);
+-#endif
+-
+ #ifdef CONFIG_NUMA_EMU
+ void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 72d8899..3705bb0 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -50,9 +50,6 @@
+
+ #define EFI_DEBUG 1
+
+-int efi_enabled;
+-EXPORT_SYMBOL(efi_enabled);
+-
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+@@ -68,19 +65,28 @@ EXPORT_SYMBOL(efi);
+
+ struct efi_memory_map memmap;
+
+-bool efi_64bit;
+-
+ static struct efi efi_phys __initdata;
+ static efi_system_table_t efi_systab __initdata;
+
+ static inline bool efi_is_native(void)
+ {
+- return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
++ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
++}
++
++unsigned long x86_efi_facility;
++
++/*
++ * Returns 1 if 'facility' is enabled, 0 otherwise.
++ */
++int efi_enabled(int facility)
++{
++ return test_bit(facility, &x86_efi_facility) != 0;
+ }
++EXPORT_SYMBOL(efi_enabled);
+
+ static int __init setup_noefi(char *arg)
+ {
+- efi_enabled = 0;
++ clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ return 0;
+ }
+ early_param("noefi", setup_noefi);
+@@ -425,6 +431,7 @@ void __init efi_reserve_boot_services(void)
+
+ void __init efi_unmap_memmap(void)
+ {
++ clear_bit(EFI_MEMMAP, &x86_efi_facility);
+ if (memmap.map) {
+ early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
+ memmap.map = NULL;
+@@ -459,7 +466,7 @@ void __init efi_free_boot_services(void)
+
+ static int __init efi_systab_init(void *phys)
+ {
+- if (efi_64bit) {
++ if (efi_enabled(EFI_64BIT)) {
+ efi_system_table_64_t *systab64;
+ u64 tmp = 0;
+
+@@ -551,7 +558,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
+ void *config_tables, *tablep;
+ int i, sz;
+
+- if (efi_64bit)
++ if (efi_enabled(EFI_64BIT))
+ sz = sizeof(efi_config_table_64_t);
+ else
+ sz = sizeof(efi_config_table_32_t);
+@@ -571,7 +578,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
+ efi_guid_t guid;
+ unsigned long table;
+
+- if (efi_64bit) {
++ if (efi_enabled(EFI_64BIT)) {
+ u64 table64;
+ guid = ((efi_config_table_64_t *)tablep)->guid;
+ table64 = ((efi_config_table_64_t *)tablep)->table;
+@@ -683,7 +690,6 @@ void __init efi_init(void)
+ if (boot_params.efi_info.efi_systab_hi ||
+ boot_params.efi_info.efi_memmap_hi) {
+ pr_info("Table located above 4GB, disabling EFI.\n");
+- efi_enabled = 0;
+ return;
+ }
+ efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
+@@ -693,10 +699,10 @@ void __init efi_init(void)
+ ((__u64)boot_params.efi_info.efi_systab_hi<<32));
+ #endif
+
+- if (efi_systab_init(efi_phys.systab)) {
+- efi_enabled = 0;
++ if (efi_systab_init(efi_phys.systab))
+ return;
+- }
++
++ set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+
+ /*
+ * Show what we know for posterity
+@@ -714,10 +720,10 @@ void __init efi_init(void)
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff, vendor);
+
+- if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) {
+- efi_enabled = 0;
++ if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
+ return;
+- }
++
++ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
+
+ /*
+ * Note: We currently don't support runtime services on an EFI
+@@ -726,15 +732,17 @@ void __init efi_init(void)
+
+ if (!efi_is_native())
+ pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+- else if (efi_runtime_init()) {
+- efi_enabled = 0;
+- return;
++ else {
++ if (efi_runtime_init())
++ return;
++ set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ }
+
+- if (efi_memmap_init()) {
+- efi_enabled = 0;
++ if (efi_memmap_init())
+ return;
+- }
++
++ set_bit(EFI_MEMMAP, &x86_efi_facility);
++
+ #ifdef CONFIG_X86_32
+ if (efi_is_native()) {
+ x86_platform.get_wallclock = efi_get_time;
+@@ -900,7 +908,7 @@ void __init efi_enter_virtual_mode(void)
+ *
+ * Call EFI services through wrapper functions.
+ */
+- efi.runtime_version = efi_systab.fw_revision;
++ efi.runtime_version = efi_systab.hdr.revision;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+@@ -943,6 +951,9 @@ u64 efi_mem_attributes(unsigned long phys_addr)
+ efi_memory_desc_t *md;
+ void *p;
+
++ if (!efi_enabled(EFI_MEMMAP))
++ return 0;
++
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+ if ((md->phys_addr <= phys_addr) &&
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index ac3aa54..0fba86d 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -38,7 +38,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+
+-static pgd_t save_pgd __initdata;
++static pgd_t *save_pgd __initdata;
+ static unsigned long efi_flags __initdata;
+
+ static void __init early_code_mapping_set_exec(int executable)
+@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
+ void __init efi_call_phys_prelog(void)
+ {
+ unsigned long vaddress;
++ int pgd;
++ int n_pgds;
+
+ early_code_mapping_set_exec(1);
+ local_irq_save(efi_flags);
+- vaddress = (unsigned long)__va(0x0UL);
+- save_pgd = *pgd_offset_k(0x0UL);
+- set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
++
++ n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
++ save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
++
++ for (pgd = 0; pgd < n_pgds; pgd++) {
++ save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
++ vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
++ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
++ }
+ __flush_tlb_all();
+ }
+
+@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+- set_pgd(pgd_offset_k(0x0UL), save_pgd);
++ int pgd;
++ int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
++ for (pgd = 0; pgd < n_pgds; pgd++)
++ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
++ kfree(save_pgd);
+ __flush_tlb_all();
+ local_irq_restore(efi_flags);
+ early_code_mapping_set_exec(0);
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
+index 74202c1..7d28c88 100644
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -129,8 +129,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
+ }
+ }
+
+- resume_map_numa_kva(pgd_base);
+-
+ return 0;
+ }
+
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 83e866d..f7a080e 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+ if (per_cpu(lock_spinners, cpu) == xl) {
+ ADD_STATS(released_slow_kicked, 1);
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+- break;
+ }
+ }
+ }
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index f9643fc..33ca6e4 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -89,11 +89,11 @@ ENTRY(xen_iret)
+ */
+ #ifdef CONFIG_SMP
+ GET_THREAD_INFO(%eax)
+- movl TI_cpu(%eax), %eax
+- movl __per_cpu_offset(,%eax,4), %eax
+- mov xen_vcpu(%eax), %eax
++ movl %ss:TI_cpu(%eax), %eax
++ movl %ss:__per_cpu_offset(,%eax,4), %eax
++ mov %ss:xen_vcpu(%eax), %eax
+ #else
+- movl xen_vcpu, %eax
++ movl %ss:xen_vcpu, %eax
+ #endif
+
+ /* check IF state we're restoring */
+@@ -106,11 +106,11 @@ ENTRY(xen_iret)
+ * resuming the code, so we don't have to be worried about
+ * being preempted to another CPU.
+ */
+- setz XEN_vcpu_info_mask(%eax)
++ setz %ss:XEN_vcpu_info_mask(%eax)
+ xen_iret_start_crit:
+
+ /* check for unmasked and pending */
+- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
++ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
+
+ /*
+ * If there's something pending, mask events again so we can
+@@ -118,7 +118,7 @@ xen_iret_start_crit:
+ * touch XEN_vcpu_info_mask.
+ */
+ jne 1f
+- movb $1, XEN_vcpu_info_mask(%eax)
++ movb $1, %ss:XEN_vcpu_info_mask(%eax)
+
+ 1: popl %eax
+
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 9eaf708..251435a 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
+ return acpi_rsdp;
+ #endif
+
+- if (efi_enabled) {
++ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index ad3730b..aac684d 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1009,6 +1009,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
+ return -EINVAL;
+ }
+
++ if (!dev)
++ return -EINVAL;
++
+ dev->cpu = pr->id;
+
+ if (max_cstate == 0)
+@@ -1196,6 +1199,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ }
+
+ /* Populate Updated C-state information */
++ acpi_processor_get_power_info(pr);
+ acpi_processor_setup_cpuidle_states(pr);
+
+ /* Enable all cpuidle devices */
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index d1ecca2..f1fcaca 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -807,8 +807,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
+ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+ {
+ struct acpi_device_id button_device_ids[] = {
+- {"PNP0C0D", 0},
+ {"PNP0C0C", 0},
++ {"PNP0C0D", 0},
+ {"PNP0C0E", 0},
+ {"", 0},
+ };
+@@ -820,6 +820,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+ /* Power button, Lid switch always enable wakeup */
+ if (!acpi_match_device_ids(device, button_device_ids)) {
+ device->wakeup.flags.run_wake = 1;
++ if (!acpi_match_device_ids(device, &button_device_ids[1])) {
++ /* Do not use Lid/sleep button for S5 wakeup */
++ if (device->wakeup.sleep_state == ACPI_STATE_S5)
++ device->wakeup.sleep_state = ACPI_STATE_S4;
++ }
+ device_set_wakeup_capable(&device->dev, true);
+ return;
+ }
+@@ -1175,7 +1180,7 @@ static void acpi_device_set_id(struct acpi_device *device)
+ acpi_add_id(device, ACPI_DOCK_HID);
+ else if (!acpi_ibm_smbus_match(device))
+ acpi_add_id(device, ACPI_SMBUS_IBM_HID);
+- else if (!acpi_device_hid(device) &&
++ else if (list_empty(&device->pnp.ids) &&
+ ACPI_IS_ROOT_DEVICE(device->parent)) {
+ acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 847ed55..813aa38 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -109,6 +109,180 @@ void __init acpi_old_suspend_ordering(void)
+ old_suspend_ordering = true;
+ }
+
++static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
++{
++ acpi_old_suspend_ordering();
++ return 0;
++}
++
++static int __init init_nvs_nosave(const struct dmi_system_id *d)
++{
++ acpi_nvs_nosave();
++ return 0;
++}
++
++static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Abit KN9 (nForce4 variant)",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
++ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "HP xw4600 Workstation",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Panasonic CF51-2L",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "Matsushita Electric Industrial Co.,Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW21E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB17FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-SR11M",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Everex StepNote Series",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB1Z1E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-NW130D",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCCW29FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Averatec AV1020-ED2",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus A8N-SLI DELUXE",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus A8N-SLI Premium",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-SR26GN_P",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB1S1E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW520F",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54C",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54HR",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
++ },
++ },
++ {},
++};
++
++static void acpi_sleep_dmi_check(void)
++{
++ dmi_check_system(acpisleep_dmi_table);
++}
++
+ /**
+ * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
+ */
+@@ -224,6 +398,7 @@ static void acpi_pm_end(void)
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ #define acpi_target_sleep_state ACPI_STATE_S0
++static inline void acpi_sleep_dmi_check(void) {}
+ #endif /* CONFIG_ACPI_SLEEP */
+
+ #ifdef CONFIG_SUSPEND
+@@ -382,175 +557,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
+ .end = acpi_pm_end,
+ .recover = acpi_pm_finish,
+ };
+-
+-static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+-{
+- old_suspend_ordering = true;
+- return 0;
+-}
+-
+-static int __init init_nvs_nosave(const struct dmi_system_id *d)
+-{
+- acpi_nvs_nosave();
+- return 0;
+-}
+-
+-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Abit KN9 (nForce4 variant)",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+- DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "HP xw4600 Workstation",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Panasonic CF51-2L",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR,
+- "Matsushita Electric Industrial Co.,Ltd."),
+- DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-FW21E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB17FX",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-SR11M",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Everex StepNote Series",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB1Z1E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-NW130D",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCCW29FX",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Averatec AV1020-ED2",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus A8N-SLI DELUXE",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus A8N-SLI Premium",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-SR26GN_P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB1S1E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-FW520F",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Asus K54C",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Asus K54HR",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+- },
+- },
+- {},
+-};
+ #endif /* CONFIG_SUSPEND */
+
+ #ifdef CONFIG_HIBERNATION
+@@ -881,13 +887,13 @@ int __init acpi_sleep_init(void)
+ u8 type_a, type_b;
+ #ifdef CONFIG_SUSPEND
+ int i = 0;
+-
+- dmi_check_system(acpisleep_dmi_table);
+ #endif
+
+ if (acpi_disabled)
+ return 0;
+
++ acpi_sleep_dmi_check();
++
+ sleep_states[ACPI_STATE_S0] = 1;
+ printk(KERN_INFO PREFIX "(supports S0");
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 7862d17..4979127 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -53,6 +53,7 @@
+
+ enum {
+ AHCI_PCI_BAR_STA2X11 = 0,
++ AHCI_PCI_BAR_ENMOTUS = 2,
+ AHCI_PCI_BAR_STANDARD = 5,
+ };
+
+@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
++ /* Enmotus */
++ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
++
+ /* Generic, PCI class code for AHCI */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+@@ -1098,9 +1102,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ dev_info(&pdev->dev,
+ "PDC42819 can only drive SATA devices with this driver\n");
+
+- /* The Connext uses non-standard BAR */
++ /* Both Connext and Enmotus devices use non-standard BARs */
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
++ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
++ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
+
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 8e1039c..8789aef 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2541,6 +2541,7 @@ int ata_bus_probe(struct ata_port *ap)
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
++ dev->dma_mode = 0xff;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 7d4535e..105e31f 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2653,6 +2653,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
++ dev->dma_mode = 0xff;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 8ec81ca..9f8b751 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+- if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
++ if (atadev && ap->ops->sw_activity_show &&
++ (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ return ap->ops->sw_activity_show(atadev, buf);
+ return -EINVAL;
+ }
+@@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+ enum sw_activity val;
+ int rc;
+
+- if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
++ if (atadev && ap->ops->sw_activity_store &&
++ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ val = simple_strtoul(buf, NULL, 0);
+ switch (val) {
+ case OFF: case BLINK_ON: case BLINK_OFF:
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index 489c817..fb0dd87 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -147,6 +147,10 @@ struct pdc_port_priv {
+ dma_addr_t pkt_dma;
+ };
+
++struct pdc_host_priv {
++ spinlock_t hard_reset_lock;
++};
++
+ static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
+ void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+ void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
+ unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
++ struct pdc_host_priv *hpriv = ap->host->private_data;
+ u8 tmp;
+
+- spin_lock(&ap->host->lock);
++ spin_lock(&hpriv->hard_reset_lock);
+
+ tmp = readb(pcictl_b1_mmio);
+ tmp &= ~(0x10 << ata_no);
+@@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
+ writeb(tmp, pcictl_b1_mmio);
+ readb(pcictl_b1_mmio); /* flush */
+
+- spin_unlock(&ap->host->lock);
++ spin_unlock(&hpriv->hard_reset_lock);
+ }
+
+ static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+@@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
+ const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
+ const struct ata_port_info *ppi[PDC_MAX_PORTS];
+ struct ata_host *host;
++ struct pdc_host_priv *hpriv;
+ void __iomem *host_mmio;
+ int n_ports, i, rc;
+ int is_sataii_tx4;
+@@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
+ dev_err(&pdev->dev, "failed to allocate host\n");
+ return -ENOMEM;
+ }
++ hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
++ if (!hpriv)
++ return -ENOMEM;
++ spin_lock_init(&hpriv->hard_reset_lock);
++ host->private_data = hpriv;
+ host->iomap = pcim_iomap_table(pdev);
+
+ is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
+diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
+index 6a0955e..53ecac5 100644
+--- a/drivers/atm/iphase.h
++++ b/drivers/atm/iphase.h
+@@ -636,82 +636,82 @@ struct rx_buf_desc {
+ #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
+ #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
+
+-typedef volatile u_int freg_t;
++typedef volatile u_int ffreg_t;
+ typedef u_int rreg_t;
+
+ typedef struct _ffredn_t {
+- freg_t idlehead_high; /* Idle cell header (high) */
+- freg_t idlehead_low; /* Idle cell header (low) */
+- freg_t maxrate; /* Maximum rate */
+- freg_t stparms; /* Traffic Management Parameters */
+- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+- freg_t rm_type; /* */
+- u_int filler5[0x17 - 0x06];
+- freg_t cmd_reg; /* Command register */
+- u_int filler18[0x20 - 0x18];
+- freg_t cbr_base; /* CBR Pointer Base */
+- freg_t vbr_base; /* VBR Pointer Base */
+- freg_t abr_base; /* ABR Pointer Base */
+- freg_t ubr_base; /* UBR Pointer Base */
+- u_int filler24;
+- freg_t vbrwq_base; /* VBR Wait Queue Base */
+- freg_t abrwq_base; /* ABR Wait Queue Base */
+- freg_t ubrwq_base; /* UBR Wait Queue Base */
+- freg_t vct_base; /* Main VC Table Base */
+- freg_t vcte_base; /* Extended Main VC Table Base */
+- u_int filler2a[0x2C - 0x2A];
+- freg_t cbr_tab_beg; /* CBR Table Begin */
+- freg_t cbr_tab_end; /* CBR Table End */
+- freg_t cbr_pointer; /* CBR Pointer */
+- u_int filler2f[0x30 - 0x2F];
+- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
+- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
+- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+- u_int filler38[0x40 - 0x38];
+- freg_t queue_base; /* Base address for PRQ and TCQ */
+- freg_t desc_base; /* Base address of descriptor table */
+- u_int filler42[0x45 - 0x42];
+- freg_t mode_reg_0; /* Mode register 0 */
+- freg_t mode_reg_1; /* Mode register 1 */
+- freg_t intr_status_reg;/* Interrupt Status register */
+- freg_t mask_reg; /* Mask Register */
+- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
+- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+- freg_t state_reg; /* Status register */
+- u_int filler4c[0x58 - 0x4c];
+- freg_t curr_desc_num; /* Contains the current descriptor num */
+- freg_t next_desc; /* Next descriptor */
+- freg_t next_vc; /* Next VC */
+- u_int filler5b[0x5d - 0x5b];
+- freg_t present_slot_cnt;/* Present slot count */
+- u_int filler5e[0x6a - 0x5e];
+- freg_t new_desc_num; /* New descriptor number */
+- freg_t new_vc; /* New VC */
+- freg_t sched_tbl_ptr; /* Schedule table pointer */
+- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
+- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
+- freg_t abrwq_wptr; /* ABR wait queue write pointer */
+- freg_t abrwq_rptr; /* ABR wait queue read pointer */
+- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
+- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
+- freg_t cbr_vc; /* CBR VC */
+- freg_t vbr_sb_vc; /* VBR SB VC */
+- freg_t abr_sb_vc; /* ABR SB VC */
+- freg_t ubr_sb_vc; /* UBR SB VC */
+- freg_t vbr_next_link; /* VBR next link */
+- freg_t abr_next_link; /* ABR next link */
+- freg_t ubr_next_link; /* UBR next link */
+- u_int filler7a[0x7c-0x7a];
+- freg_t out_rate_head; /* Out of rate head */
+- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
++ ffreg_t idlehead_high; /* Idle cell header (high) */
++ ffreg_t idlehead_low; /* Idle cell header (low) */
++ ffreg_t maxrate; /* Maximum rate */
++ ffreg_t stparms; /* Traffic Management Parameters */
++ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
++ ffreg_t rm_type; /* */
++ u_int filler5[0x17 - 0x06];
++ ffreg_t cmd_reg; /* Command register */
++ u_int filler18[0x20 - 0x18];
++ ffreg_t cbr_base; /* CBR Pointer Base */
++ ffreg_t vbr_base; /* VBR Pointer Base */
++ ffreg_t abr_base; /* ABR Pointer Base */
++ ffreg_t ubr_base; /* UBR Pointer Base */
++ u_int filler24;
++ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
++ ffreg_t abrwq_base; /* ABR Wait Queue Base */
++ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
++ ffreg_t vct_base; /* Main VC Table Base */
++ ffreg_t vcte_base; /* Extended Main VC Table Base */
++ u_int filler2a[0x2C - 0x2A];
++ ffreg_t cbr_tab_beg; /* CBR Table Begin */
++ ffreg_t cbr_tab_end; /* CBR Table End */
++ ffreg_t cbr_pointer; /* CBR Pointer */
++ u_int filler2f[0x30 - 0x2F];
++ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
++ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
++ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
++ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
++ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
++ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
++ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
++ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
++ u_int filler38[0x40 - 0x38];
++ ffreg_t queue_base; /* Base address for PRQ and TCQ */
++ ffreg_t desc_base; /* Base address of descriptor table */
++ u_int filler42[0x45 - 0x42];
++ ffreg_t mode_reg_0; /* Mode register 0 */
++ ffreg_t mode_reg_1; /* Mode register 1 */
++ ffreg_t intr_status_reg;/* Interrupt Status register */
++ ffreg_t mask_reg; /* Mask Register */
++ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
++ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
++ ffreg_t state_reg; /* Status register */
++ u_int filler4c[0x58 - 0x4c];
++ ffreg_t curr_desc_num; /* Contains the current descriptor num */
++ ffreg_t next_desc; /* Next descriptor */
++ ffreg_t next_vc; /* Next VC */
++ u_int filler5b[0x5d - 0x5b];
++ ffreg_t present_slot_cnt;/* Present slot count */
++ u_int filler5e[0x6a - 0x5e];
++ ffreg_t new_desc_num; /* New descriptor number */
++ ffreg_t new_vc; /* New VC */
++ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
++ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
++ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
++ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
++ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
++ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
++ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
++ ffreg_t cbr_vc; /* CBR VC */
++ ffreg_t vbr_sb_vc; /* VBR SB VC */
++ ffreg_t abr_sb_vc; /* ABR SB VC */
++ ffreg_t ubr_sb_vc; /* UBR SB VC */
++ ffreg_t vbr_next_link; /* VBR next link */
++ ffreg_t abr_next_link; /* ABR next link */
++ ffreg_t ubr_next_link; /* UBR next link */
++ u_int filler7a[0x7c-0x7a];
++ ffreg_t out_rate_head; /* Out of rate head */
++ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
++ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
++ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
++ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ } ffredn_t;
+
+ typedef struct _rfredn_t {
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 9851093..1853a45 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card)
+ for (port = 0; tx_pending; tx_pending >>= 1, port++) {
+ if (tx_pending & 1) {
+ struct sk_buff *oldskb = card->tx_skb[port];
+- if (oldskb)
++ if (oldskb) {
+ pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
+ oldskb->len, PCI_DMA_TODEVICE);
+-
++ card->tx_skb[port] = NULL;
++ }
+ spin_lock(&card->tx_queue_lock);
+ skb = skb_dequeue(&card->tx_queue[port]);
+ if (!skb)
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 181ed26..513a02d 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -293,7 +293,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
+ struct device *dev;
+ int error = 0;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return -EINVAL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -327,7 +327,7 @@ struct device *bus_find_device(struct bus_type *bus,
+ struct klist_iter i;
+ struct device *dev;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return NULL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index e3bbed8..61d3e1b 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -172,6 +172,8 @@ static int deferred_probe_initcall(void)
+
+ driver_deferred_probe_enable = true;
+ driver_deferred_probe_trigger();
++ /* Sort as many dependencies as possible before exiting initcalls */
++ flush_workqueue(deferred_wq);
+ return 0;
+ }
+ late_initcall(deferred_probe_initcall);
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index bb1ff17..c394041 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -90,7 +90,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+- if (buf_pos >= count - 1 - tot_len)
++ if (buf_pos + 1 + tot_len >= count)
+ break;
+
+ /* Format the register */
+diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
+index cc65b45..b4e83b8 100644
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
+ bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
+ ~(1 << irqflag));
+ else
+- bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
++ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
+
+ /* assign the new one */
+ if (irq == 0) {
+diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
+index db195ab..e49ddd0 100644
+--- a/drivers/block/aoe/aoe.h
++++ b/drivers/block/aoe/aoe.h
+@@ -1,5 +1,5 @@
+ /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
+-#define VERSION "47"
++#define VERSION "47q"
+ #define AOE_MAJOR 152
+ #define DEVICE_NAME "aoe"
+
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index 321de7b..7eca463 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -276,8 +276,6 @@ aoeblk_gdalloc(void *vp)
+ goto err_mempool;
+ blk_queue_make_request(d->blkq, aoeblk_make_request);
+ d->blkq->backing_dev_info.name = "aoe";
+- if (bdi_init(&d->blkq->backing_dev_info))
+- goto err_blkq;
+ spin_lock_irqsave(&d->lock, flags);
+ gd->major = AOE_MAJOR;
+ gd->first_minor = d->sysminor * AOE_PARTITIONS;
+@@ -298,9 +296,6 @@ aoeblk_gdalloc(void *vp)
+ aoedisk_add_sysfs(d);
+ return;
+
+-err_blkq:
+- blk_cleanup_queue(d->blkq);
+- d->blkq = NULL;
+ err_mempool:
+ mempool_destroy(d->bufpool);
+ err_disk:
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 54a55f0..7aac910 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -69,7 +69,7 @@
+ #define DEV_NAME_LEN 32
+ #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
+
+-#define RBD_NOTIFY_TIMEOUT_DEFAULT 10
++#define RBD_READ_ONLY_DEFAULT false
+
+ /*
+ * block device image metadata (in-memory version)
+@@ -91,7 +91,7 @@ struct rbd_image_header {
+ };
+
+ struct rbd_options {
+- int notify_timeout;
++ bool read_only;
+ };
+
+ /*
+@@ -177,7 +177,7 @@ struct rbd_device {
+ u64 snap_id; /* current snapshot id */
+ /* whether the snap_id this device reads from still exists */
+ bool snap_exists;
+- int read_only;
++ bool read_only;
+
+ struct list_head node;
+
+@@ -186,6 +186,7 @@ struct rbd_device {
+
+ /* sysfs related */
+ struct device dev;
++ unsigned long open_count;
+ };
+
+ static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
+@@ -249,8 +250,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
+ if ((mode & FMODE_WRITE) && rbd_dev->read_only)
+ return -EROFS;
+
++ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ rbd_get_dev(rbd_dev);
+ set_device_ro(bdev, rbd_dev->read_only);
++ rbd_dev->open_count++;
++ mutex_unlock(&ctl_mutex);
+
+ return 0;
+ }
+@@ -259,7 +263,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
+ {
+ struct rbd_device *rbd_dev = disk->private_data;
+
++ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
++ BUG_ON(!rbd_dev->open_count);
++ rbd_dev->open_count--;
+ rbd_put_dev(rbd_dev);
++ mutex_unlock(&ctl_mutex);
+
+ return 0;
+ }
+@@ -341,17 +349,24 @@ static struct rbd_client *__rbd_client_find(struct ceph_options *ceph_opts)
+ * mount options
+ */
+ enum {
+- Opt_notify_timeout,
+ Opt_last_int,
+ /* int args above */
+ Opt_last_string,
+ /* string args above */
++ Opt_read_only,
++ Opt_read_write,
++ /* Boolean args above */
++ Opt_last_bool,
+ };
+
+ static match_table_t rbd_opts_tokens = {
+- {Opt_notify_timeout, "notify_timeout=%d"},
+ /* int args above */
+ /* string args above */
++ {Opt_read_only, "read_only"},
++ {Opt_read_only, "ro"}, /* Alternate spelling */
++ {Opt_read_write, "read_write"},
++ {Opt_read_write, "rw"}, /* Alternate spelling */
++ /* Boolean args above */
+ {-1, NULL}
+ };
+
+@@ -376,13 +391,18 @@ static int parse_rbd_opts_token(char *c, void *private)
+ } else if (token > Opt_last_int && token < Opt_last_string) {
+ dout("got string token %d val %s\n", token,
+ argstr[0].from);
++ } else if (token > Opt_last_string && token < Opt_last_bool) {
++ dout("got Boolean token %d\n", token);
+ } else {
+ dout("got token %d\n", token);
+ }
+
+ switch (token) {
+- case Opt_notify_timeout:
+- rbd_opts->notify_timeout = intval;
++ case Opt_read_only:
++ rbd_opts->read_only = true;
++ break;
++ case Opt_read_write:
++ rbd_opts->read_only = false;
+ break;
+ default:
+ BUG_ON(token);
+@@ -406,7 +426,7 @@ static struct rbd_client *rbd_get_client(const char *mon_addr,
+ if (!rbd_opts)
+ return ERR_PTR(-ENOMEM);
+
+- rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
++ rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+
+ ceph_opts = ceph_parse_options(options, mon_addr,
+ mon_addr + mon_addr_len,
+@@ -606,7 +626,7 @@ static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
+ sizeof (RBD_SNAP_HEAD_NAME))) {
+ rbd_dev->snap_id = CEPH_NOSNAP;
+ rbd_dev->snap_exists = false;
+- rbd_dev->read_only = 0;
++ rbd_dev->read_only = rbd_dev->rbd_opts.read_only;
+ if (size)
+ *size = rbd_dev->header.image_size;
+ } else {
+@@ -618,7 +638,7 @@ static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
+ goto done;
+ rbd_dev->snap_id = snap_id;
+ rbd_dev->snap_exists = true;
+- rbd_dev->read_only = 1;
++ rbd_dev->read_only = true; /* No choice for snapshots */
+ }
+
+ ret = 0;
+@@ -938,8 +958,9 @@ static int rbd_do_request(struct request *rq,
+ layout->fl_stripe_count = cpu_to_le32(1);
+ layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
+- ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
+- req, ops);
++ ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
++ req, ops);
++ rbd_assert(ret == 0);
+
+ ceph_osdc_build_request(req, ofs, &len,
+ ops,
+@@ -2260,8 +2281,8 @@ static void rbd_id_put(struct rbd_device *rbd_dev)
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = list_entry(tmp, struct rbd_device, node);
+- if (rbd_id > max_id)
+- max_id = rbd_id;
++ if (rbd_dev->id > max_id)
++ max_id = rbd_dev->id;
+ }
+ spin_unlock(&rbd_dev_list_lock);
+
+@@ -2623,6 +2644,11 @@ static ssize_t rbd_remove(struct bus_type *bus,
+ goto done;
+ }
+
++ if (rbd_dev->open_count) {
++ ret = -EBUSY;
++ goto done;
++ }
++
+ __rbd_remove_all_snaps(rbd_dev);
+ rbd_bus_del_dev(rbd_dev);
+
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 9dcf76a..31dd451 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+ int op_len, err;
+ void *req_buf;
+
+- if (!(((u64)1 << ((u64)op - 1)) & port->operations))
++ if (!(((u64)1 << (u64)op) & port->operations))
+ return -EOPNOTSUPP;
+
+ switch (op) {
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index fc2de55..b00000e 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x0489, 0xE027) },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 654e248..e023c65 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -123,6 +123,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index cdf2f54..f77e341 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1808,7 +1808,8 @@ static void virtcons_remove(struct virtio_device *vdev)
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
+- cancel_work_sync(&portdev->control_work);
++ if (use_multiport(portdev))
++ cancel_work_sync(&portdev->control_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
+index bc6f5fa..819dfda 100644
+--- a/drivers/dca/dca-core.c
++++ b/drivers/dca/dca-core.c
+@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
++ if (list_empty(&dca_domains)) {
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
++ return;
++ }
++
+ list_del(&dca->node);
+
+ pci_rc = dca_pci_rc_from_dev(dev);
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index f7f1dc6..ed0e8b7 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -951,7 +951,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
+ goto free_resources;
+ }
+ }
+- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* skip validate if the capability is not present */
+ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index e164c55..1bfb207 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+- if (edac_pci_dev->show)
++ if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+ }
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 08c6749..638e1f7 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -861,8 +861,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+- fwnet_incoming_packet(dev, buf_ptr, length,
+- source_node_id, -1, true);
++ fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
++ context->card->generation, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+@@ -958,7 +958,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+ break;
+ }
+
+- skb_pull(skb, ptask->max_payload);
++ if (ptask->dest_node == IEEE1394_ALL_NODES) {
++ skb_pull(skb,
++ ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
++ } else {
++ skb_pull(skb, ptask->max_payload);
++ }
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+@@ -1062,7 +1067,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+ smp_rmb();
+ node_id = dev->card->node_id;
+
+- p = skb_push(ptask->skb, 8);
++ p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index b298158..982f1f5 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -16,6 +16,7 @@
+ */
+ static char dmi_empty_string[] = " ";
+
++static u16 __initdata dmi_ver;
+ /*
+ * Catch too early calls to dmi_check_system():
+ */
+@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+ return 0;
+ }
+
+-static int __init dmi_checksum(const u8 *buf)
++static int __init dmi_checksum(const u8 *buf, u8 len)
+ {
+ u8 sum = 0;
+ int a;
+
+- for (a = 0; a < 15; a++)
++ for (a = 0; a < len; a++)
+ sum += buf[a];
+
+ return sum == 0;
+@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
+ return;
+
+ for (i = 0; i < 16 && (is_ff || is_00); i++) {
+- if(d[i] != 0x00) is_ff = 0;
+- if(d[i] != 0xFF) is_00 = 0;
++ if (d[i] != 0x00)
++ is_00 = 0;
++ if (d[i] != 0xFF)
++ is_ff = 0;
+ }
+
+ if (is_ff || is_00)
+@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
+ if (!s)
+ return;
+
+- sprintf(s, "%pUB", d);
++ /*
++ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
++ * the UUID are supposed to be little-endian encoded. The specification
++ * says that this is the de facto standard.
++ */
++ if (dmi_ver >= 0x0206)
++ sprintf(s, "%pUL", d);
++ else
++ sprintf(s, "%pUB", d);
+
+ dmi_ident[slot] = s;
+ }
+@@ -404,35 +415,63 @@ static int __init dmi_present(const char __iomem *p)
+ u8 buf[15];
+
+ memcpy_fromio(buf, p, 15);
+- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
++ if (dmi_checksum(buf, 15)) {
+ dmi_num = (buf[13] << 8) | buf[12];
+ dmi_len = (buf[7] << 8) | buf[6];
+ dmi_base = (buf[11] << 24) | (buf[10] << 16) |
+ (buf[9] << 8) | buf[8];
+
+- /*
+- * DMI version 0.0 means that the real version is taken from
+- * the SMBIOS version, which we don't know at this point.
+- */
+- if (buf[14] != 0)
+- printk(KERN_INFO "DMI %d.%d present.\n",
+- buf[14] >> 4, buf[14] & 0xF);
+- else
+- printk(KERN_INFO "DMI present.\n");
+ if (dmi_walk_early(dmi_decode) == 0) {
++ if (dmi_ver)
++ pr_info("SMBIOS %d.%d present.\n",
++ dmi_ver >> 8, dmi_ver & 0xFF);
++ else {
++ dmi_ver = (buf[14] & 0xF0) << 4 |
++ (buf[14] & 0x0F);
++ pr_info("Legacy DMI %d.%d present.\n",
++ dmi_ver >> 8, dmi_ver & 0xFF);
++ }
+ dmi_dump_ids();
+ return 0;
+ }
+ }
++ dmi_ver = 0;
+ return 1;
+ }
+
++static int __init smbios_present(const char __iomem *p)
++{
++ u8 buf[32];
++ int offset = 0;
++
++ memcpy_fromio(buf, p, 32);
++ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
++ dmi_ver = (buf[6] << 8) + buf[7];
++
++ /* Some BIOS report weird SMBIOS version, fix that up */
++ switch (dmi_ver) {
++ case 0x021F:
++ case 0x0221:
++ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
++ dmi_ver & 0xFF, 3);
++ dmi_ver = 0x0203;
++ break;
++ case 0x0233:
++ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
++ dmi_ver = 0x0206;
++ break;
++ }
++ offset = 16;
++ }
++ return dmi_present(buf + offset);
++}
++
+ void __init dmi_scan_machine(void)
+ {
+ char __iomem *p, *q;
+ int rc;
+
+- if (efi_enabled) {
++ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+ goto error;
+
+@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
+ if (p == NULL)
+ goto error;
+
+- rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
++ rc = smbios_present(p);
+ dmi_iounmap(p, 32);
+ if (!rc) {
+ dmi_available = 1;
+@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
+ goto error;
+
+ for (q = p; q < p + 0x10000; q += 16) {
+- rc = dmi_present(q);
++ if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
++ rc = smbios_present(q);
++ else if (memcmp(q, "_DMI_", 5) == 0)
++ rc = dmi_present(q);
++ else
++ continue;
+ if (!rc) {
+ dmi_available = 1;
+ dmi_iounmap(p, 0x10000);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index d10c987..bfd8f43 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -1224,7 +1224,7 @@ efivars_init(void)
+ printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+ EFIVARS_DATE);
+
+- if (!efi_enabled)
++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ /* For now we'll register the efi directory at /sys/firmware/efi */
+@@ -1262,7 +1262,7 @@ err_put:
+ static void __exit
+ efivars_exit(void)
+ {
+- if (efi_enabled) {
++ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ unregister_efivars(&__efivars);
+ kobject_put(efi_kobj);
+ }
+diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
+index 4da4eb9..2224f1d 100644
+--- a/drivers/firmware/iscsi_ibft_find.c
++++ b/drivers/firmware/iscsi_ibft_find.c
+@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
+ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+ * only use ACPI for this */
+
+- if (!efi_enabled)
++ if (!efi_enabled(EFI_BOOT))
+ find_ibft_in_mem();
+
+ if (ibft_addr) {
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index af81f77..3859f43 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2023,7 +2023,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+
+ switch (bpp) {
+ case 8:
+- fmt = DRM_FORMAT_RGB332;
++ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
+@@ -3633,6 +3633,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+ {
+ switch (format) {
++ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index b7ee230..7906edd 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -1924,7 +1924,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+- num_modes += add_inferred_modes(connector, edid);
++ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
++ num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+index 37c9a52..767782a 100644
+--- a/drivers/gpu/drm/drm_usb.c
++++ b/drivers/gpu/drm/drm_usb.c
+@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+- dev->dev = &usbdev->dev;
++ dev->dev = &interface->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index cdf46b5..d8bb392 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -749,6 +749,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
++ u64 invalid_offset = (u64)-1;
++ int j;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+@@ -759,6 +761,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ goto err;
+ }
+
++ /* As we do not update the known relocation offsets after
++ * relocating (due to the complexities in lock handling),
++ * we need to mark them as invalid now so that we force the
++ * relocation processing next time. Just in case the target
++ * object is evicted and then rebound into its old
++ * presumed_offset before the next execbuffer - if that
++ * happened we would make the mistake of assuming that the
++ * relocations were valid.
++ */
++ for (j = 0; j < exec[i].relocation_count; j++) {
++ if (copy_to_user(&user_relocs[j].presumed_offset,
++ &invalid_offset,
++ sizeof(invalid_offset))) {
++ ret = -EFAULT;
++ mutex_lock(&dev->struct_mutex);
++ goto err;
++ }
++ }
++
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index f02cfad..380e7da 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -506,6 +506,7 @@
+ * the enables for writing to the corresponding low bit.
+ */
+ #define _3D_CHICKEN 0x02084
++#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+ #define _3D_CHICKEN2 0x0208c
+ /* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+@@ -3274,6 +3275,8 @@
+ #define _PFA_CTL_1 0x68080
+ #define _PFB_CTL_1 0x68880
+ #define PF_ENABLE (1<<31)
++#define PF_PIPE_SEL_MASK_IVB (3<<29)
++#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
+ #define PF_FILTER_MASK (3<<23)
+ #define PF_FILTER_PROGRAMMED (0<<23)
+ #define PF_FILTER_MED_3x3 (1<<23)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 0777c79..3098027 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -146,8 +146,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+- .m1 = { .min = 10, .max = 22 },
+- .m2 = { .min = 5, .max = 9 },
++ .m1 = { .min = 8, .max = 18 },
++ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+@@ -2347,18 +2347,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
+ FDI_FE_ERRC_ENABLE);
+ }
+
+-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 flags = I915_READ(SOUTH_CHICKEN1);
+-
+- flags |= FDI_PHASE_SYNC_OVR(pipe);
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+- flags |= FDI_PHASE_SYNC_EN(pipe);
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+- POSTING_READ(SOUTH_CHICKEN1);
+-}
+-
+ /* The FDI link training functions for ILK/Ibexpeak. */
+ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ {
+@@ -2509,9 +2497,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ POSTING_READ(reg);
+ udelay(150);
+
+- if (HAS_PCH_CPT(dev))
+- cpt_phase_pointer_enable(dev, pipe);
+-
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+@@ -2638,9 +2623,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+ POSTING_READ(reg);
+ udelay(150);
+
+- if (HAS_PCH_CPT(dev))
+- cpt_phase_pointer_enable(dev, pipe);
+-
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+@@ -2754,17 +2736,6 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+ }
+ }
+
+-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 flags = I915_READ(SOUTH_CHICKEN1);
+-
+- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+- POSTING_READ(SOUTH_CHICKEN1);
+-}
+ static void ironlake_fdi_disable(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+@@ -2794,8 +2765,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN));
+- } else if (HAS_PCH_CPT(dev)) {
+- cpt_phase_pointer_disable(dev, pipe);
+ }
+
+ /* still set train pattern 1 */
+@@ -3233,7 +3202,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
++ if (IS_IVYBRIDGE(dev))
++ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
++ PF_PIPE_SEL_IVB(pipe));
++ else
++ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+@@ -3435,6 +3408,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
++ u32 pctl;
+
+ if (!intel_crtc->active)
+ return;
+@@ -3450,6 +3424,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
++
++ /* Disable panel fitter if it is on this pipe. */
++ pctl = I915_READ(PFIT_CONTROL);
++ if ((pctl & PFIT_ENABLE) &&
++ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
++ I915_WRITE(PFIT_CONTROL, 0);
++
+ intel_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 0c52448..2e6448c 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -774,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+- .ident = "ZOTAC ZBOXSD-ID12/ID13",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
+- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+- },
+- },
+- {
+- .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index c23c9ea..572b2ca 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3324,6 +3324,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
++ /* WaDisableHiZPlanesWhenMSAAEnabled */
++ I915_WRITE(_3D_CHICKEN,
++ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
++
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
+index 89640f2..2b59f41 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
+@@ -504,7 +504,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+
+ static inline bool is_powersaving_dpms(int mode)
+ {
+- return (mode != DRM_MODE_DPMS_ON);
++ return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
+ }
+
+ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 2817101..9bd3015 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -258,8 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+- atombios_powergate_crtc(crtc, ATOM_DISABLE);
+ atombios_enable_crtc(crtc, ATOM_ENABLE);
+ if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+ atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+@@ -277,8 +275,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ atombios_enable_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->enabled = false;
+- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+- atombios_powergate_crtc(crtc, ATOM_ENABLE);
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+ break;
+@@ -1667,6 +1663,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ int i;
+
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
++ if (ASIC_IS_DCE6(rdev))
++ atombios_powergate_crtc(crtc, ATOM_ENABLE);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 2eb418e..e53a91b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -95,7 +95,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- radeon_dp_set_link_config(connector, mode);
++ radeon_dp_set_link_config(connector, adjusted_mode);
+ }
+
+ return true;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 5528fea..8f0ce47 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1259,14 +1259,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ /* wait for the next frame */
+@@ -1291,6 +1295,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ blackout &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
++ /* wait for the MC to settle */
++ udelay(100);
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+@@ -1324,11 +1330,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
+index 4a33cdc..bf849ea 100644
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -2724,6 +2724,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
+
+ /* check config regs */
+ switch (reg) {
++ case WAIT_UNTIL:
+ case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
+ case CP_COHER_CNTL:
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index f75247d..33d3975 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -2419,6 +2419,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
++ /* RV100 board with external TMDS bit mis-set.
++ * Actually uses internal TMDS, clear the bit.
++ */
++ if (dev->pdev->device == 0x5159 &&
++ dev->pdev->subsystem_vendor == 0x1014 &&
++ dev->pdev->subsystem_device == 0x029A) {
++ tmp &= ~(1 << 4);
++ }
+ if ((tmp >> 4) & 0x1) {
+ devices |= ATOM_DEVICE_DFP2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 895e628..a7e797c 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -745,7 +745,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
+ ret = connector_status_disconnected;
+
+ if (radeon_connector->ddc_bus)
+- dret = radeon_ddc_probe(radeon_connector);
++ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+@@ -951,7 +951,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ return connector->status;
+
+ if (radeon_connector->ddc_bus)
+- dret = radeon_ddc_probe(radeon_connector);
++ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+@@ -1391,7 +1391,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (encoder) {
+ /* setup ddc on the bridge */
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
++ /* bridge chips are always aux */
++ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+ ret = connector_status_connected;
+ else if (radeon_connector->dac_load_detect) { /* try load detection */
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -1409,7 +1410,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+- if (radeon_ddc_probe(radeon_connector))
++ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
++ if (radeon_ddc_probe(radeon_connector, false))
+ ret = connector_status_connected;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 8794744..f1b951d 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -240,7 +240,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ y = 0;
+ }
+
+- if (ASIC_IS_AVIVO(rdev)) {
++ /* fixed on DCE6 and newer */
++ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+ int i = 0;
+ struct drm_crtc *crtc_p;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 7a3daeb..9d9bf5f 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -427,7 +427,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ uint32_t reg;
+
+- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
++ if (efi_enabled(EFI_BOOT) &&
++ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ return false;
+
+ /* first check CRTCs */
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 7ddef8f..06a31cf 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -695,10 +695,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+- ENCODER_OBJECT_ID_NONE)) {
++ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
++ ENCODER_OBJECT_ID_NONE) {
++ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++
++ if (dig->dp_i2c_bus)
++ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
++ &dig->dp_i2c_bus->adapter);
++ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
++ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+@@ -1106,14 +1111,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ }
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+- if (radeon_fb == NULL)
++ if (radeon_fb == NULL) {
++ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(radeon_fb);
+ drm_gem_object_unreference_unlocked(obj);
+- return NULL;
++ return ERR_PTR(ret);
+ }
+
+ return &radeon_fb->base;
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index 3edec1c..6076e85 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
+ * radeon_ddc_probe
+ *
+ */
+-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
++bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+ {
+ u8 out = 0x0;
+ u8 buf[8];
+@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
++ if (use_aux) {
++ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
++ } else {
++ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
++ }
++
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index dd402bb..9633dbb 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -618,6 +618,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ enum drm_connector_status found = connector_status_disconnected;
+ bool color = true;
+
++ /* just don't bother on RN50; those chips are often connected to remoting
++ * console hw and we often fail to load-detect them. So to make
++ * everyone happy, report the encoder as always connected.
++ */
++ if (ASIC_IS_RN50(rdev)) {
++ return connector_status_connected;
++ }
++
+ /* save the regs we need */
+ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index d569789..485d16e 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -534,7 +534,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 val);
+ extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+ extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
++extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+ extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 43c431a..f2017fc 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -361,6 +361,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
+ {
+ int r;
+
++ /* make sure we aren't trying to allocate more space than there is on the ring */
++ if (ndw > (ring->ring_size / 4))
++ return -ENOMEM;
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
+index 0f656b1..a072fa8 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
++++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
+@@ -1,5 +1,6 @@
+ cayman 0x9400
+ 0x0000802C GRBM_GFX_INDEX
++0x00008040 WAIT_UNTIL
+ 0x000084FC CP_STRMOUT_CNTL
+ 0x000085F0 CP_COHER_CNTL
+ 0x000085F4 CP_COHER_SIZE
+diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
+index 8d9dc44..3234224 100644
+--- a/drivers/gpu/drm/udl/udl_connector.c
++++ b/drivers/gpu/drm/udl/udl_connector.c
+@@ -22,13 +22,17 @@
+ static u8 *udl_get_edid(struct udl_device *udl)
+ {
+ u8 *block;
+- char rbuf[3];
++ char *rbuf;
+ int ret, i;
+
+ block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (block == NULL)
+ return NULL;
+
++ rbuf = kmalloc(2, GFP_KERNEL);
++ if (rbuf == NULL)
++ goto error;
++
+ for (i = 0; i < EDID_LENGTH; i++) {
+ ret = usb_control_msg(udl->ddev->usbdev,
+ usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
+@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
+ HZ);
+ if (ret < 1) {
+ DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
+- i--;
+ goto error;
+ }
+ block[i] = rbuf[1];
+ }
+
++ kfree(rbuf);
+ return block;
+
+ error:
+ kfree(block);
++ kfree(rbuf);
+ return NULL;
+ }
+
+@@ -59,6 +64,14 @@ static int udl_get_modes(struct drm_connector *connector)
+
+ connector->display_info.raw_edid = (char *)edid;
+
++ /*
++ * We only read the main block, but if the monitor reports extension
++ * blocks then the drm edid code expects them to be present, so patch
++ * the extension count to 0.
++ */
++ edid->checksum += edid->extensions;
++ edid->extensions = 0;
++
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index 87aa5f5..cc6d90f 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -75,6 +75,8 @@ struct udl_framebuffer {
+ struct drm_framebuffer base;
+ struct udl_gem_object *obj;
+ bool active_16; /* active on the 16-bit channel */
++ int x1, y1, x2, y2; /* dirty rect */
++ spinlock_t dirty_lock;
+ };
+
+ #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 6f6ca50..c9df873 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -22,9 +22,9 @@
+
+ #include "drm_fb_helper.h"
+
+-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
++#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
+
+-static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
++static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
+ static int fb_bpp = 16;
+
+ module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+@@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ struct urb *urb;
+ int aligned_x;
+ int bpp = (fb->base.bits_per_pixel / 8);
++ int x2, y2;
++ bool store_for_later = false;
++ unsigned long flags;
+
+ if (!fb->active_16)
+ return 0;
+@@ -169,8 +172,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ }
+ }
+
+- start_cycles = get_cycles();
+-
+ aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+ width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+ x = aligned_x;
+@@ -180,19 +181,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ (y + height > fb->base.height))
+ return -EINVAL;
+
++ /* if we are in atomic context, just store the info;
++ we can't test inside the spin lock */
++ if (in_atomic())
++ store_for_later = true;
++
++ x2 = x + width - 1;
++ y2 = y + height - 1;
++
++ spin_lock_irqsave(&fb->dirty_lock, flags);
++
++ if (fb->y1 < y)
++ y = fb->y1;
++ if (fb->y2 > y2)
++ y2 = fb->y2;
++ if (fb->x1 < x)
++ x = fb->x1;
++ if (fb->x2 > x2)
++ x2 = fb->x2;
++
++ if (store_for_later) {
++ fb->x1 = x;
++ fb->x2 = x2;
++ fb->y1 = y;
++ fb->y2 = y2;
++ spin_unlock_irqrestore(&fb->dirty_lock, flags);
++ return 0;
++ }
++
++ fb->x1 = fb->y1 = INT_MAX;
++ fb->x2 = fb->y2 = 0;
++
++ spin_unlock_irqrestore(&fb->dirty_lock, flags);
++ start_cycles = get_cycles();
++
+ urb = udl_get_urb(dev);
+ if (!urb)
+ return 0;
+ cmd = urb->transfer_buffer;
+
+- for (i = y; i < y + height ; i++) {
++ for (i = y; i <= y2 ; i++) {
+ const int line_offset = fb->base.pitches[0] * i;
+ const int byte_offset = line_offset + (x * bpp);
+ const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
+ if (udl_render_hline(dev, bpp, &urb,
+ (char *) fb->obj->vmapping,
+ &cmd, byte_offset, dev_byte_offset,
+- width * bpp,
++ (x2 - x + 1) * bpp,
+ &bytes_identical, &bytes_sent))
+ goto error;
+ }
+@@ -417,6 +452,7 @@ udl_framebuffer_init(struct drm_device *dev,
+ {
+ int ret;
+
++ spin_lock_init(&ufb->dirty_lock);
+ ufb->obj = obj;
+ ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
+ drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 5de3bb3..10ef742 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1528,6 +1528,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ab8ce9f..2a3f007 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -681,6 +681,9 @@
+ #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
+ #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
+
++#define USB_VENDOR_ID_SIGMATEL 0x066F
++#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
++
+ #define USB_VENDOR_ID_SKYCABLE 0x1223
+ #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
+
+diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
+index 0a1805c..ef59c84 100644
+--- a/drivers/hid/hid-wiimote-ext.c
++++ b/drivers/hid/hid-wiimote-ext.c
+@@ -378,14 +378,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
+
+ if (ext->motionp) {
+ input_report_key(ext->input,
+- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
++ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04));
+ input_report_key(ext->input,
+- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
++ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08));
+ } else {
+ input_report_key(ext->input,
+- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
++ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01));
+ input_report_key(ext->input,
+- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
++ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 8865fa3..eb55cef 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -79,6 +79,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
+index 8fa2632..7272176 100644
+--- a/drivers/hwmon/lm73.c
++++ b/drivers/hwmon/lm73.c
+@@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ struct i2c_client *client = to_i2c_client(dev);
+ long temp;
+ short value;
++ s32 err;
+
+ int status = kstrtol(buf, 10, &temp);
+ if (status < 0)
+@@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ /* Write value */
+ value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
+ (LM73_TEMP_MAX*4)) << 5;
+- i2c_smbus_write_word_swapped(client, attr->index, value);
+- return count;
++ err = i2c_smbus_write_word_swapped(client, attr->index, value);
++ return (err < 0) ? err : count;
+ }
+
+ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+@@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
++ int temp;
++
++ s32 err = i2c_smbus_read_word_swapped(client, attr->index);
++ if (err < 0)
++ return err;
++
+ /* use integer division instead of equivalent right shift to
+ guarantee arithmetic shift and preserve the sign */
+- int temp = ((s16) (i2c_smbus_read_word_swapped(client,
+- attr->index))*250) / 32;
+- return sprintf(buf, "%d\n", temp);
++ temp = (((s16) err) * 250) / 32;
++ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
+ }
+
+
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index c438e46..3f41d9f 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -524,6 +524,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+ int nes_destroy_cqp(struct nes_device *);
+ int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+ void nes_recheck_link_status(struct work_struct *work);
++void nes_terminate_timeout(unsigned long context);
+
+ /* nes_nic.c */
+ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index d42c9f4..96801c3 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+ static void process_critical_error(struct nes_device *nesdev);
+ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
+ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+-static void nes_terminate_timeout(unsigned long context);
+ static void nes_terminate_start_timer(struct nes_qp *nesqp);
+
+ #ifdef CONFIG_INFINIBAND_NES_DEBUG
+@@ -3522,7 +3521,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
+ }
+
+ /* Timeout routine in case terminate fails to complete */
+-static void nes_terminate_timeout(unsigned long context)
++void nes_terminate_timeout(unsigned long context)
+ {
+ struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+
+@@ -3532,11 +3531,7 @@ static void nes_terminate_timeout(unsigned long context)
+ /* Set a timer in case hw cannot complete the terminate sequence */
+ static void nes_terminate_start_timer(struct nes_qp *nesqp)
+ {
+- init_timer(&nesqp->terminate_timer);
+- nesqp->terminate_timer.function = nes_terminate_timeout;
+- nesqp->terminate_timer.expires = jiffies + HZ;
+- nesqp->terminate_timer.data = (unsigned long)nesqp;
+- add_timer(&nesqp->terminate_timer);
++ mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 8b8812d..da84ea3 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ }
+
+ nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
++ init_timer(&nesqp->terminate_timer);
++ nesqp->terminate_timer.function = nes_terminate_timeout;
++ nesqp->terminate_timer.data = (unsigned long)nesqp;
+
+ /* update the QP table */
+ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
+@@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ return &nesqp->ibqp;
+ }
+
+-
+ /**
+ * nes_clean_cq
+ */
+@@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ return ibmr;
+ case IWNES_MEMREG_TYPE_QP:
+ case IWNES_MEMREG_TYPE_CQ:
++ if (!region->length) {
++ nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
++ ib_umem_release(region);
++ return ERR_PTR(-EINVAL);
++ }
+ nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
+ if (!nespbl) {
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
+diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
+index 4dfa1ee..f8f892b 100644
+--- a/drivers/input/joystick/walkera0701.c
++++ b/drivers/input/joystick/walkera0701.c
+@@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev)
+ struct walkera_dev *w = input_get_drvdata(dev);
+
+ parport_disable_irq(w->parport);
++ hrtimer_cancel(&w->timer);
+ }
+
+ static int walkera0701_connect(struct walkera_dev *w, int parport)
+@@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+ if (parport_claim(w->pardevice))
+ goto init_err1;
+
++ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ w->timer.function = timer_handler;
++
+ w->input_dev = input_allocate_device();
+ if (!w->input_dev)
+ goto init_err2;
+@@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+ if (err)
+ goto init_err3;
+
+- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+- w->timer.function = timer_handler;
+ return 0;
+
+ init_err3:
+@@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+
+ static void walkera0701_disconnect(struct walkera_dev *w)
+ {
+- hrtimer_cancel(&w->timer);
+ input_unregister_device(w->input_dev);
+ parport_release(w->pardevice);
+ parport_unregister_device(w->pardevice);
+diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
+index a261d85..c4c9218 100644
+--- a/drivers/input/mouse/sentelic.c
++++ b/drivers/input/mouse/sentelic.c
+@@ -791,7 +791,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
+ fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y);
+ fsp_set_slot(dev, 1, false, 0, 0);
+ }
+- if (fgrs > 0) {
++ if (fgrs == 1 || (fgrs == 2 && !(packet[0] & FSP_PB0_MFMC_FGR2))) {
+ input_report_abs(dev, ABS_X, abs_x);
+ input_report_abs(dev, ABS_Y, abs_y);
+ }
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index d6cc77a..5f306f7 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -921,6 +921,7 @@ static int __init i8042_platform_init(void)
+ int retval;
+
+ #ifdef CONFIG_X86
++ u8 a20_on = 0xdf;
+ /* Just return if pre-detection shows no i8042 controller exist */
+ if (!x86_platform.i8042_detect())
+ return -ENODEV;
+@@ -960,6 +961,14 @@ static int __init i8042_platform_init(void)
+
+ if (dmi_check_system(i8042_dmi_dritek_table))
+ i8042_dritek = true;
++
++ /*
++ * A20 was already enabled during early kernel init. But some buggy
++ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
++ * resume from S3. So we do it here and hope that nothing breaks.
++ */
++ i8042_command(&a20_on, 0x10d1);
++ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
+ #endif /* CONFIG_X86 */
+
+ return retval;
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 18a89b7..e69ece6 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -906,6 +906,38 @@ static void __init free_iommu_all(void)
+ }
+
+ /*
++ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
++ * Workaround:
++ * BIOS should disable L2B miscellaneous clock gating by setting
++ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
++ */
++static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
++{
++ u32 value;
++
++ if ((boot_cpu_data.x86 != 0x15) ||
++ (boot_cpu_data.x86_model < 0x10) ||
++ (boot_cpu_data.x86_model > 0x1f))
++ return;
++
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
++ pci_read_config_dword(iommu->dev, 0xf4, &value);
++
++ if (value & BIT(2))
++ return;
++
++ /* Select NB indirect register 0x90 and enable writing */
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
++
++ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
++ pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
++ dev_name(&iommu->dev->dev));
++
++ /* Clear the enable writing bit */
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
++}
++
++/*
+ * This function clues the initialization function for one IOMMU
+ * together and also allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+@@ -1092,6 +1124,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
+ iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+ }
+
++ amd_iommu_erratum_746_workaround(iommu);
++
+ return pci_enable_device(iommu->dev);
+ }
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 554e6ac..16dc458 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1827,10 +1827,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ if (!pte)
+ return -ENOMEM;
+ /* It is large page*/
+- if (largepage_lvl > 1)
++ if (largepage_lvl > 1) {
+ pteval |= DMA_PTE_LARGE_PAGE;
+- else
++ /* Ensure that old small page tables are removed to make room
++ for superpage, if they exist. */
++ dma_pte_clear_range(domain, iov_pfn,
++ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
++ dma_pte_free_pagetable(domain, iov_pfn,
++ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
++ } else {
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
++ }
+
+ }
+ /* We don't need lock here, nobody else
+@@ -2320,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
+ return 0;
+ }
+
++static bool device_has_rmrr(struct pci_dev *dev)
++{
++ struct dmar_rmrr_unit *rmrr;
++ int i;
++
++ for_each_rmrr_units(rmrr) {
++ for (i = 0; i < rmrr->devices_cnt; i++) {
++ /*
++ * Return TRUE if this RMRR contains the device that
++ * is passed in.
++ */
++ if (rmrr->devices[i] == dev)
++ return true;
++ }
++ }
++ return false;
++}
++
+ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+ {
++
++ /*
++ * We want to prevent any device associated with an RMRR from
++ * getting placed into the SI Domain. This is done because
++ * problems exist when devices are moved in and out of domains
++ * and their respective RMRR info is lost. We exempt USB devices
++ * from this process due to their usage of RMRRs that are known
++ * to not be needed after BIOS hand-off to OS.
++ */
++ if (device_has_rmrr(pdev) &&
++ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
++ return 0;
++
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
+
+@@ -4196,23 +4234,38 @@ static struct iommu_ops intel_iommu_ops = {
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+ };
+
++static void __devinit quirk_iommu_g4x_gfx(struct pci_dev *dev)
++{
++ /* G4x/GM45 integrated gfx dmar support is totally busted. */
++ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
++ dmar_map_gfx = 0;
++}
++
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
++
+ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+ /*
+ * Mobile 4 Series Chipset neglects to set RWBF capability,
+- * but needs it:
++ * but needs it. Same seems to hold for the desktop versions.
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
+-
+- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
+- if (dev->revision == 0x07) {
+- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+- dmar_map_gfx = 0;
+- }
+ }
+
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
+
+ #define GGC 0x52
+ #define GGC_MEMORY_SIZE_MASK (0xf << 8)
+diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
+index 68452b7..03a0a01 100644
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
+ CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
+ CAPIMSG_CONTROL(data));
+ l -= 12;
++ if (l <= 0)
++ return;
+ dbgline = kmalloc(3 * l, GFP_ATOMIC);
+ if (!dbgline)
+ return;
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index afd9598..a651d52 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1566,6 +1566,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
+ if (copy_from_user(dmi, user, tmp.data_size))
+ goto bad;
+
++ /*
++ * Abort if something changed the ioctl data while it was being copied.
++ */
++ if (dmi->data_size != tmp.data_size) {
++ DMERR("rejecting ioctl: data size modified while processing parameters");
++ goto bad;
++ }
++
+ /* Wipe the user buffer so we do not return it to userspace */
+ if (secure_data && clear_user(user, tmp.data_size))
+ goto bad;
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index 5709bfe..accbb05 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -36,13 +36,13 @@ struct node_header {
+ __le32 padding;
+ } __packed;
+
+-struct node {
++struct btree_node {
+ struct node_header header;
+ __le64 keys[0];
+ } __packed;
+
+
+-void inc_children(struct dm_transaction_manager *tm, struct node *n,
++void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ struct dm_btree_value_type *vt);
+
+ int new_block(struct dm_btree_info *info, struct dm_block **result);
+@@ -64,7 +64,7 @@ struct ro_spine {
+ void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
+ int exit_ro_spine(struct ro_spine *s);
+ int ro_step(struct ro_spine *s, dm_block_t new_child);
+-struct node *ro_node(struct ro_spine *s);
++struct btree_node *ro_node(struct ro_spine *s);
+
+ struct shadow_spine {
+ struct dm_btree_info *info;
+@@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s);
+ /*
+ * Some inlines.
+ */
+-static inline __le64 *key_ptr(struct node *n, uint32_t index)
++static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
+ {
+ return n->keys + index;
+ }
+
+-static inline void *value_base(struct node *n)
++static inline void *value_base(struct btree_node *n)
+ {
+ return &n->keys[le32_to_cpu(n->header.max_entries)];
+ }
+
+-static inline void *value_ptr(struct node *n, uint32_t index)
++static inline void *value_ptr(struct btree_node *n, uint32_t index)
+ {
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
+ return value_base(n) + (value_size * index);
+@@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index)
+ /*
+ * Assumes the values are suitably-aligned and converts to core format.
+ */
+-static inline uint64_t value64(struct node *n, uint32_t index)
++static inline uint64_t value64(struct btree_node *n, uint32_t index)
+ {
+ __le64 *values_le = value_base(n);
+
+@@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
+ /*
+ * Searching for a key within a single node.
+ */
+-int lower_bound(struct node *n, uint64_t key);
++int lower_bound(struct btree_node *n, uint64_t key);
+
+ extern struct dm_block_validator btree_node_validator;
+
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index aa71e23..c4f2813 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -53,7 +53,7 @@
+ /*
+ * Some little utilities for moving node data around.
+ */
+-static void node_shift(struct node *n, int shift)
++static void node_shift(struct btree_node *n, int shift)
+ {
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
+@@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
+ }
+ }
+
+-static void node_copy(struct node *left, struct node *right, int shift)
++static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+ {
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(left->header.value_size);
+@@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
+ /*
+ * Delete a specific entry from a leaf node.
+ */
+-static void delete_at(struct node *n, unsigned index)
++static void delete_at(struct btree_node *n, unsigned index)
+ {
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+@@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+
+-static unsigned merge_threshold(struct node *n)
++static unsigned merge_threshold(struct btree_node *n)
+ {
+ return le32_to_cpu(n->header.max_entries) / 3;
+ }
+@@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
+ struct child {
+ unsigned index;
+ struct dm_block *block;
+- struct node *n;
++ struct btree_node *n;
+ };
+
+ static struct dm_btree_value_type le64_type = {
+@@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
+ .equal = NULL
+ };
+
+-static int init_child(struct dm_btree_info *info, struct node *parent,
++static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+ unsigned index, struct child *result)
+ {
+ int r, inc;
+@@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
+ return dm_tm_unlock(info->tm, c->block);
+ }
+
+-static void shift(struct node *left, struct node *right, int count)
++static void shift(struct btree_node *left, struct btree_node *right, int count)
+ {
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+@@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
+ right->header.nr_entries = cpu_to_le32(nr_right + count);
+ }
+
+-static void __rebalance2(struct dm_btree_info *info, struct node *parent,
++static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *r)
+ {
+- struct node *left = l->n;
+- struct node *right = r->n;
++ struct btree_node *left = l->n;
++ struct btree_node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned threshold = 2 * merge_threshold(left) + 1;
+@@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+ {
+ int r;
+- struct node *parent;
++ struct btree_node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+@@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ * in right, then rebalance2. This wastes some cpu, but I want something
+ * simple atm.
+ */
+-static void delete_center_node(struct dm_btree_info *info, struct node *parent,
++static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+- struct node *left, struct node *center, struct node *right,
++ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+@@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+ /*
+ * Redistributes entries among 3 sibling nodes.
+ */
+-static void redistribute3(struct dm_btree_info *info, struct node *parent,
++static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+- struct node *left, struct node *center, struct node *right,
++ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ int s;
+@@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+
+-static void __rebalance3(struct dm_btree_info *info, struct node *parent,
++static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r)
+ {
+- struct node *left = l->n;
+- struct node *center = c->n;
+- struct node *right = r->n;
++ struct btree_node *left = l->n;
++ struct btree_node *center = c->n;
++ struct btree_node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+@@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+ {
+ int r;
+- struct node *parent = dm_block_data(shadow_current(s));
++ struct btree_node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+@@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
+ {
+ int r;
+ struct dm_block *block;
+- struct node *n;
++ struct btree_node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+@@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
+ {
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+- struct node *n;
++ struct btree_node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+@@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
+ return r;
+ }
+
+-static int do_leaf(struct node *n, uint64_t key, unsigned *index)
++static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+ {
+ int i = lower_bound(n, key);
+
+@@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ uint64_t key, unsigned *index)
+ {
+ int i = *index, r;
+- struct node *n;
++ struct btree_node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+@@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+- struct node *n;
++ struct btree_node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index d9a7912..2f0805c 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+ {
+- struct node *n = dm_block_data(b);
++ struct btree_node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+
+ h->blocknr = cpu_to_le64(dm_block_location(b));
+@@ -38,7 +38,7 @@ static int node_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+ {
+- struct node *n = dm_block_data(b);
++ struct btree_node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+ size_t value_size;
+ __le32 csum_disk;
+@@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
+ return r;
+ }
+
+-struct node *ro_node(struct ro_spine *s)
++struct btree_node *ro_node(struct ro_spine *s)
+ {
+ struct dm_block *block;
+
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index d12b2cc..371f3d4 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ /*----------------------------------------------------------------*/
+
+ /* makes the assumption that no two keys are the same. */
+-static int bsearch(struct node *n, uint64_t key, int want_hi)
++static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
+ {
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+@@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
+ return want_hi ? hi : lo;
+ }
+
+-int lower_bound(struct node *n, uint64_t key)
++int lower_bound(struct btree_node *n, uint64_t key)
+ {
+ return bsearch(n, key, 0);
+ }
+
+-void inc_children(struct dm_transaction_manager *tm, struct node *n,
++void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ struct dm_btree_value_type *vt)
+ {
+ unsigned i;
+@@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ vt->inc(vt->context, value_ptr(n, i));
+ }
+
+-static int insert_at(size_t value_size, struct node *node, unsigned index,
++static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+ {
+@@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
+ {
+ int r;
+ struct dm_block *b;
+- struct node *n;
++ struct btree_node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
+ #define MAX_SPINE_DEPTH 64
+ struct frame {
+ struct dm_block *b;
+- struct node *n;
++ struct btree_node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+@@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
+ /*----------------------------------------------------------------*/
+
+ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+- int (*search_fn)(struct node *, uint64_t),
++ int (*search_fn)(struct btree_node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+ {
+ int i, r;
+@@ -406,7 +406,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+- struct node *ln, *rn, *pn;
++ struct btree_node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+@@ -491,7 +491,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+- struct node *pn, *ln, *rn;
++ struct btree_node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+@@ -576,7 +576,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ uint64_t key, unsigned *index)
+ {
+ int r, i = *index, top = 1;
+- struct node *node;
++ struct btree_node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+@@ -643,7 +643,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+- struct node *n;
++ struct btree_node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index f8b7771..7604f4e 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -21,6 +21,10 @@
+ #include <linux/irqdomain.h>
+ #include <linux/of.h>
+
++static struct device_type mfd_dev_type = {
++ .name = "mfd_device",
++};
++
+ int mfd_cell_enable(struct platform_device *pdev)
+ {
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+@@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
+ goto fail_device;
+
+ pdev->dev.parent = parent;
++ pdev->dev.type = &mfd_dev_type;
+
+ if (parent->of_node && cell->of_compatible) {
+ for_each_child_of_node(parent->of_node, np) {
+@@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
+
+ static int mfd_remove_devices_fn(struct device *dev, void *c)
+ {
+- struct platform_device *pdev = to_platform_device(dev);
+- const struct mfd_cell *cell = mfd_get_cell(pdev);
++ struct platform_device *pdev;
++ const struct mfd_cell *cell;
+ atomic_t **usage_count = c;
+
++ if (dev->type != &mfd_dev_type)
++ return 0;
++
++ pdev = to_platform_device(dev);
++ cell = mfd_get_cell(pdev);
++
+ /* find the base address of usage_count pointers (for freeing) */
+ if (!*usage_count || (cell->usage_count < *usage_count))
+ *usage_count = cell->usage_count;
+diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
+index 2febf88..38de846 100644
+--- a/drivers/mfd/wm8994-core.c
++++ b/drivers/mfd/wm8994-core.c
+@@ -557,6 +557,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
+ case 1:
+ case 2:
+ case 3:
++ case 4:
+ regmap_patch = wm1811_reva_patch;
+ patch_regs = ARRAY_SIZE(wm1811_reva_patch);
+ break;
+diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
+index 8d082b4..d971817 100644
+--- a/drivers/misc/sgi-xp/xpc_main.c
++++ b/drivers/misc/sgi-xp/xpc_main.c
+@@ -53,6 +53,10 @@
+ #include <linux/kthread.h>
+ #include "xpc.h"
+
++#ifdef CONFIG_X86_64
++#include <asm/traps.h>
++#endif
++
+ /* define two XPC debug device structures to be used with dev_dbg() et al */
+
+ struct device_driver xpc_dbg_name = {
+@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+ return NOTIFY_DONE;
+ }
+
++/* Used to only allow one cpu to complete disconnect */
++static unsigned int xpc_die_disconnecting;
++
+ /*
+ * Notify other partitions to deactivate from us by first disengaging from all
+ * references to our memory.
+@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
+ long keep_waiting;
+ long wait_to_print;
+
++ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
++ return;
++
+ /* keep xpc_hb_checker thread from doing anything (just in case) */
+ xpc_exiting = 1;
+
+@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
+ * about the lack of a heartbeat.
+ */
+ static int
+-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
++xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
+ {
+ #ifdef CONFIG_IA64 /* !!! temporary kludge */
+ switch (event) {
+@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+ break;
+ }
+ #else
+- xpc_die_deactivate();
++ struct die_args *die_args = _die_args;
++
++ switch (event) {
++ case DIE_TRAP:
++ if (die_args->trapnr == X86_TRAP_DF)
++ xpc_die_deactivate();
++
++ if (((die_args->trapnr == X86_TRAP_MF) ||
++ (die_args->trapnr == X86_TRAP_XF)) &&
++ !user_mode_vm(die_args->regs))
++ xpc_die_deactivate();
++
++ break;
++ case DIE_INT3:
++ case DIE_DEBUG:
++ break;
++ case DIE_OOPS:
++ case DIE_GPF:
++ default:
++ xpc_die_deactivate();
++ }
+ #endif
+
+ return NOTIFY_DONE;
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index e23f813..c80c588 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -237,15 +237,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+
+ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+ {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct pltfm_imx_data *imx_data = pltfm_host->priv;
++
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+- u16 val = readw(host->ioaddr + (reg ^ 2));
+- /*
+- * uSDHC supports SDHCI v3.0, but it's encoded as value
+- * 0x3 in host controller version register, which violates
+- * SDHCI_SPEC_300 definition. Work it around here.
+- */
+- if ((val & SDHCI_SPEC_VER_MASK) == 3)
+- return --val;
++ reg ^= 2;
++ if (is_imx6q_usdhc(imx_data)) {
++ /*
++ * The usdhc register returns a wrong host version.
++ * Correct it here.
++ */
++ return SDHCI_SPEC_300;
++ }
+ }
+
+ return readw(host->ioaddr + reg);
+diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
+index adb6c3e..2cdeab8 100644
+--- a/drivers/mtd/nand/cs553x_nand.c
++++ b/drivers/mtd/nand/cs553x_nand.c
+@@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+ this->ecc.hwctl = cs_enable_hwecc;
+ this->ecc.calculate = cs_calculate_ecc;
+ this->ecc.correct = nand_correct_data;
++ this->ecc.strength = 1;
+
+ /* Enable the following for a flash based bad block table */
+ this->bbt_options = NAND_BBT_USE_FLASH;
+@@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+ goto out_ior;
+ }
+
+- this->ecc.strength = 1;
+-
+ new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+
+ cs553x_mtd[cs] = new_mtd;
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+index a1f4332..b27e215 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+@@ -136,6 +136,15 @@ int gpmi_init(struct gpmi_nand_data *this)
+ if (ret)
+ goto err_out;
+
++ /*
++ * Reset BCH here, too. We got failures otherwise :(
++ * See later BCH reset for explanation of MX23 handling
++ */
++ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
++ if (ret)
++ goto err_out;
++
++
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 4c538e3..f56a48e 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -918,7 +918,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ break;
+ case LEC_ACK_ERROR:
+ netdev_dbg(dev, "ack error\n");
+- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
++ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL);
+ break;
+ case LEC_BIT1_ERROR:
+@@ -931,7 +931,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ break;
+ case LEC_CRC_ERROR:
+ netdev_dbg(dev, "CRC error\n");
+- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ break;
+ default:
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 963e2cc..8233e5e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -609,8 +609,7 @@ void close_candev(struct net_device *dev)
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- if (del_timer_sync(&priv->restart_timer))
+- dev_put(dev);
++ del_timer_sync(&priv->restart_timer);
+ can_flush_echo_skb(dev);
+ }
+ EXPORT_SYMBOL_GPL(close_candev);
+diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
+index 48b3d62..7a43d4d 100644
+--- a/drivers/net/can/pch_can.c
++++ b/drivers/net/can/pch_can.c
+@@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
+ stats->rx_errors++;
+ break;
+ case PCH_CRC_ERR:
+- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 9ded21e..44996a9 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -746,12 +746,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
+ }
+ if (err_status & HECC_CANES_CRCE) {
+ hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ }
+ if (err_status & HECC_CANES_ACKE) {
+ hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
++ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ }
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 711eb14..3756278 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -1245,14 +1245,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+ }
+
+-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+- MII_TG3_AUXCTL_ACTL_TX_6DB)
++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
++{
++ u32 val;
++ int err;
+
+-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+- MII_TG3_AUXCTL_ACTL_TX_6DB);
++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++
++ if (err)
++ return err;
++ if (enable)
++
++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++ else
++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++
++ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
++
++ return err;
++}
+
+ static int tg3_bmcr_reset(struct tg3 *tp)
+ {
+@@ -2185,7 +2197,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+
+ otp = tp->phy_otp;
+
+- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
++ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
+ return;
+
+ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+@@ -2210,7 +2222,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+@@ -2246,9 +2258,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+
+ if (!tp->setlpicnt) {
+ if (current_link_up == 1 &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2264,11 +2276,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ tg3_flag(tp, 57765_CLASS)) &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2412,7 +2424,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ tg3_writephy(tp, MII_CTRL1000,
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ if (err)
+ return err;
+
+@@ -2433,7 +2445,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+
+ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+@@ -2522,10 +2534,10 @@ static int tg3_phy_reset(struct tg3 *tp)
+
+ out:
+ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+ tg3_phydsp_write(tp, 0x000a, 0x0323);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+@@ -2534,14 +2546,14 @@ out:
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, 0x000a, 0x310b);
+ tg3_phydsp_write(tp, 0x201f, 0x9506);
+ tg3_phydsp_write(tp, 0x401f, 0x14e2);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+@@ -2550,7 +2562,7 @@ out:
+ } else
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ }
+
+@@ -3967,7 +3979,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ tw32(TG3_CPMU_EEE_MODE,
+ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ if (!err) {
+ u32 err2;
+
+@@ -4000,7 +4012,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ }
+
+- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
+ if (!err)
+ err = err2;
+ }
+@@ -6703,6 +6715,9 @@ static void tg3_poll_controller(struct net_device *dev)
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
++ if (tg3_irq_sync(tp))
++ return;
++
+ for (i = 0; i < tp->irq_cnt; i++)
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+ }
+@@ -15806,6 +15821,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
+ tp->pm_cap = pm_cap;
+ tp->rx_mode = TG3_DEF_RX_MODE;
+ tp->tx_mode = TG3_DEF_TX_MODE;
++ tp->irq_sync = 1;
+
+ if (tg3_debug > 0)
+ tp->msg_enable = tg3_debug;
+diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
+index 16814b3..e29c1b6 100644
+--- a/drivers/net/ethernet/calxeda/xgmac.c
++++ b/drivers/net/ethernet/calxeda/xgmac.c
+@@ -546,6 +546,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+ return -1;
+ }
+
++ /* All frames should fit into a single buffer */
++ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
++ return -1;
++
+ /* Check if packet has checksum already */
+ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+ !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 48cc4fb..8a747b7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -936,17 +936,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ {
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+- int i, err = 0, vector = 0;
++ int i, err = 0, vector = 0, free_vector = 0;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igb_msix_other, 0, netdev->name, adapter);
+ if (err)
+- goto out;
+- vector++;
++ goto err_out;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+
++ vector++;
++
+ q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+@@ -965,13 +966,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ igb_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+- goto out;
+- vector++;
++ goto err_free;
+ }
+
+ igb_configure_msix(adapter);
+ return 0;
+-out:
++
++err_free:
++ /* free already assigned IRQs */
++ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
++
++ vector--;
++ for (i = 0; i < vector; i++) {
++ free_irq(adapter->msix_entries[free_vector++].vector,
++ adapter->q_vector[i]);
++ }
++err_out:
+ return err;
+ }
+
+@@ -4661,11 +4671,13 @@ void igb_update_stats(struct igb_adapter *adapter,
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+- u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
++ u32 rqdpc = rd32(E1000_RQDPC(i));
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+- ring->rx_stats.drops += rqdpc_tmp;
+- net_stats->rx_fifo_errors += rqdpc_tmp;
++ if (rqdpc) {
++ ring->rx_stats.drops += rqdpc;
++ net_stats->rx_fifo_errors += rqdpc;
++ }
+
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 10bba09..2af24ba 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -630,10 +630,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ ring->tx_csum++;
+ }
+
+- /* Copy dst mac address to wqe */
+- ethh = (struct ethhdr *)skb->data;
+- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
++ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
++ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
++ * so that VFs and PF can communicate with each other
++ */
++ ethh = (struct ethhdr *)skb->data;
++ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
++ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
++ }
++
+ /* Handle LSO (TSO) packets */
+ if (lso_header_size) {
+ /* Mark opcode as LSO */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 2f816c6..8ca1ed8 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -1630,15 +1630,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
+ int i;
+
+ if (msi_x) {
+- /* In multifunction mode each function gets 2 msi-X vectors
+- * one for data path completions anf the other for asynch events
+- * or command completions */
+- if (mlx4_is_mfunc(dev)) {
+- nreq = 2;
+- } else {
+- nreq = min_t(int, dev->caps.num_eqs -
+- dev->caps.reserved_eqs, nreq);
+- }
++ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
++ nreq);
+
+ entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
+ if (!entries)
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+index bc165f4..695667d 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+- for (j = 0; j < cmd_buf->frag_count; j++) {
++ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index a77c558..d6a8218 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1963,10 +1963,12 @@ unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
++ nf->dma = 0ULL;
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
++ nf->dma = 0ULL;
+
+ out_err:
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index df7bbba..6c1c396 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -6088,13 +6088,6 @@ process_pkt:
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
+ }
+-
+- /* Work around for AMD plateform. */
+- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+- desc->opts2 = 0;
+- cur_rx++;
+- }
+ }
+
+ count = cur_rx - tp->cur_rx;
+diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
+index 0459c09..046526e0 100644
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -1802,7 +1802,7 @@ static void rhine_tx(struct net_device *dev)
+ rp->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ }
+- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
++ dev_kfree_skb(rp->tx_skbuff[entry]);
+ rp->tx_skbuff[entry] = NULL;
+ entry = (++rp->dirty_tx) % TX_RING_SIZE;
+ }
+@@ -2011,11 +2011,7 @@ static void rhine_slow_event_task(struct work_struct *work)
+ if (intr_status & IntrPCIErr)
+ netif_warn(rp, hw, dev, "PCI error\n");
+
+- napi_disable(&rp->napi);
+- rhine_irq_disable(rp);
+- /* Slow and safe. Consider __napi_schedule as a replacement ? */
+- napi_enable(&rp->napi);
+- napi_schedule(&rp->napi);
++ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
+
+ out_unlock:
+ mutex_unlock(&rp->task_lock);
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index e2a06fd..ba61c33 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
+
+ skb_orphan(skb);
+
++ /* Before queueing this packet to netif_rx(),
++ * make sure dst is refcounted.
++ */
++ skb_dst_force(skb);
++
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* it's OK to use per_cpu_ptr() because BHs are off */
+diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
+index 6650fde..9f1e947 100644
+--- a/drivers/net/wimax/i2400m/i2400m-usb.h
++++ b/drivers/net/wimax/i2400m/i2400m-usb.h
+@@ -152,6 +152,9 @@ enum {
+ /* Device IDs */
+ USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
++ USB_DEVICE_ID_I6150 = 0x07d6,
++ USB_DEVICE_ID_I6150_2 = 0x07d7,
++ USB_DEVICE_ID_I6150_3 = 0x07d9,
+ USB_DEVICE_ID_I6250 = 0x0187,
+ };
+
+diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
+index 713d033..080f363 100644
+--- a/drivers/net/wimax/i2400m/usb.c
++++ b/drivers/net/wimax/i2400m/usb.c
+@@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
++ case USB_DEVICE_ID_I6150:
++ case USB_DEVICE_ID_I6150_2:
++ case USB_DEVICE_ID_I6150_3:
+ case USB_DEVICE_ID_I6250:
+ i2400mu->i6050 = 1;
+ break;
+@@ -759,6 +762,9 @@ static
+ struct usb_device_id i2400mu_id_table[] = {
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
+ { USB_DEVICE(0x8086, 0x0181) },
+ { USB_DEVICE(0x8086, 0x1403) },
+diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
+index 2aab20e..68a4046 100644
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -848,7 +848,7 @@ ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
+ return;
+ dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
+ DMA_TO_DEVICE);
+- dev_kfree_skb_any(bf->skb);
++ ieee80211_free_txskb(ah->hw, bf->skb);
+ bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
+@@ -1575,7 +1575,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ return;
+
+ drop_packet:
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+
+ static void
+diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+index d56453e..b010a77 100644
+--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
++++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+@@ -61,7 +61,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ u16 qnum = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+index 6f7cf49..262e1e0 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+@@ -534,98 +534,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
+
+ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+- {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+- {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+- {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+- {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+- {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+- {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+- {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+- {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+- {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+- {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+- {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+- {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+- {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+- {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+- {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+- {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+- {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+- {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+- {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+- {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+- {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+- {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+- {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+- {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+- {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+- {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+- {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+- {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+- {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+- {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+- {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+- {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+- {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+- {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+- {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+- {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+- {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
++ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
++ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
++ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
++ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
++ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
++ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
++ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
++ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
++ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
++ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
++ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
++ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
++ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
++ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
++ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
++ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
++ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
++ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
++ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
++ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
++ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
++ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
++ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
++ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
++ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
++ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
++ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
++ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
++ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
++ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
++ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
++ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
++ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
++ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
++ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
++ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
++ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
++ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
++ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
++ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
++ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
++ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
++ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
++ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
++ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
++ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
++ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
++ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
++ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
++ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
++ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+- {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+- {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+- {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+- {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+- {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+- {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
++ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
++ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
++ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
++ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
++ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
++ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
++ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+index 3a1ff55..a633aea 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+@@ -68,13 +68,13 @@
+ #define AR9300_BASE_ADDR 0x3ff
+ #define AR9300_BASE_ADDR_512 0x1ff
+
+-#define AR9300_OTP_BASE 0x14000
+-#define AR9300_OTP_STATUS 0x15f18
++#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
++#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
+ #define AR9300_OTP_STATUS_TYPE 0x7
+ #define AR9300_OTP_STATUS_VALID 0x4
+ #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
+ #define AR9300_OTP_STATUS_SM_BUSY 0x1
+-#define AR9300_OTP_READ_DATA 0x15f1c
++#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
+
+ enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index 1b48414..4527d0d 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ bf->bf_buf_addr = 0;
++ bf->bf_mpdu = NULL;
+ }
+
+ skb = ieee80211_beacon_get(hw, vif);
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index e5cceb0..bbd249d 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+
+ if (chan && chan->noisefloor) {
+ s8 delta = chan->noisefloor -
++ ATH9K_NF_CAL_NOISE_THRESH -
+ ath9k_hw_get_default_nf(ah, chan);
+ if (delta > 0)
+ noise += delta;
+diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
+index 1060c19..60dcb6c 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.h
++++ b/drivers/net/wireless/ath/ath9k/calib.h
+@@ -21,6 +21,9 @@
+
+ #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+
++/* Internal noise floor can vary by about 6db depending on the frequency */
++#define ATH9K_NF_CAL_NOISE_THRESH 6
++
+ #define NUM_NF_READINGS 6
+ #define ATH9K_NF_CAL_HIST_MAX 5
+
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index 4a9570d..aac4a40 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
++ } else {
++ kfree_skb(skb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index 4480c0c..6b12d48 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -744,6 +744,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ return NULL;
+ }
+
++ list_del(&bf->list);
+ if (!bf->bf_mpdu)
+ return bf;
+
+@@ -1251,14 +1252,15 @@ requeue_drop_frag:
+ sc->rx.frag = NULL;
+ }
+ requeue:
++ list_add_tail(&bf->list, &sc->rx.rxbuf);
++ if (flush)
++ continue;
++
+ if (edma) {
+- list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+- list_move_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_buf_link(sc, bf);
+- if (!flush)
+- ath9k_hw_rxena(ah);
++ ath9k_hw_rxena(ah);
+ }
+ } while (1);
+
+diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
+index 7c899fc..ac593ab 100644
+--- a/drivers/net/wireless/b43/b43.h
++++ b/drivers/net/wireless/b43/b43.h
+@@ -7,6 +7,7 @@
+ #include <linux/hw_random.h>
+ #include <linux/bcma/bcma.h>
+ #include <linux/ssb/ssb.h>
++#include <linux/completion.h>
+ #include <net/mac80211.h>
+
+ #include "debugfs.h"
+@@ -718,6 +719,10 @@ enum b43_firmware_file_type {
+ struct b43_request_fw_context {
+ /* The device we are requesting the fw for. */
+ struct b43_wldev *dev;
++ /* a completion event structure needed if this call is asynchronous */
++ struct completion fw_load_complete;
++ /* a pointer to the firmware object */
++ const struct firmware *blob;
+ /* The type of firmware to request. */
+ enum b43_firmware_file_type req_type;
+ /* Error messages for each firmware type. */
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 777cd74..38bc5a7 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -409,7 +409,10 @@ static inline
+ struct b43_dmadesc_meta *meta)
+ {
+ if (meta->skb) {
+- dev_kfree_skb_any(meta->skb);
++ if (ring->tx)
++ ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
++ else
++ dev_kfree_skb_any(meta->skb);
+ meta->skb = NULL;
+ }
+ }
+@@ -1454,7 +1457,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(dev->wl->hw, skb);
+ err = 0;
+ goto out;
+ }
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 315b96e..9fdd198 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
+
+ /* DMA engine tuning knobs */
+ #define B43_TXRING_SLOTS 256
+-#define B43_RXRING_SLOTS 64
++#define B43_RXRING_SLOTS 256
+ #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
+ #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
+
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 46d9d4e..b70bc2d 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
+ b43warn(wl, text);
+ }
+
++static void b43_fw_cb(const struct firmware *firmware, void *context)
++{
++ struct b43_request_fw_context *ctx = context;
++
++ ctx->blob = firmware;
++ complete(&ctx->fw_load_complete);
++}
++
+ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+ const char *name,
+- struct b43_firmware_file *fw)
++ struct b43_firmware_file *fw, bool async)
+ {
+- const struct firmware *blob;
+ struct b43_fw_header *hdr;
+ u32 size;
+ int err;
+@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+ B43_WARN_ON(1);
+ return -ENOSYS;
+ }
+- err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
++ if (async) {
++ /* do this part asynchronously */
++ init_completion(&ctx->fw_load_complete);
++ err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
++ ctx->dev->dev->dev, GFP_KERNEL,
++ ctx, b43_fw_cb);
++ if (err < 0) {
++ pr_err("Unable to load firmware\n");
++ return err;
++ }
++ /* stall here until fw ready */
++ wait_for_completion(&ctx->fw_load_complete);
++ if (ctx->blob)
++ goto fw_ready;
++ /* On some ARM systems, the async request will fail, but the next sync
++		 * request works. For this reason, we fall through here
++ */
++ }
++ err = request_firmware(&ctx->blob, ctx->fwname,
++ ctx->dev->dev->dev);
+ if (err == -ENOENT) {
+ snprintf(ctx->errors[ctx->req_type],
+ sizeof(ctx->errors[ctx->req_type]),
+- "Firmware file \"%s\" not found\n", ctx->fwname);
++ "Firmware file \"%s\" not found\n",
++ ctx->fwname);
+ return err;
+ } else if (err) {
+ snprintf(ctx->errors[ctx->req_type],
+@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+ ctx->fwname, err);
+ return err;
+ }
+- if (blob->size < sizeof(struct b43_fw_header))
++fw_ready:
++ if (ctx->blob->size < sizeof(struct b43_fw_header))
+ goto err_format;
+- hdr = (struct b43_fw_header *)(blob->data);
++ hdr = (struct b43_fw_header *)(ctx->blob->data);
+ switch (hdr->type) {
+ case B43_FW_TYPE_UCODE:
+ case B43_FW_TYPE_PCM:
+ size = be32_to_cpu(hdr->size);
+- if (size != blob->size - sizeof(struct b43_fw_header))
++ if (size != ctx->blob->size - sizeof(struct b43_fw_header))
+ goto err_format;
+ /* fallthrough */
+ case B43_FW_TYPE_IV:
+@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
+ goto err_format;
+ }
+
+- fw->data = blob;
++ fw->data = ctx->blob;
+ fw->filename = name;
+ fw->type = ctx->req_type;
+
+@@ -2172,7 +2200,7 @@ err_format:
+ snprintf(ctx->errors[ctx->req_type],
+ sizeof(ctx->errors[ctx->req_type]),
+ "Firmware file \"%s\" format error.\n", ctx->fwname);
+- release_firmware(blob);
++ release_firmware(ctx->blob);
+
+ return -EPROTO;
+ }
+@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
+ goto err_no_ucode;
+ }
+ }
+- err = b43_do_request_fw(ctx, filename, &fw->ucode);
++ err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
+ if (err)
+ goto err_load;
+
+@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
+ else
+ goto err_no_pcm;
+ fw->pcm_request_failed = false;
+- err = b43_do_request_fw(ctx, filename, &fw->pcm);
++ err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
+ if (err == -ENOENT) {
+ /* We did not find a PCM file? Not fatal, but
+ * core rev <= 10 must do without hwcrypto then. */
+@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
+ default:
+ goto err_no_initvals;
+ }
+- err = b43_do_request_fw(ctx, filename, &fw->initvals);
++ err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
+ if (err)
+ goto err_load;
+
+@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
+ default:
+ goto err_no_initvals;
+ }
+- err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
++ err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
+ if (err)
+ goto err_load;
+
+@@ -3397,7 +3425,7 @@ static void b43_tx_work(struct work_struct *work)
+ break;
+ }
+ if (unlikely(err))
+- dev_kfree_skb(skb); /* Drop it */
++ ieee80211_free_txskb(wl->hw, skb);
+ err = 0;
+ }
+
+@@ -3418,7 +3446,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
+
+ if (unlikely(skb->len < 2 + 2 + 6)) {
+ /* Too short, this can't be a valid frame. */
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ B43_WARN_ON(skb_shinfo(skb)->nr_frags);
+@@ -4228,8 +4256,12 @@ redo:
+
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+- while (skb_queue_len(&wl->tx_queue[queue_num]))
+- dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
++ while (skb_queue_len(&wl->tx_queue[queue_num])) {
++ struct sk_buff *skb;
++
++ skb = skb_dequeue(&wl->tx_queue[queue_num]);
++ ieee80211_free_txskb(wl->hw, skb);
++ }
+ }
+
+ b43_mac_suspend(dev);
+diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
+index 8c684cd..abac25e 100644
+--- a/drivers/net/wireless/b43/main.h
++++ b/drivers/net/wireless/b43/main.h
+@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
+
+
+ struct b43_request_fw_context;
+-int b43_do_request_fw(struct b43_request_fw_context *ctx,
+- const char *name,
+- struct b43_firmware_file *fw);
++int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
++ struct b43_firmware_file *fw, bool async);
+ void b43_do_release_fw(struct b43_firmware_file *fw);
+
+ #endif /* B43_MAIN_H_ */
+diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
+index 3533ab8..a73ff8c 100644
+--- a/drivers/net/wireless/b43/pio.c
++++ b/drivers/net/wireless/b43/pio.c
+@@ -196,7 +196,7 @@ static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
+ for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
+ pack = &(q->packets[i]);
+ if (pack->skb) {
+- dev_kfree_skb_any(pack->skb);
++ ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
+ pack->skb = NULL;
+ }
+ }
+@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(dev->wl->hw, skb);
+ err = 0;
+ goto out;
+ }
+diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
+index a29da67..482476f 100644
+--- a/drivers/net/wireless/b43legacy/b43legacy.h
++++ b/drivers/net/wireless/b43legacy/b43legacy.h
+@@ -13,6 +13,7 @@
+
+ #include <linux/ssb/ssb.h>
+ #include <linux/ssb/ssb_driver_chipcommon.h>
++#include <linux/completion.h>
+
+ #include <net/mac80211.h>
+
+@@ -733,6 +734,10 @@ struct b43legacy_wldev {
+
+ /* Firmware data */
+ struct b43legacy_firmware fw;
++ const struct firmware *fwp; /* needed to pass fw pointer */
++
++ /* completion struct for firmware loading */
++ struct completion fw_load_complete;
+
+ /* Devicelist in struct b43legacy_wl (all 802.11 cores) */
+ struct list_head list;
+diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
+index 0ef08e0..aa87fb7 100644
+--- a/drivers/net/wireless/b43legacy/main.c
++++ b/drivers/net/wireless/b43legacy/main.c
+@@ -1513,9 +1513,17 @@ static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
+ "and download the correct firmware (version 3).\n");
+ }
+
++static void b43legacy_fw_cb(const struct firmware *firmware, void *context)
++{
++ struct b43legacy_wldev *dev = context;
++
++ dev->fwp = firmware;
++ complete(&dev->fw_load_complete);
++}
++
+ static int do_request_fw(struct b43legacy_wldev *dev,
+ const char *name,
+- const struct firmware **fw)
++ const struct firmware **fw, bool async)
+ {
+ char path[sizeof(modparam_fwpostfix) + 32];
+ struct b43legacy_fw_header *hdr;
+@@ -1528,7 +1536,24 @@ static int do_request_fw(struct b43legacy_wldev *dev,
+ snprintf(path, ARRAY_SIZE(path),
+ "b43legacy%s/%s.fw",
+ modparam_fwpostfix, name);
+- err = request_firmware(fw, path, dev->dev->dev);
++ b43legacyinfo(dev->wl, "Loading firmware %s\n", path);
++ if (async) {
++ init_completion(&dev->fw_load_complete);
++ err = request_firmware_nowait(THIS_MODULE, 1, path,
++ dev->dev->dev, GFP_KERNEL,
++ dev, b43legacy_fw_cb);
++ if (err) {
++ b43legacyerr(dev->wl, "Unable to load firmware\n");
++ return err;
++ }
++ /* stall here until fw ready */
++ wait_for_completion(&dev->fw_load_complete);
++ if (!dev->fwp)
++ err = -EINVAL;
++ *fw = dev->fwp;
++ } else {
++ err = request_firmware(fw, path, dev->dev->dev);
++ }
+ if (err) {
+ b43legacyerr(dev->wl, "Firmware file \"%s\" not found "
+ "or load failed.\n", path);
+@@ -1580,7 +1605,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
+ filename = "ucode4";
+ else
+ filename = "ucode5";
+- err = do_request_fw(dev, filename, &fw->ucode);
++ err = do_request_fw(dev, filename, &fw->ucode, true);
+ if (err)
+ goto err_load;
+ }
+@@ -1589,7 +1614,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
+ filename = "pcm4";
+ else
+ filename = "pcm5";
+- err = do_request_fw(dev, filename, &fw->pcm);
++ err = do_request_fw(dev, filename, &fw->pcm, false);
+ if (err)
+ goto err_load;
+ }
+@@ -1607,7 +1632,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
+ default:
+ goto err_no_initvals;
+ }
+- err = do_request_fw(dev, filename, &fw->initvals);
++ err = do_request_fw(dev, filename, &fw->initvals, false);
+ if (err)
+ goto err_load;
+ }
+@@ -1627,7 +1652,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
+ default:
+ goto err_no_initvals;
+ }
+- err = do_request_fw(dev, filename, &fw->initvals_band);
++ err = do_request_fw(dev, filename, &fw->initvals_band, false);
+ if (err)
+ goto err_load;
+ }
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+index a5edebe..c110674 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+@@ -1394,9 +1394,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
+ #endif
+ t->ms = ms;
+ t->periodic = (bool) periodic;
+- t->set = true;
+-
+- atomic_inc(&t->wl->callbacks);
++ if (!t->set) {
++ t->set = true;
++ atomic_inc(&t->wl->callbacks);
++ }
+
+ ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
+ }
+diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
+index 0370403..27eccd9 100644
+--- a/drivers/net/wireless/iwlegacy/common.c
++++ b/drivers/net/wireless/iwlegacy/common.c
+@@ -3957,17 +3957,21 @@ il_connection_init_rx_config(struct il_priv *il)
+
+ memset(&il->staging, 0, sizeof(il->staging));
+
+- if (!il->vif) {
++ switch (il->iw_mode) {
++ case NL80211_IFTYPE_UNSPECIFIED:
+ il->staging.dev_type = RXON_DEV_TYPE_ESS;
+- } else if (il->vif->type == NL80211_IFTYPE_STATION) {
++ break;
++ case NL80211_IFTYPE_STATION:
+ il->staging.dev_type = RXON_DEV_TYPE_ESS;
+ il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+- } else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
++ break;
++ case NL80211_IFTYPE_ADHOC:
+ il->staging.dev_type = RXON_DEV_TYPE_IBSS;
+ il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ il->staging.filter_flags =
+ RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
+- } else {
++ break;
++ default:
+ IL_ERR("Unsupported interface type %d\n", il->vif->type);
+ return;
+ }
+@@ -4550,8 +4554,7 @@ out:
+ EXPORT_SYMBOL(il_mac_add_interface);
+
+ static void
+-il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+- bool mode_change)
++il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
+ {
+ lockdep_assert_held(&il->mutex);
+
+@@ -4560,9 +4563,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+ il_force_scan_end(il);
+ }
+
+- if (!mode_change)
+- il_set_mode(il);
+-
++ il_set_mode(il);
+ }
+
+ void
+@@ -4575,8 +4576,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+
+ WARN_ON(il->vif != vif);
+ il->vif = NULL;
+-
+- il_teardown_interface(il, vif, false);
++ il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
++ il_teardown_interface(il, vif);
+ memset(il->bssid, 0, ETH_ALEN);
+
+ D_MAC80211("leave\n");
+@@ -4685,18 +4686,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ }
+
+ /* success */
+- il_teardown_interface(il, vif, true);
+ vif->type = newtype;
+ vif->p2p = false;
+- err = il_set_mode(il);
+- WARN_ON(err);
+- /*
+- * We've switched internally, but submitting to the
+- * device may have failed for some reason. Mask this
+- * error, because otherwise mac80211 will not switch
+- * (and set the interface type back) and we'll be
+- * out of sync with it.
+- */
++ il->iw_mode = newtype;
++ il_teardown_interface(il, vif);
+ err = 0;
+
+ out:
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index 13fbc4e..b879e13 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -161,7 +161,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+- if (!card || card->adapter) {
++ if (!card || !card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index fb21360..8951285 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -53,7 +53,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
+ */
+ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+ {
+- bool cancel_flag = false;
+ int status;
+ struct cmd_ctrl_node *cmd_queued;
+
+@@ -67,14 +66,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+ atomic_inc(&adapter->cmd_pending);
+
+ /* Wait for completion */
+- wait_event_interruptible(adapter->cmd_wait_q.wait,
+- *(cmd_queued->condition));
+- if (!*(cmd_queued->condition))
+- cancel_flag = true;
+-
+- if (cancel_flag) {
+- mwifiex_cancel_pending_ioctl(adapter);
+- dev_dbg(adapter->dev, "cmd cancel\n");
++ status = wait_event_interruptible(adapter->cmd_wait_q.wait,
++ *(cmd_queued->condition));
++ if (status) {
++ dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
++ return status;
+ }
+
+ status = adapter->cmd_wait_q.status;
+@@ -427,8 +423,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
+ return false;
+ }
+
+- wait_event_interruptible(adapter->hs_activate_wait_q,
+- adapter->hs_activate_wait_q_woken);
++ if (wait_event_interruptible(adapter->hs_activate_wait_q,
++ adapter->hs_activate_wait_q_woken)) {
++ dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
++ return false;
++ }
+
+ return true;
+ }
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index effb044..3d4dd4d 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -47,6 +47,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
+ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
++ {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */
+ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
+ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
+ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
+@@ -82,7 +83,9 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
++ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
++ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
+ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
+@@ -101,6 +104,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
+ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
+ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
++ /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */
+ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
+ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index 942e56b..a37df3a 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -980,7 +980,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ is_tx ? "Tx" : "Rx");
+
+ if (is_tx) {
+- rtl_lps_leave(hw);
++ schedule_work(&rtlpriv->
++ works.lps_leave_work);
+ ppsc->last_delaylps_stamp_jiffies =
+ jiffies;
+ }
+@@ -990,7 +991,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ }
+ } else if (ETH_P_ARP == ether_type) {
+ if (is_tx) {
+- rtl_lps_leave(hw);
++ schedule_work(&rtlpriv->works.lps_leave_work);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+@@ -1000,7 +1001,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
+
+ if (is_tx) {
+- rtl_lps_leave(hw);
++ schedule_work(&rtlpriv->works.lps_leave_work);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index b7e6607..6395412 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ /* RTL8188CUS-VL */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ /* 8188 Combo for BC4 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index aa970fc..6ce8484 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -42,8 +42,12 @@
+
+ static void usbctrl_async_callback(struct urb *urb)
+ {
+- if (urb)
+- kfree(urb->context);
++ if (urb) {
++ /* free dr */
++ kfree(urb->setup_packet);
++ /* free databuf */
++ kfree(urb->transfer_buffer);
++ }
+ }
+
+ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+@@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+ u8 reqtype;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+- struct rtl819x_async_write_data {
+- u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+- struct usb_ctrlrequest dr;
+- } *buf;
++ const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
++ u8 *databuf;
++
++ if (WARN_ON_ONCE(len > databuf_maxlen))
++ len = databuf_maxlen;
+
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+
+- buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+- if (!buf)
++ dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
++ if (!dr)
++ return -ENOMEM;
++
++ databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
++ if (!databuf) {
++ kfree(dr);
+ return -ENOMEM;
++ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+- kfree(buf);
++ kfree(databuf);
++ kfree(dr);
+ return -ENOMEM;
+ }
+
+- dr = &buf->dr;
+-
+ dr->bRequestType = reqtype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(len);
+ /* data are already in little-endian order */
+- memcpy(buf, pdata, len);
++ memcpy(databuf, pdata, len);
+ usb_fill_control_urb(urb, udev, pipe,
+- (unsigned char *)dr, buf, len,
+- usbctrl_async_callback, buf);
++ (unsigned char *)dr, databuf, len,
++ usbctrl_async_callback, NULL);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+- if (rc < 0)
+- kfree(buf);
++ if (rc < 0) {
++ kfree(databuf);
++ kfree(dr);
++ }
+ usb_free_urb(urb);
+ return rc;
+ }
+@@ -210,17 +222,16 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+ u16 index = REALTEK_USB_VENQT_CMD_IDX;
+ int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ u8 *buffer;
+- dma_addr_t dma_addr;
+
+- wvalue = (u16)(addr&0x0000ffff);
+- buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
++ wvalue = (u16)(addr & 0x0000ffff);
++ buffer = kmalloc(len, GFP_ATOMIC);
+ if (!buffer)
+ return;
+ memcpy(buffer, data, len);
+ usb_control_msg(udev, pipe, request, reqtype, wvalue,
+ index, buffer, len, 50);
+
+- usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
++ kfree(buffer);
+ }
+
+ static void _rtl_usb_io_handler_init(struct device *dev,
+@@ -543,8 +554,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+ WARN_ON(skb_queue_empty(&rx_queue));
+ while (!skb_queue_empty(&rx_queue)) {
+ _skb = skb_dequeue(&rx_queue);
+- _rtl_usb_rx_process_agg(hw, skb);
+- ieee80211_rx_irqsafe(hw, skb);
++ _rtl_usb_rx_process_agg(hw, _skb);
++ ieee80211_rx_irqsafe(hw, _skb);
+ }
+ }
+
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 94b79c3..9d7f172 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index b7d41f8..221f426 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
+ static void xenvif_down(struct xenvif *vif)
+ {
+ disable_irq(vif->irq);
++ del_timer_sync(&vif->credit_timeout);
+ xen_netbk_deschedule_xenvif(vif);
+ xen_netbk_remove_xenvif(vif);
+ }
+@@ -343,23 +344,26 @@ err:
+ return err;
+ }
+
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ struct net_device *dev = vif->dev;
+- if (netif_carrier_ok(dev)) {
+- rtnl_lock();
+- netif_carrier_off(dev); /* discard queued packets */
+- if (netif_running(dev))
+- xenvif_down(vif);
+- rtnl_unlock();
+- xenvif_put(vif);
+- }
++
++ rtnl_lock();
++ netif_carrier_off(dev); /* discard queued packets */
++ if (netif_running(dev))
++ xenvif_down(vif);
++ rtnl_unlock();
++ xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++ if (netif_carrier_ok(vif->dev))
++ xenvif_carrier_off(vif);
+
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+
+- del_timer_sync(&vif->credit_timeout);
+-
+ if (vif->irq)
+ unbind_from_irqhandler(vif->irq, vif);
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 682633b..6aa059e 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
+ atomic_dec(&netbk->netfront_count);
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status);
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st);
+@@ -850,7 +851,7 @@ static void netbk_tx_err(struct xenvif *vif,
+
+ do {
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- if (cons >= end)
++ if (cons == end)
+ break;
+ txp = RING_GET_REQUEST(&vif->tx, cons++);
+ } while (1);
+@@ -859,6 +860,13 @@ static void netbk_tx_err(struct xenvif *vif,
+ xenvif_put(vif);
+ }
+
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++ netdev_err(vif->dev, "fatal error; disabling device\n");
++ xenvif_carrier_off(vif);
++ xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+@@ -872,29 +880,33 @@ static int netbk_count_requests(struct xenvif *vif,
+
+ do {
+ if (frags >= work_to_do) {
+- netdev_dbg(vif->dev, "Need more frags\n");
+- return -frags;
++ netdev_err(vif->dev, "Need more frags\n");
++ netbk_fatal_tx_err(vif);
++ return -ENODATA;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_dbg(vif->dev, "Too many frags\n");
+- return -frags;
++ netdev_err(vif->dev, "Too many frags\n");
++ netbk_fatal_tx_err(vif);
++ return -E2BIG;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- netdev_dbg(vif->dev, "Frags galore\n");
+- return -frags;
++ netdev_err(vif->dev, "Frag is bigger than frame.\n");
++ netbk_fatal_tx_err(vif);
++ return -EIO;
+ }
+
+ first->size -= txp->size;
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+- return -frags;
++ netbk_fatal_tx_err(vif);
++ return -EINVAL;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+ return frags;
+@@ -937,7 +949,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ pending_idx = netbk->pending_ring[index];
+ page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ if (!page)
+- return NULL;
++ goto err;
+
+ gop->source.u.ref = txp->gref;
+ gop->source.domid = vif->domid;
+@@ -959,6 +971,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ }
+
+ return gop;
++err:
++ /* Unwind, freeing all pages and sending error responses. */
++ while (i-- > start) {
++ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
++ XEN_NETIF_RSP_ERROR);
++ }
++ /* The head too, if necessary. */
++ if (start)
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
++
++ return NULL;
+ }
+
+ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+@@ -967,30 +990,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ {
+ struct gnttab_copy *gop = *gopp;
+ u16 pending_idx = *((u16 *)skb->data);
+- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+- struct xenvif *vif = pending_tx_info[pending_idx].vif;
+- struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
+
+ /* Check status of header. */
+ err = gop->status;
+- if (unlikely(err)) {
+- pending_ring_idx_t index;
+- index = pending_index(netbk->pending_prod++);
+- txp = &pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
+- }
++ if (unlikely(err))
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
+- pending_ring_idx_t index;
+
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+
+@@ -999,16 +1012,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
+ /* Error on this fragment: respond to client with an error. */
+- txp = &netbk->pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Not the first error? Preceding frags already invalidated. */
+ if (err)
+@@ -1016,10 +1025,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+
+ /* First error: invalidate header and preceding fragments. */
+ pending_idx = *((u16 *)skb->data);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ for (j = start; j < i; j++) {
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ /* Remember the error: invalidate all subsequent fragments. */
+@@ -1053,7 +1062,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+
+ /* Take an extra reference to offset xen_netbk_idx_release */
+ get_page(netbk->mmap_pages[pending_idx]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+ }
+
+@@ -1066,7 +1075,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+- netdev_dbg(vif->dev, "Missing extra info\n");
++ netdev_err(vif->dev, "Missing extra info\n");
++ netbk_fatal_tx_err(vif);
+ return -EBADR;
+ }
+
+@@ -1075,8 +1085,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ vif->tx.req_cons = ++cons;
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "Invalid extra type: %d\n", extra.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1092,13 +1103,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
+ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
++ netdev_err(vif->dev, "GSO size must not be zero.\n");
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1235,9 +1248,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+
+ /* Get a netif from the list with work to do. */
+ vif = poll_net_schedule_list(netbk);
++ /* This can sometimes happen because the test of
++ * list_empty(net_schedule_list) at the top of the
++ * loop is unlocked. Just go back and have another
++ * look.
++ */
+ if (!vif)
+ continue;
+
++ if (vif->tx.sring->req_prod - vif->tx.req_cons >
++ XEN_NETIF_TX_RING_SIZE) {
++ netdev_err(vif->dev,
++ "Impossible number of requests. "
++ "req_prod %d, req_cons %d, size %ld\n",
++ vif->tx.sring->req_prod, vif->tx.req_cons,
++ XEN_NETIF_TX_RING_SIZE);
++ netbk_fatal_tx_err(vif);
++ continue;
++ }
++
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ if (!work_to_do) {
+ xenvif_put(vif);
+@@ -1265,17 +1294,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ work_to_do = xen_netbk_get_extras(vif, extras,
+ work_to_do);
+ idx = vif->tx.req_cons;
+- if (unlikely(work_to_do < 0)) {
+- netbk_tx_err(vif, &txreq, idx);
++ if (unlikely(work_to_do < 0))
+ continue;
+- }
+ }
+
+ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+- if (unlikely(ret < 0)) {
+- netbk_tx_err(vif, &txreq, idx - ret);
++ if (unlikely(ret < 0))
+ continue;
+- }
++
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1287,11 +1313,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset&~PAGE_MASK) + txreq.size);
+- netbk_tx_err(vif, &txreq, idx);
++ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
+@@ -1319,8 +1345,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(vif, skb, gso)) {
++ /* Failure in netbk_set_skb_gso is fatal. */
+ kfree_skb(skb);
+- netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ }
+@@ -1419,7 +1445,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ if (txp->flags & XEN_NETTXF_csum_blank)
+@@ -1474,7 +1500,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
+
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status)
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+@@ -1488,7 +1515,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+
+ vif = pending_tx_info->vif;
+
+- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
++ make_tx_response(vif, &pending_tx_info->req, status);
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = pending_idx;
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index 26ffd3e..2c113de 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -44,7 +44,6 @@ extern bool pciehp_poll_mode;
+ extern int pciehp_poll_time;
+ extern bool pciehp_debug;
+ extern bool pciehp_force;
+-extern struct workqueue_struct *pciehp_wq;
+
+ #define dbg(format, arg...) \
+ do { \
+@@ -78,6 +77,7 @@ struct slot {
+ struct hotplug_slot *hotplug_slot;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
++ struct workqueue_struct *wq;
+ };
+
+ struct event_info {
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 365c6b9..9e39df9 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -42,7 +42,6 @@ bool pciehp_debug;
+ bool pciehp_poll_mode;
+ int pciehp_poll_time;
+ bool pciehp_force;
+-struct workqueue_struct *pciehp_wq;
+
+ #define DRIVER_VERSION "0.4"
+ #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+@@ -340,18 +339,13 @@ static int __init pcied_init(void)
+ {
+ int retval = 0;
+
+- pciehp_wq = alloc_workqueue("pciehp", 0, 0);
+- if (!pciehp_wq)
+- return -ENOMEM;
+-
+ pciehp_firmware_init();
+ retval = pcie_port_service_register(&hpdriver_portdrv);
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+- if (retval) {
+- destroy_workqueue(pciehp_wq);
++ if (retval)
+ dbg("Failure to register service\n");
+- }
++
+ return retval;
+ }
+
+@@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void)
+ {
+ dbg("unload_pciehpd()\n");
+ pcie_port_service_unregister(&hpdriver_portdrv);
+- destroy_workqueue(pciehp_wq);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+ }
+
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 27f4429..38f0186 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+- queue_work(pciehp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+ }
+@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
+ kfree(info);
+ goto out;
+ }
+- queue_work(pciehp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+ }
+@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
+ if (ATTN_LED(ctrl))
+ pciehp_set_attention_status(p_slot, 0);
+
+- queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
++ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
+ else
+ p_slot->state = POWERON_STATE;
+
+- queue_work(pciehp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ }
+
+ static void interrupt_event_handler(struct work_struct *work)
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 302451e..61632c5 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
+ static int pcie_init_slot(struct controller *ctrl)
+ {
+ struct slot *slot;
++ char name[32];
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
++ snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
++ slot->wq = alloc_workqueue(name, 0, 0);
++ if (!slot->wq)
++ goto abort;
++
+ slot->ctrl = ctrl;
+ mutex_init(&slot->lock);
+ INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
+ ctrl->slot = slot;
+ return 0;
++abort:
++ kfree(slot);
++ return -ENOMEM;
+ }
+
+ static void pcie_cleanup_slot(struct controller *ctrl)
+ {
+ struct slot *slot = ctrl->slot;
+ cancel_delayed_work(&slot->work);
+- flush_workqueue(pciehp_wq);
++ destroy_workqueue(slot->wq);
+ kfree(slot);
+ }
+
+diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
+index ca64932..1b69d95 100644
+--- a/drivers/pci/hotplug/shpchp.h
++++ b/drivers/pci/hotplug/shpchp.h
+@@ -47,7 +47,6 @@ extern bool shpchp_poll_mode;
+ extern int shpchp_poll_time;
+ extern bool shpchp_debug;
+ extern struct workqueue_struct *shpchp_wq;
+-extern struct workqueue_struct *shpchp_ordered_wq;
+
+ #define dbg(format, arg...) \
+ do { \
+diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
+index b6de307..8c6d645 100644
+--- a/drivers/pci/hotplug/shpchp_core.c
++++ b/drivers/pci/hotplug/shpchp_core.c
+@@ -40,7 +40,6 @@ bool shpchp_debug;
+ bool shpchp_poll_mode;
+ int shpchp_poll_time;
+ struct workqueue_struct *shpchp_wq;
+-struct workqueue_struct *shpchp_ordered_wq;
+
+ #define DRIVER_VERSION "0.4"
+ #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+@@ -181,7 +180,6 @@ void cleanup_slots(struct controller *ctrl)
+ list_del(&slot->slot_list);
+ cancel_delayed_work(&slot->work);
+ flush_workqueue(shpchp_wq);
+- flush_workqueue(shpchp_ordered_wq);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+ }
+@@ -370,17 +368,10 @@ static int __init shpcd_init(void)
+ if (!shpchp_wq)
+ return -ENOMEM;
+
+- shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
+- if (!shpchp_ordered_wq) {
+- destroy_workqueue(shpchp_wq);
+- return -ENOMEM;
+- }
+-
+ retval = pci_register_driver(&shpc_driver);
+ dbg("%s: pci_register_driver = %d\n", __func__, retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ if (retval) {
+- destroy_workqueue(shpchp_ordered_wq);
+ destroy_workqueue(shpchp_wq);
+ }
+ return retval;
+@@ -390,7 +381,6 @@ static void __exit shpcd_cleanup(void)
+ {
+ dbg("unload_shpchpd()\n");
+ pci_unregister_driver(&shpc_driver);
+- destroy_workqueue(shpchp_ordered_wq);
+ destroy_workqueue(shpchp_wq);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+ }
+diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
+index f9b5a52..fd2cae9 100644
+--- a/drivers/pci/hotplug/shpchp_ctrl.c
++++ b/drivers/pci/hotplug/shpchp_ctrl.c
+@@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
+ kfree(info);
+ goto out;
+ }
+- queue_work(shpchp_ordered_wq, &info->work);
++ queue_work(shpchp_wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+ }
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 1b7d05d..b0c92a5 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -649,6 +649,7 @@ static void aer_recover_work_func(struct work_struct *work)
+ continue;
+ }
+ do_recovery(pdev, entry.severity);
++ pci_dev_put(pdev);
+ }
+ }
+ #endif
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index b500840..474f22f 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -798,6 +798,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
+ {
+ struct pci_dev *child;
+
++ if (aspm_force)
++ return;
++
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 5155317..161e7f0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2686,7 +2686,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ if (PCI_FUNC(dev->devfn))
+ return;
+ /*
+- * RICOH 0xe823 SD/MMC card reader fails to recognize
++ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
+ * certain types of SD/MMC cards. Lowering the SD base
+ * clock frequency from 200Mhz to 50Mhz fixes this issue.
+ *
+@@ -2697,7 +2697,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ * 0xf9 - Key register for 0x150
+ * 0xfc - key register for 0xe1
+ */
+- if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
++ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
++ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+@@ -2724,6 +2725,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+ #endif /*CONFIG_MMC_RICOH_MMC*/
+diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
+index 04a4861..073c33f 100644
+--- a/drivers/pci/remove.c
++++ b/drivers/pci/remove.c
+@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)
+
+ static void pci_stop_dev(struct pci_dev *dev)
+ {
++ pci_pme_active(dev, false);
++
+ if (dev->is_added) {
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
+index 86e4a1a..6bb02ab 100644
+--- a/drivers/pcmcia/vrc4171_card.c
++++ b/drivers/pcmcia/vrc4171_card.c
+@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
+ socket = &vrc4171_sockets[slot];
+ socket->csc_irq = search_nonuse_irq();
+ socket->io_irq = search_nonuse_irq();
++ spin_lock_init(&socket->lock);
+
+ return 0;
+ }
+diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
+index 7481146..97c2be1 100644
+--- a/drivers/platform/x86/ibm_rtl.c
++++ b/drivers/platform/x86/ibm_rtl.c
+@@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {
+ if (force)
+ pr_warn("module loaded by force\n");
+ /* first ensure that we are running on IBM HW */
+- else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
++ else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
+ return -ENODEV;
+
+ /* Get the address for the Extended BIOS Data Area */
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index dd90d15..d1f0300 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -26,6 +26,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/debugfs.h>
+ #include <linux/ctype.h>
++#include <linux/efi.h>
+ #include <acpi/video.h>
+
+ /*
+@@ -1523,6 +1524,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ },
+ .driver_data = &samsung_broken_acpi_video,
+ },
++ {
++ .callback = samsung_dmi_matched,
++ .ident = "N250P",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
++ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
++ },
++ .driver_data = &samsung_broken_acpi_video,
++ },
+ { },
+ };
+ MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
+@@ -1534,6 +1545,9 @@ static int __init samsung_init(void)
+ struct samsung_laptop *samsung;
+ int ret;
+
++ if (efi_enabled(EFI_BOOT))
++ return -ENODEV;
++
+ quirks = &samsung_unknown;
+ if (!force && !dmi_check_system(samsung_dmi_table))
+ return -ENODEV;
+diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
+index e49871d..3c5c353 100644
+--- a/drivers/pnp/pnpacpi/core.c
++++ b/drivers/pnp/pnpacpi/core.c
+@@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
+ if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \
+ return 0
+ #define TEST_ALPHA(c) \
+- if (!('@' <= (c) || (c) <= 'Z')) \
++ if (!('A' <= (c) && (c) <= 'Z')) \
+ return 0
+ static int __init ispnpidacpi(const char *id)
+ {
+diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
+index 7413885..d4b9b1e 100644
+--- a/drivers/regulator/wm831x-dcdc.c
++++ b/drivers/regulator/wm831x-dcdc.c
+@@ -290,7 +290,7 @@ static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
+ if (vsel > dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x, dvs_reg,
+ WM831X_DC1_DVS_VSEL_MASK,
+- dcdc->dvs_vsel);
++ vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
+index dd2aeee..8f8c8ae 100644
+--- a/drivers/rtc/rtc-isl1208.c
++++ b/drivers/rtc/rtc-isl1208.c
+@@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data)
+ {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ struct i2c_client *client = data;
++ struct rtc_device *rtc = i2c_get_clientdata(client);
+ int handled = 0, sr, err;
+
+ /*
+@@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data)
+ if (sr & ISL1208_REG_SR_ALM) {
+ dev_dbg(&client->dev, "alarm!\n");
+
++ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
++
+ /* Clear the alarm */
+ sr &= ~ISL1208_REG_SR_ALM;
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
+index 9e94fb1..44878da 100644
+--- a/drivers/rtc/rtc-vt8500.c
++++ b/drivers/rtc/rtc-vt8500.c
+@@ -69,7 +69,7 @@
+ | ALARM_SEC_BIT)
+
+ #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */
+-#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */
++#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */
+ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */
+ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
+ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
+@@ -118,7 +118,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
+ tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
+ tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
+- tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
++ tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1;
+ tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
+ + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
+ tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
+@@ -137,8 +137,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ }
+
+ writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
+- | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
+- | (bin2bcd(tm->tm_mday)),
++ | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
++ | (bin2bcd(tm->tm_mday))
++ | ((tm->tm_year >= 200) << DATE_CENTURY_S),
+ vt8500_rtc->regbase + VT8500_RTC_DS);
+ writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
+ | (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
+@@ -248,7 +249,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
+ }
+
+ /* Enable RTC and set it to 24-hour mode */
+- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
++ writel(VT8500_RTC_CR_ENABLE,
+ vt8500_rtc->regbase + VT8500_RTC_CR);
+
+ vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
+diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
+index 368368f..908d287 100644
+--- a/drivers/s390/cio/device_pgid.c
++++ b/drivers/s390/cio/device_pgid.c
+@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+ * Determine pathgroup state from PGID data.
+ */
+ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+- int *mismatch, int *reserved, u8 *reset)
++ int *mismatch, u8 *reserved, u8 *reset)
+ {
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+- *reserved = 1;
++ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
+ continue;
+@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+- int reserved = 0;
++ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+- if (reserved)
++ if (reserved == cdev->private->pgid_valid_mask)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
+ }
+ out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+- "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
++ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+ switch (rc) {
+diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
+index 47cccd5..9c77c8b 100644
+--- a/drivers/s390/kvm/kvm_virtio.c
++++ b/drivers/s390/kvm/kvm_virtio.c
+@@ -419,6 +419,26 @@ static void kvm_extint_handler(struct ext_code ext_code,
+ }
+
+ /*
++ * For s390-virtio, we expect a page above main storage containing
++ * the virtio configuration. Try to actually load from this area
++ * in order to figure out if the host provides this page.
++ */
++static int __init test_devices_support(unsigned long addr)
++{
++ int ret = -EIO;
++
++ asm volatile(
++ "0: lura 0,%1\n"
++ "1: xgr %0,%0\n"
++ "2:\n"
++ EX_TABLE(0b,2b)
++ EX_TABLE(1b,2b)
++ : "+d" (ret)
++ : "a" (addr)
++ : "0", "cc");
++ return ret;
++}
++/*
+ * Init function for virtio
+ * devices are in a single page above top of "normal" mem
+ */
+@@ -429,21 +449,23 @@ static int __init kvm_devices_init(void)
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+
++ if (test_devices_support(real_memory_size) < 0)
++ return -ENODEV;
++
++ rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
++ if (rc)
++ return rc;
++
++ kvm_devices = (void *) real_memory_size;
++
+ kvm_root = root_device_register("kvm_s390");
+ if (IS_ERR(kvm_root)) {
+ rc = PTR_ERR(kvm_root);
+ printk(KERN_ERR "Could not register kvm_s390 root device");
++ vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+ return rc;
+ }
+
+- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+- if (rc) {
+- root_device_unregister(kvm_root);
+- return rc;
+- }
+-
+- kvm_devices = (void *) real_memory_size;
+-
+ INIT_WORK(&hotplug_work, hotplug_devices);
+
+ service_subclass_irq_register();
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index 1286a8a..1c91061 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -633,7 +633,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
+index 8f7eb4f..487aa6f 100644
+--- a/drivers/scsi/mvsas/mv_94xx.h
++++ b/drivers/scsi/mvsas/mv_94xx.h
+@@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
+ #define SPI_ADDR_VLD_94XX (1U << 1)
+ #define SPI_CTRL_SpiStart_94XX (1U << 0)
+
+-#define mv_ffc(x) ffz(x)
+-
+ static inline int
+ mv_ffc64(u64 v)
+ {
+- int i;
+- i = mv_ffc((u32)v);
+- if (i >= 0)
+- return i;
+- i = mv_ffc((u32)(v>>32));
+-
+- if (i != 0)
+- return 32 + i;
+-
+- return -1;
++ u64 x = ~v;
++ return x ? __ffs64(x) : -1;
+ }
+
+ #define r_reg_set_enable(i) \
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 4539d59..a3776d6 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=x%x\n",
+ mvi, task, slot, slot_idx);
+- mvs_tmf_timedout((unsigned long)task);
++ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
+index c04a4f5..da24955 100644
+--- a/drivers/scsi/mvsas/mv_sas.h
++++ b/drivers/scsi/mvsas/mv_sas.h
+@@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
+ #define DEV_IS_EXPANDER(type) \
+ ((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+-#define bit(n) ((u32)1 << n)
++#define bit(n) ((u64)1 << n)
+
+ #define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc) = (__lseq_mask), (__lseq) = 0; \
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index fb8cd38..0076210 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3801,9 +3801,9 @@ qla2x00_do_dpc(void *data)
+ "ISP abort end.\n");
+ }
+
+- if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
++ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
++ &base_vha->dpc_flags)) {
+ qla2x00_update_fcports(base_vha);
+- clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+ }
+
+ if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index ce5224c..931a7d9 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -247,11 +247,11 @@ show_shost_active_mode(struct device *dev,
+
+ static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+
+-static int check_reset_type(char *str)
++static int check_reset_type(const char *str)
+ {
+- if (strncmp(str, "adapter", 10) == 0)
++ if (sysfs_streq(str, "adapter"))
+ return SCSI_ADAPTER_RESET;
+- else if (strncmp(str, "firmware", 10) == 0)
++ else if (sysfs_streq(str, "firmware"))
+ return SCSI_FIRMWARE_RESET;
+ else
+ return 0;
+@@ -264,12 +264,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct scsi_host_template *sht = shost->hostt;
+ int ret = -EINVAL;
+- char str[10];
+ int type;
+
+- sscanf(buf, "%s", str);
+- type = check_reset_type(str);
+-
++ type = check_reset_type(buf);
+ if (!type)
+ goto exit_store_host_reset;
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 4df73e5..8afedd6 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2926,10 +2926,6 @@ static int __init init_sd(void)
+ if (err)
+ goto err_out;
+
+- err = scsi_register_driver(&sd_template.gendrv);
+- if (err)
+- goto err_out_class;
+-
+ sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
+ 0, 0, NULL);
+ if (!sd_cdb_cache) {
+@@ -2943,8 +2939,15 @@ static int __init init_sd(void)
+ goto err_out_cache;
+ }
+
++ err = scsi_register_driver(&sd_template.gendrv);
++ if (err)
++ goto err_out_driver;
++
+ return 0;
+
++err_out_driver:
++ mempool_destroy(sd_cdb_pool);
++
+ err_out_cache:
+ kmem_cache_destroy(sd_cdb_cache);
+
+@@ -2967,10 +2970,10 @@ static void __exit exit_sd(void)
+
+ SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+
++ scsi_unregister_driver(&sd_template.gendrv);
+ mempool_destroy(sd_cdb_pool);
+ kmem_cache_destroy(sd_cdb_cache);
+
+- scsi_unregister_driver(&sd_template.gendrv);
+ class_unregister(&sd_disk_class);
+
+ for (i = 0; i < SD_MAJORS; i++)
+diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
+index 6cee785..2701546 100644
+--- a/drivers/staging/comedi/Kconfig
++++ b/drivers/staging/comedi/Kconfig
+@@ -444,6 +444,7 @@ config COMEDI_ADQ12B
+
+ config COMEDI_NI_AT_A2150
+ tristate "NI AT-A2150 ISA card support"
++ select COMEDI_FC
+ depends on VIRT_TO_BUS
+ ---help---
+ Enable support for National Instruments AT-A2150 cards
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 41dea18..da45902 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -1545,8 +1545,16 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ /* Device config is special, because it must work on
+ * an unconfigured device. */
+ if (cmd == COMEDI_DEVCONFIG) {
++ if (minor >= COMEDI_NUM_BOARD_MINORS) {
++ /* Device config not appropriate on non-board minors. */
++ rc = -ENOTTY;
++ goto done;
++ }
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
++ if (rc == 0)
++ /* Evade comedi_auto_unconfig(). */
++ dev_file_info->hardware_device = NULL;
+ goto done;
+ }
+
+diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
+index 523a809..e38bd64 100644
+--- a/drivers/staging/comedi/drivers/comedi_test.c
++++ b/drivers/staging/comedi/drivers/comedi_test.c
+@@ -396,7 +396,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+ {
+ devpriv->timer_running = 0;
+- del_timer(&devpriv->timer);
++ del_timer_sync(&devpriv->timer);
+ return 0;
+ }
+
+diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
+index 89f4d43..af5007c 100644
+--- a/drivers/staging/comedi/drivers/ni_pcimio.c
++++ b/drivers/staging/comedi/drivers/ni_pcimio.c
+@@ -963,7 +963,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -982,7 +982,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1001,7 +1001,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1037,7 +1037,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1056,7 +1056,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1092,7 +1092,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1111,7 +1111,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1147,7 +1147,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index c758c40..ad53781 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -64,6 +64,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
+ /* Belkin */
+ {USB_DEVICE(0x050D, 0x945A)},
++ /* ISY IWL - Belkin clone */
++ {USB_DEVICE(0x050D, 0x11F1)},
+ /* Corega */
+ {USB_DEVICE(0x07AA, 0x0047)},
+ /* D-Link */
+diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+index df95337..7616f05 100644
+--- a/drivers/staging/speakup/synth.c
++++ b/drivers/staging/speakup/synth.c
+@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
+
+ mutex_lock(&spk_mutex);
+ /* First, check if we already have it loaded. */
+- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
++ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ if (strcmp(synths[i]->name, synth_name) == 0)
+ synth = synths[i];
+
+@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
+ int i;
+ int status = 0;
+ mutex_lock(&spk_mutex);
+- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
++ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ /* synth_remove() is responsible for rotating the array down */
+ if (in_synth == synths[i]) {
+ mutex_unlock(&spk_mutex);
+diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
+index 6b2ec39..806cbf7 100644
+--- a/drivers/staging/vt6656/bssdb.h
++++ b/drivers/staging/vt6656/bssdb.h
+@@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject {
+ } SRSNCapObject, *PSRSNCapObject;
+
+ // BSS info(AP)
+-#pragma pack(1)
+ typedef struct tagKnownBSS {
+ // BSS info
+ BOOL bActive;
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 3aa895e..a510b29 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -1238,7 +1238,7 @@ static BOOL s_bHandleRxEncryption (
+
+ PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
+ *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
+ if (byDecMode == KEY_CTL_TKIP) {
+ *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
+ } else {
+@@ -1349,7 +1349,7 @@ static BOOL s_bHostWepRxEncryption (
+
+ PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
+ *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
+
+ if (byDecMode == KEY_CTL_TKIP) {
+ *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
+diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
+index 3734e2c..91ceb77 100644
+--- a/drivers/staging/vt6656/int.h
++++ b/drivers/staging/vt6656/int.h
+@@ -34,7 +34,6 @@
+ #include "device.h"
+
+ /*--------------------- Export Definitions -------------------------*/
+-#pragma pack(1)
+ typedef struct tagSINTData {
+ BYTE byTSR0;
+ BYTE byPkt0;
+diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
+index 22710ce..ae6e2d2 100644
+--- a/drivers/staging/vt6656/iocmd.h
++++ b/drivers/staging/vt6656/iocmd.h
+@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
+ // Ioctl interface structure
+ // Command structure
+ //
+-#pragma pack(1)
+ typedef struct tagSCmdRequest {
+ u8 name[16];
+ void *data;
+ u16 wResult;
+ u16 wCmdCode;
+-} SCmdRequest, *PSCmdRequest;
++} __packed SCmdRequest, *PSCmdRequest;
+
+ //
+ // Scan
+@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
+
+ u8 ssid[SSID_MAXLEN + 2];
+
+-} SCmdScan, *PSCmdScan;
++} __packed SCmdScan, *PSCmdScan;
+
+ //
+ // BSS Join
+@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
+ BOOL bPSEnable;
+ BOOL bShareKeyAuth;
+
+-} SCmdBSSJoin, *PSCmdBSSJoin;
++} __packed SCmdBSSJoin, *PSCmdBSSJoin;
+
+ //
+ // Zonetype Setting
+@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
+ BOOL bWrite;
+ WZONETYPE ZoneType;
+
+-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
++} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+
+ typedef struct tagSWPAResult {
+ char ifname[100];
+@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
+ u8 key_mgmt;
+ u8 eap_type;
+ BOOL authenticated;
+-} SWPAResult, *PSWPAResult;
++} __packed SWPAResult, *PSWPAResult;
+
+ typedef struct tagSCmdStartAP {
+
+@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
+ BOOL bShareKeyAuth;
+ u8 byBasicRate;
+
+-} SCmdStartAP, *PSCmdStartAP;
++} __packed SCmdStartAP, *PSCmdStartAP;
+
+ typedef struct tagSCmdSetWEP {
+
+@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
+ BOOL bWepKeyAvailable[WEP_NKEYS];
+ u32 auWepKeyLength[WEP_NKEYS];
+
+-} SCmdSetWEP, *PSCmdSetWEP;
++} __packed SCmdSetWEP, *PSCmdSetWEP;
+
+ typedef struct tagSBSSIDItem {
+
+@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
+ BOOL bWEPOn;
+ u32 uRSSI;
+
+-} SBSSIDItem;
++} __packed SBSSIDItem;
+
+
+ typedef struct tagSBSSIDList {
+
+ u32 uItem;
+ SBSSIDItem sBSSIDList[0];
+-} SBSSIDList, *PSBSSIDList;
++} __packed SBSSIDList, *PSBSSIDList;
+
+
+ typedef struct tagSNodeItem {
+@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
+ u32 uTxAttempts;
+ u16 wFailureRatio;
+
+-} SNodeItem;
++} __packed SNodeItem;
+
+
+ typedef struct tagSNodeList {
+@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
+ u32 uItem;
+ SNodeItem sNodeList[0];
+
+-} SNodeList, *PSNodeList;
++} __packed SNodeList, *PSNodeList;
+
+
+ typedef struct tagSCmdLinkStatus {
+@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
+ u32 uChannel;
+ u32 uLinkRate;
+
+-} SCmdLinkStatus, *PSCmdLinkStatus;
++} __packed SCmdLinkStatus, *PSCmdLinkStatus;
+
+ //
+ // 802.11 counter
+@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
+ u32 ReceivedFragmentCount;
+ u32 MulticastReceivedFrameCount;
+ u32 FCSErrorCount;
+-} SDot11MIBCount, *PSDot11MIBCount;
++} __packed SDot11MIBCount, *PSDot11MIBCount;
+
+
+
+@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
+ u32 ullTxBroadcastBytes[2];
+ u32 ullTxMulticastBytes[2];
+ u32 ullTxDirectedBytes[2];
+-} SStatMIBCount, *PSStatMIBCount;
++} __packed SStatMIBCount, *PSStatMIBCount;
+
+ typedef struct tagSCmdValue {
+
+ u32 dwValue;
+
+-} SCmdValue, *PSCmdValue;
++} __packed SCmdValue, *PSCmdValue;
+
+ //
+ // hostapd & viawget ioctl related
+@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
+ u8 ssid[32];
+ } scan_req;
+ } u;
+-};
++} __packed;
+
+ /*--------------------- Export Classes ----------------------------*/
+
+diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
+index 959c886..2522dde 100644
+--- a/drivers/staging/vt6656/iowpa.h
++++ b/drivers/staging/vt6656/iowpa.h
+@@ -67,12 +67,11 @@ enum {
+
+
+
+-#pragma pack(1)
+ typedef struct viawget_wpa_header {
+ u8 type;
+ u16 req_ie_len;
+ u16 resp_ie_len;
+-} viawget_wpa_header;
++} __packed viawget_wpa_header;
+
+ struct viawget_wpa_param {
+ u32 cmd;
+@@ -113,9 +112,8 @@ struct viawget_wpa_param {
+ u8 *buf;
+ } scan_results;
+ } u;
+-};
++} __packed;
+
+-#pragma pack(1)
+ struct viawget_scan_result {
+ u8 bssid[6];
+ u8 ssid[32];
+@@ -130,7 +128,7 @@ struct viawget_scan_result {
+ int noise;
+ int level;
+ int maxrate;
+-};
++} __packed;
+
+ /*--------------------- Export Classes ----------------------------*/
+
+diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
+index ee62a06..ba3a561 100644
+--- a/drivers/staging/vt6656/key.c
++++ b/drivers/staging/vt6656/key.c
+@@ -223,7 +223,7 @@ BOOL KeybSetKey(
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -235,7 +235,8 @@ BOOL KeybSetKey(
+ PSKeyItem pKey;
+ unsigned int uKeyIdx;
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Enter KeybSetKey: %X\n", dwKeyIndex);
+
+ j = (MAX_KEY_TABLE-1);
+ for (i=0;i<(MAX_KEY_TABLE-1);i++) {
+@@ -261,7 +262,9 @@ BOOL KeybSetKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[i].dwGTKeyIndex, i);
+ }
+ pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
+ pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
+@@ -302,9 +305,12 @@ BOOL KeybSetKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
++ pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ",
++ pKey->wTSC15_0);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -326,7 +332,9 @@ BOOL KeybSetKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(N)[%X]: %d\n",
++ pTable->KeyTable[j].dwGTKeyIndex, j);
+ }
+ pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed
+ pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
+@@ -367,9 +375,11 @@ BOOL KeybSetKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
++ pKey->dwTSC47_16);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]);
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n",
++ pTable->KeyTable[i].dwGTKeyIndex);
+
+ return (TRUE);
+ }
+@@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -696,7 +707,10 @@ BOOL KeybSetDefaultKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
++ MAX_KEY_TABLE-1);
+
+ }
+ pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed
+@@ -747,9 +761,11 @@ BOOL KeybSetDefaultKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n",
++ pKey->dwTSC47_16);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -775,7 +791,7 @@ BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -787,7 +803,8 @@ BOOL KeybSetAllGroupKey(
+ PSKeyItem pKey;
+ unsigned int uKeyIdx;
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n",
++ dwKeyIndex);
+
+
+ if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
+@@ -804,7 +821,9 @@ BOOL KeybSetAllGroupKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[i].dwGTKeyIndex, i);
+
+ }
+ pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
+diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
+index f749c7a..bd35d39 100644
+--- a/drivers/staging/vt6656/key.h
++++ b/drivers/staging/vt6656/key.h
+@@ -58,7 +58,7 @@
+ typedef struct tagSKeyItem
+ {
+ BOOL bKeyValid;
+- unsigned long uKeyLength;
++ u32 uKeyLength;
+ BYTE abyKey[MAX_KEY_LEN];
+ QWORD KeyRSC;
+ DWORD dwTSC47_16;
+@@ -107,7 +107,7 @@ BOOL KeybSetKey(
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
+index af4a29d..8fddc7b 100644
+--- a/drivers/staging/vt6656/mac.c
++++ b/drivers/staging/vt6656/mac.c
+@@ -260,7 +260,8 @@ BYTE pbyData[24];
+ dwData1 <<= 16;
+ dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\
++ " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
+
+ //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
+ //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
+@@ -277,7 +278,8 @@ BYTE pbyData[24];
+ dwData2 <<= 8;
+ dwData2 |= *(pbyAddr+0);
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n",
++ wOffset, dwData2);
+
+ //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
+ //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
+diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
+index 3fd0478..8cf0881 100644
+--- a/drivers/staging/vt6656/rf.c
++++ b/drivers/staging/vt6656/rf.c
+@@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr;
+ return TRUE;
+ }
+
++ if (uCH == 0)
++ return -EINVAL;
++
+ switch (uRATE) {
+ case RATE_1M:
+ case RATE_2M:
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index b6e04e7..7a56929 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -375,7 +375,8 @@ s_vFillTxKey (
+ *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
+ // Append IV&ExtIV after Mac Header
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
++ *pdwExtIV);
+
+ } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
+ pTransmitKey->wTSC15_0++;
+@@ -1751,7 +1752,8 @@ s_bPacketToWirelessUsb(
+ MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
+ dwMIC_Priority = 0;
+ MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n",
++ dwMICKey0, dwMICKey1);
+
+ ///////////////////////////////////////////////////////////////////
+
+@@ -2633,7 +2635,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
+ MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
+ dwMIC_Priority = 0;
+ MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\
++ " %X, %X\n", dwMICKey0, dwMICKey1);
+
+ uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
+
+@@ -2653,7 +2656,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n",
++ *pdwMIC_L, *pdwMIC_R);
+
+ }
+
+@@ -3027,7 +3031,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
+ }
+ else {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
++ pTransmitKey->dwKeyIndex);
+ bNeedEncryption = TRUE;
+ }
+ }
+@@ -3041,7 +3046,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ if (pDevice->bEnableHostWEP) {
+ if ((uNodeIndex != 0) &&
+ (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
++ pTransmitKey->dwKeyIndex);
+ bNeedEncryption = TRUE;
+ }
+ }
+diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
+index 8e9450e..dfbf747 100644
+--- a/drivers/staging/vt6656/ttype.h
++++ b/drivers/staging/vt6656/ttype.h
+@@ -29,6 +29,8 @@
+ #ifndef __TTYPE_H__
+ #define __TTYPE_H__
+
++#include <linux/types.h>
++
+ /******* Common definitions and typedefs ***********************************/
+
+ typedef int BOOL;
+@@ -42,17 +44,17 @@ typedef int BOOL;
+
+ /****** Simple typedefs ***************************************************/
+
+-typedef unsigned char BYTE; // 8-bit
+-typedef unsigned short WORD; // 16-bit
+-typedef unsigned long DWORD; // 32-bit
++typedef u8 BYTE;
++typedef u16 WORD;
++typedef u32 DWORD;
+
+ // QWORD is for those situation that we want
+ // an 8-byte-aligned 8 byte long structure
+ // which is NOT really a floating point number.
+ typedef union tagUQuadWord {
+ struct {
+- DWORD dwLowDword;
+- DWORD dwHighDword;
++ u32 dwLowDword;
++ u32 dwHighDword;
+ } u;
+ double DoNotUseThisField;
+ } UQuadWord;
+@@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit
+
+ /****** Common pointer types ***********************************************/
+
+-typedef unsigned long ULONG_PTR; // 32-bit
+-typedef unsigned long DWORD_PTR; // 32-bit
++typedef u32 ULONG_PTR;
++typedef u32 DWORD_PTR;
+
+ // boolean pointer
+
+diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
+index 609e8fa..e962eae 100644
+--- a/drivers/staging/vt6656/usbpipe.c
++++ b/drivers/staging/vt6656/usbpipe.c
+@@ -165,6 +165,11 @@ int PIPEnsControlOut(
+ if (pDevice->Flags & fMP_CONTROL_WRITES)
+ return STATUS_FAILURE;
+
++ if (pDevice->Flags & fMP_CONTROL_READS)
++ return STATUS_FAILURE;
++
++ MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
++
+ pDevice->sUsbCtlRequest.bRequestType = 0x40;
+ pDevice->sUsbCtlRequest.bRequest = byRequest;
+ pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
+@@ -179,12 +184,13 @@ int PIPEnsControlOut(
+
+ ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ if (ntStatus != 0) {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "control send request submission failed: %d\n",
++ ntStatus);
++ MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
+ return STATUS_FAILURE;
+ }
+- else {
+- MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
+- }
++
+ spin_unlock_irq(&pDevice->lock);
+ for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
+
+@@ -224,6 +230,11 @@ int PIPEnsControlIn(
+ if (pDevice->Flags & fMP_CONTROL_READS)
+ return STATUS_FAILURE;
+
++ if (pDevice->Flags & fMP_CONTROL_WRITES)
++ return STATUS_FAILURE;
++
++ MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
++
+ pDevice->sUsbCtlRequest.bRequestType = 0xC0;
+ pDevice->sUsbCtlRequest.bRequest = byRequest;
+ pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
+@@ -237,10 +248,11 @@ int PIPEnsControlIn(
+
+ ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ if (ntStatus != 0) {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
+- }else {
+- MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
+- }
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "control request submission failed: %d\n", ntStatus);
++ MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
++ return STATUS_FAILURE;
++ }
+
+ spin_unlock_irq(&pDevice->lock);
+ for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
+diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
+index 9d2caa8..2225b9e 100644
+--- a/drivers/staging/vt6656/wcmd.c
++++ b/drivers/staging/vt6656/wcmd.c
+@@ -316,17 +316,19 @@ s_MgrMakeProbeRequest(
+ return pTxPacket;
+ }
+
+-void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond)
++void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
+ {
+- PSDevice pDevice = (PSDevice)hDeviceContext;
++ PSDevice pDevice = (PSDevice)hDeviceContext;
+
+- init_timer(&pDevice->sTimerCommand);
+- pDevice->sTimerCommand.data = (unsigned long)pDevice;
+- pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
+- // RUN_AT :1 msec ~= (HZ/1024)
+- pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
+- add_timer(&pDevice->sTimerCommand);
+- return;
++ init_timer(&pDevice->sTimerCommand);
++
++ pDevice->sTimerCommand.data = (unsigned long)pDevice;
++ pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
++ pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000);
++
++ add_timer(&pDevice->sTimerCommand);
++
++ return;
+ }
+
+ void vRunCommand(void *hDeviceContext)
+diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
+index 46c2959..c359252 100644
+--- a/drivers/staging/vt6656/wpa2.h
++++ b/drivers/staging/vt6656/wpa2.h
+@@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo {
+ } PMKIDInfo, *PPMKIDInfo;
+
+ typedef struct tagSPMKIDCache {
+- unsigned long BSSIDInfoCount;
+- PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
++ u32 BSSIDInfoCount;
++ PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
+ } SPMKIDCache, *PSPMKIDCache;
+
+
+diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
+index 4efa9bc..89bfd85 100644
+--- a/drivers/staging/wlan-ng/prism2mgmt.c
++++ b/drivers/staging/wlan-ng/prism2mgmt.c
+@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
+ /* SSID */
+ req->ssid.status = P80211ENUM_msgitem_status_data_ok;
+ req->ssid.data.len = le16_to_cpu(item->ssid.len);
+- req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
++ req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
+ memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
+
+ /* supported rates */
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6b6f50a..5bafd2d 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -2384,7 +2384,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ if (!conn_p)
+ return;
+
+- cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
++ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+ if (!cmd) {
+ iscsit_dec_conn_usage_count(conn_p);
+ return;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 68d4c10..f535c50 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1193,6 +1193,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+
+ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+ {
++ int block_size = dev->se_sub_dev->se_dev_attrib.block_size;
++
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " fabric_max_sectors while dev_export_obj: %d count exists\n",
+@@ -1230,8 +1232,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+ /*
+ * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+ */
++ if (!block_size) {
++ block_size = 512;
++ pr_warn("Defaulting to 512 for zero block_size\n");
++ }
+ fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+- dev->se_sub_dev->se_dev_attrib.block_size);
++ block_size);
+
+ dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+ pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+@@ -1622,6 +1628,7 @@ int core_dev_setup_virtual_lun0(void)
+ ret = PTR_ERR(dev);
+ goto out;
+ }
++ dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ se_dev->se_dev_ptr = dev;
+ g_lun0_dev = dev;
+
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index ea479e5..c0dd776 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -72,6 +72,12 @@ static int target_fabric_mappedlun_link(
+ struct se_portal_group *se_tpg;
+ struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+ int ret = 0, lun_access;
++
++ if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
++ pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
++ " %p to struct lun: %p\n", lun_ci, lun);
++ return -EFAULT;
++ }
+ /*
+ * Ensure that the source port exists
+ */
+@@ -763,6 +769,11 @@ static int target_fabric_port_link(
+ ret = -ENODEV;
+ goto out;
+ }
++ if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
++ pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
++ " %p to struct se_device: %p\n", se_dev_ci, dev);
++ return -EFAULT;
++ }
+
+ lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
+ if (IS_ERR(lun_p)) {
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index b8628a5..8dfe6f5 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -672,6 +672,7 @@ int core_tpg_register(
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ lun = se_tpg->tpg_lun_list[i];
+ lun->unpacked_lun = i;
++ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+ lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_shutdown_comp);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index c87ef74..65e6320 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1085,6 +1085,7 @@ struct se_device *transport_add_device_to_core_hba(
+ dev->se_hba = hba;
+ dev->se_sub_dev = se_dev;
+ dev->transport = transport;
++ dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+@@ -1553,6 +1554,8 @@ static void target_complete_tmr_failure(struct work_struct *work)
+
+ se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ se_cmd->se_tfo->queue_tm_rsp(se_cmd);
++
++ transport_cmd_check_stop_to_fabric(se_cmd);
+ }
+
+ /**
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 3c9e5b5..230d8ec 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -356,11 +356,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+
+ tport = ft_tport_create(rdata->local_port);
+ if (!tport)
+- return 0; /* not a target for this local port */
++ goto not_target; /* not a target for this local port */
+
+ acl = ft_acl_get(tport->tpg, rdata);
+ if (!acl)
+- return 0;
++ goto not_target; /* no target for this remote */
+
+ if (!rspp)
+ goto fill;
+@@ -397,12 +397,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+
+ /*
+ * OR in our service parameters with other provider (initiator), if any.
+- * TBD XXX - indicate RETRY capability?
+ */
+ fill:
+ fcp_parm = ntohl(spp->spp_params);
++ fcp_parm &= ~FCP_SPPF_RETRY;
+ spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
+ return FC_SPP_RESP_ACK;
++
++not_target:
++ fcp_parm = ntohl(spp->spp_params);
++ fcp_parm &= ~FCP_SPPF_TARG_FCN;
++ spp->spp_params = htonl(fcp_parm);
++ return 0;
+ }
+
+ /**
+@@ -431,7 +437,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
+ {
+ struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
+
+- transport_deregister_session(sess->se_sess);
+ kfree(sess);
+ }
+
+@@ -439,6 +444,7 @@ static void ft_sess_free(struct kref *kref)
+ {
+ struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+
++ transport_deregister_session(sess->se_sess);
+ call_rcu(&sess->rcu, ft_sess_rcu_free);
+ }
+
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 90dff82..4a418e4 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1692,6 +1692,8 @@ static inline void dlci_put(struct gsm_dlci *dlci)
+ kref_put(&dlci->ref, gsm_dlci_free);
+ }
+
++static void gsm_destroy_network(struct gsm_dlci *dlci);
++
+ /**
+ * gsm_dlci_release - release DLCI
+ * @dlci: DLCI to destroy
+@@ -1705,9 +1707,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
+ {
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
++ mutex_lock(&dlci->mutex);
++ gsm_destroy_network(dlci);
++ mutex_unlock(&dlci->mutex);
++
++ /* tty_vhangup needs the tty_lock, so unlock and
++ relock after doing the hangup. */
++ tty_unlock();
+ tty_vhangup(tty);
++ tty_lock();
++ tty_port_tty_set(&dlci->port, NULL);
+ tty_kref_put(tty);
+ }
++ dlci->state = DLCI_CLOSED;
+ dlci_put(dlci);
+ }
+
+@@ -2933,6 +2945,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+
+ if (dlci == NULL)
+ return;
++ if (dlci->state == DLCI_CLOSED)
++ return;
+ mutex_lock(&dlci->mutex);
+ gsm_destroy_network(dlci);
+ mutex_unlock(&dlci->mutex);
+@@ -2951,6 +2965,8 @@ out:
+ static void gsmtty_hangup(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return;
+ tty_port_hangup(&dlci->port);
+ gsm_dlci_begin_close(dlci);
+ }
+@@ -2958,9 +2974,12 @@ static void gsmtty_hangup(struct tty_struct *tty)
+ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+ {
++ int sent;
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+ /* Stuff the bytes into the fifo queue */
+- int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
++ sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ /* Need to kick the channel */
+ gsm_dlci_data_kick(dlci);
+ return sent;
+@@ -2969,18 +2988,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ static int gsmtty_write_room(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+ return TX_SIZE - kfifo_len(dlci->fifo);
+ }
+
+ static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+ return kfifo_len(dlci->fifo);
+ }
+
+ static void gsmtty_flush_buffer(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return;
+ /* Caution needed: If we implement reliable transport classes
+ then the data being transmitted can't simply be junked once
+ it has first hit the stack. Until then we can just blow it
+@@ -2999,6 +3024,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+ static int gsmtty_tiocmget(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+ return dlci->modem_rx;
+ }
+
+@@ -3008,6 +3035,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
+ struct gsm_dlci *dlci = tty->driver_data;
+ unsigned int modem_tx = dlci->modem_tx;
+
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+ modem_tx &= ~clear;
+ modem_tx |= set;
+
+@@ -3026,6 +3055,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
+ struct gsm_netconfig nc;
+ int index;
+
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+ switch (cmd) {
+ case GSMIOC_ENABLE_NET:
+ if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
+@@ -3052,6 +3083,9 @@ static int gsmtty_ioctl(struct tty_struct *tty,
+
+ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ {
++ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return;
+ /* For the moment its fixed. In actual fact the speed information
+ for the virtual channel can be propogated in both directions by
+ the RPN control message. This however rapidly gets nasty as we
+@@ -3063,6 +3097,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ static void gsmtty_throttle(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->throttled = 1;
+@@ -3073,6 +3109,8 @@ static void gsmtty_throttle(struct tty_struct *tty)
+ static void gsmtty_unthrottle(struct tty_struct *tty)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
++ if (dlci->state == DLCI_CLOSED)
++ return;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx |= TIOCM_DTR;
+ dlci->throttled = 0;
+@@ -3084,6 +3122,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+ {
+ struct gsm_dlci *dlci = tty->driver_data;
+ int encode = 0; /* Off */
++ if (dlci->state == DLCI_CLOSED)
++ return -EINVAL;
+
+ if (state == -1) /* "On indefinitely" - we can't encode this
+ properly */
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index f574eef..b6dc908 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
+ } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+ /* Clear the USR and write the LCR again. */
+ (void)p->serial_in(p, UART_USR);
+- p->serial_out(p, d->last_lcr, UART_LCR);
++ p->serial_out(p, UART_LCR, d->last_lcr);
+
+ return 1;
+ }
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index 3ad079f..f43156f 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -552,6 +552,7 @@ static void ifx_port_shutdown(struct tty_port *port)
+ container_of(port, struct ifx_spi_device, tty_port);
+
+ mrdy_set_low(ifx_dev);
++ del_timer(&ifx_dev->spi_timer);
+ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+ tasklet_kill(&ifx_dev->io_work_tasklet);
+ }
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index a1b9a2f..f8d03da 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+- return -EINTR;
++ return -ERESTARTSYS;
+ }
+
+ tty_set_termios(tty, &tmp_termios);
+@@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+- return -EINTR;
++ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&tty->termios_mutex);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index a13f7e1..d2f0b26 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -656,7 +656,7 @@ static inline void save_screen(struct vc_data *vc)
+ * Redrawing of screen
+ */
+
+-static void clear_buffer_attributes(struct vc_data *vc)
++void clear_buffer_attributes(struct vc_data *vc)
+ {
+ unsigned short *p = (unsigned short *)vc->vc_origin;
+ int count = vc->vc_screenbuf_size / 2;
+@@ -3017,7 +3017,7 @@ int __init vty_init(const struct file_operations *console_fops)
+
+ static struct class *vtconsole_class;
+
+-static int bind_con_driver(const struct consw *csw, int first, int last,
++static int do_bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+ {
+ struct module *owner = csw->owner;
+@@ -3028,7 +3028,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
++ WARN_CONSOLE_UNLOCKED();
+
+ /* check if driver is registered */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3113,11 +3113,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+
+ retval = 0;
+ err:
+- console_unlock();
+ module_put(owner);
+ return retval;
+ };
+
++
++static int bind_con_driver(const struct consw *csw, int first, int last,
++ int deflt)
++{
++ int ret;
++
++ console_lock();
++ ret = do_bind_con_driver(csw, first, last, deflt);
++ console_unlock();
++ return ret;
++}
++
+ #ifdef CONFIG_VT_HW_CONSOLE_BINDING
+ static int con_is_graphics(const struct consw *csw, int first, int last)
+ {
+@@ -3154,6 +3165,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
+ */
+ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ {
++ int retval;
++
++ console_lock();
++ retval = do_unbind_con_driver(csw, first, last, deflt);
++ console_unlock();
++ return retval;
++}
++EXPORT_SYMBOL(unbind_con_driver);
++
++/* unlocked version of unbind_con_driver() */
++int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
++{
+ struct module *owner = csw->owner;
+ const struct consw *defcsw = NULL;
+ struct con_driver *con_driver = NULL, *con_back = NULL;
+@@ -3162,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
++ WARN_CONSOLE_UNLOCKED();
+
+ /* check if driver is registered and if it is unbindable */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3175,10 +3198,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ }
+ }
+
+- if (retval) {
+- console_unlock();
++ if (retval)
+ goto err;
+- }
+
+ retval = -ENODEV;
+
+@@ -3194,15 +3215,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ }
+ }
+
+- if (retval) {
+- console_unlock();
++ if (retval)
+ goto err;
+- }
+
+- if (!con_is_bound(csw)) {
+- console_unlock();
++ if (!con_is_bound(csw))
+ goto err;
+- }
+
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+@@ -3229,15 +3246,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ if (!con_is_bound(csw))
+ con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+
+- console_unlock();
+ /* ignore return value, binding should not fail */
+- bind_con_driver(defcsw, first, last, deflt);
++ do_bind_con_driver(defcsw, first, last, deflt);
+ err:
+ module_put(owner);
+ return retval;
+
+ }
+-EXPORT_SYMBOL(unbind_con_driver);
++EXPORT_SYMBOL_GPL(do_unbind_con_driver);
+
+ static int vt_bind(struct con_driver *con)
+ {
+@@ -3522,28 +3538,18 @@ int con_debug_leave(void)
+ }
+ EXPORT_SYMBOL_GPL(con_debug_leave);
+
+-/**
+- * register_con_driver - register console driver to console layer
+- * @csw: console driver
+- * @first: the first console to take over, minimum value is 0
+- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+- *
+- * DESCRIPTION: This function registers a console driver which can later
+- * bind to a range of consoles specified by @first and @last. It will
+- * also initialize the console driver by calling con_startup().
+- */
+-int register_con_driver(const struct consw *csw, int first, int last)
++static int do_register_con_driver(const struct consw *csw, int first, int last)
+ {
+ struct module *owner = csw->owner;
+ struct con_driver *con_driver;
+ const char *desc;
+ int i, retval = 0;
+
++ WARN_CONSOLE_UNLOCKED();
++
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
+-
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+@@ -3596,10 +3602,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
+ }
+
+ err:
+- console_unlock();
+ module_put(owner);
+ return retval;
+ }
++
++/**
++ * register_con_driver - register console driver to console layer
++ * @csw: console driver
++ * @first: the first console to take over, minimum value is 0
++ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
++ *
++ * DESCRIPTION: This function registers a console driver which can later
++ * bind to a range of consoles specified by @first and @last. It will
++ * also initialize the console driver by calling con_startup().
++ */
++int register_con_driver(const struct consw *csw, int first, int last)
++{
++ int retval;
++
++ console_lock();
++ retval = do_register_con_driver(csw, first, last);
++ console_unlock();
++ return retval;
++}
+ EXPORT_SYMBOL(register_con_driver);
+
+ /**
+@@ -3615,9 +3640,18 @@ EXPORT_SYMBOL(register_con_driver);
+ */
+ int unregister_con_driver(const struct consw *csw)
+ {
+- int i, retval = -ENODEV;
++ int retval;
+
+ console_lock();
++ retval = do_unregister_con_driver(csw);
++ console_unlock();
++ return retval;
++}
++EXPORT_SYMBOL(unregister_con_driver);
++
++int do_unregister_con_driver(const struct consw *csw)
++{
++ int i, retval = -ENODEV;
+
+ /* cannot unregister a bound driver */
+ if (con_is_bound(csw))
+@@ -3643,27 +3677,53 @@ int unregister_con_driver(const struct consw *csw)
+ }
+ }
+ err:
+- console_unlock();
+ return retval;
+ }
+-EXPORT_SYMBOL(unregister_con_driver);
++EXPORT_SYMBOL_GPL(do_unregister_con_driver);
+
+ /*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+- * take_over_console is basically a register followed by unbind
++ * take_over_console is basically a register followed by unbind
++ */
++int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
++{
++ int err;
++
++ err = do_register_con_driver(csw, first, last);
++ /*
++ * If we get an busy error we still want to bind the console driver
++ * and return success, as we may have unbound the console driver
++ * but not unregistered it.
++ */
++ if (err == -EBUSY)
++ err = 0;
++ if (!err)
++ do_bind_con_driver(csw, first, last, deflt);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(do_take_over_console);
++
++/*
++ * If we support more console drivers, this function is used
++ * when a driver wants to take over some existing consoles
++ * and become default driver for newly opened ones.
++ *
++ * take_over_console is basically a register followed by unbind
+ */
+ int take_over_console(const struct consw *csw, int first, int last, int deflt)
+ {
+ int err;
+
+ err = register_con_driver(csw, first, last);
+- /* if we get an busy error we still want to bind the console driver
++ /*
++ * If we get an busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+-  * but not unregistered it.
+- */
++ * but not unregistered it.
++ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 89c752a..d775bc9 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1602,6 +1602,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
+ .driver_info = NO_UNION_NORMAL,
+ },
++ { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index fe7faf0..2aed077 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -867,6 +867,60 @@ static int hub_hub_status(struct usb_hub *hub,
+ return ret;
+ }
+
++static int hub_set_port_link_state(struct usb_hub *hub, int port1,
++ unsigned int link_status)
++{
++ return set_port_feature(hub->hdev,
++ port1 | (link_status << 3),
++ USB_PORT_FEAT_LINK_STATE);
++}
++
++/*
++ * If USB 3.0 ports are placed into the Disabled state, they will no longer
++ * detect any device connects or disconnects. This is generally not what the
++ * USB core wants, since it expects a disabled port to produce a port status
++ * change event when a new device connects.
++ *
++ * Instead, set the link state to Disabled, wait for the link to settle into
++ * that state, clear any change bits, and then put the port into the RxDetect
++ * state.
++ */
++static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
++{
++ int ret;
++ int total_time;
++ u16 portchange, portstatus;
++
++ if (!hub_is_superspeed(hub->hdev))
++ return -EINVAL;
++
++ ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
++ if (ret) {
++ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
++ port1, ret);
++ return ret;
++ }
++
++ /* Wait for the link to enter the disabled state. */
++ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_DISABLED)
++ break;
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ break;
++ msleep(HUB_DEBOUNCE_STEP);
++ }
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
++ port1, total_time);
++
++ return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
++}
++
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ {
+ struct usb_device *hdev = hub->hdev;
+@@ -875,8 +929,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ if (hdev->children[port1-1] && set_state)
+ usb_set_device_state(hdev->children[port1-1],
+ USB_STATE_NOTATTACHED);
+- if (!hub->error && !hub_is_superspeed(hub->hdev))
+- ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
++ if (!hub->error) {
++ if (hub_is_superspeed(hub->hdev))
++ ret = hub_usb3_port_disable(hub, port1);
++ else
++ ret = clear_port_feature(hdev, port1,
++ USB_PORT_FEAT_ENABLE);
++ }
+ if (ret)
+ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+ port1, ret);
+@@ -2339,7 +2398,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ #define HUB_SHORT_RESET_TIME 10
+ #define HUB_BH_RESET_TIME 50
+ #define HUB_LONG_RESET_TIME 200
+-#define HUB_RESET_TIMEOUT 500
++#define HUB_RESET_TIMEOUT 800
+
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+@@ -2374,6 +2433,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if (ret < 0)
+ return ret;
+
++ /* The port state is unknown until the reset completes. */
++ if ((portstatus & USB_PORT_STAT_RESET))
++ goto delay;
++
+ /*
+ * Some buggy devices require a warm reset to be issued even
+ * when the port appears not to be connected.
+@@ -2419,11 +2482,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+ return -ENOTCONN;
+
+- /* if we`ve finished resetting, then break out of
+- * the loop
+- */
+- if (!(portstatus & USB_PORT_STAT_RESET) &&
+- (portstatus & USB_PORT_STAT_ENABLE)) {
++ if ((portstatus & USB_PORT_STAT_ENABLE)) {
+ if (hub_is_wusb(hub))
+ udev->speed = USB_SPEED_WIRELESS;
+ else if (hub_is_superspeed(hub->hdev))
+@@ -2437,10 +2496,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ return 0;
+ }
+ } else {
+- if (portchange & USB_PORT_STAT_C_BH_RESET)
+- return 0;
++ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
++ hub_port_warm_reset_required(hub,
++ portstatus))
++ return -ENOTCONN;
++
++ return 0;
+ }
+
++delay:
+ /* switch to the long delay after two short delay failures */
+ if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
+ delay = HUB_LONG_RESET_TIME;
+@@ -2464,14 +2528,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ msleep(10 + 40);
+ update_devnum(udev, 0);
+ hcd = bus_to_hcd(udev->bus);
+- if (hcd->driver->reset_device) {
+- *status = hcd->driver->reset_device(hcd, udev);
+- if (*status < 0) {
+- dev_err(&udev->dev, "Cannot reset "
+- "HCD device state\n");
+- break;
+- }
+- }
++ /* The xHC may think the device is already reset,
++ * so ignore the status.
++ */
++ if (hcd->driver->reset_device)
++ hcd->driver->reset_device(hcd, udev);
+ }
+ /* FALL THROUGH */
+ case -ENOTCONN:
+@@ -2479,16 +2540,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ clear_port_feature(hub->hdev,
+ port1, USB_PORT_FEAT_C_RESET);
+ /* FIXME need disconnect() for NOTATTACHED device */
+- if (warm) {
++ if (hub_is_superspeed(hub->hdev)) {
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+- } else {
++ }
++ if (!warm)
+ usb_set_device_state(udev, *status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+- }
+ break;
+ }
+ }
+@@ -2676,6 +2737,23 @@ void usb_enable_ltm(struct usb_device *udev)
+ EXPORT_SYMBOL_GPL(usb_enable_ltm);
+
+ #ifdef CONFIG_USB_SUSPEND
++/*
++ * usb_disable_function_remotewakeup - disable usb3.0
++ * device's function remote wakeup
++ * @udev: target device
++ *
++ * Assume there's only one function on the USB 3.0
++ * device and disable remote wake for the first
++ * interface. FIXME if the interface association
++ * descriptor shows there's more than one function.
++ */
++static int usb_disable_function_remotewakeup(struct usb_device *udev)
++{
++ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
++ USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++}
+
+ /*
+ * usb_port_suspend - suspend a usb device's upstream port
+@@ -2793,12 +2871,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
+ port1, status);
+ /* paranoia: "should not happen" */
+- if (udev->do_remote_wakeup)
+- (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
+- USB_DEVICE_REMOTE_WAKEUP, 0,
+- NULL, 0,
+- USB_CTRL_SET_TIMEOUT);
++ if (udev->do_remote_wakeup) {
++ if (!hub_is_superspeed(hub->hdev)) {
++ (void) usb_control_msg(udev,
++ usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE,
++ USB_RECIP_DEVICE,
++ USB_DEVICE_REMOTE_WAKEUP, 0,
++ NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ } else
++ (void) usb_disable_function_remotewakeup(udev);
++
++ }
+
+ /* Try to enable USB2 hardware LPM again */
+ if (udev->usb2_hw_lpm_capable == 1)
+@@ -2837,7 +2922,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ static int finish_port_resume(struct usb_device *udev)
+ {
+ int status = 0;
+- u16 devstatus;
++ u16 devstatus = 0;
+
+ /* caller owns the udev device lock */
+ dev_dbg(&udev->dev, "%s\n",
+@@ -2882,21 +2967,37 @@ static int finish_port_resume(struct usb_device *udev)
+ if (status) {
+ dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
+ status);
+- } else if (udev->actconfig) {
+- le16_to_cpus(&devstatus);
+- if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
+- status = usb_control_msg(udev,
+- usb_sndctrlpipe(udev, 0),
+- USB_REQ_CLEAR_FEATURE,
++ /*
++ * There are a few quirky devices which violate the standard
++ * by claiming to have remote wakeup enabled after a reset,
++ * which crash if the feature is cleared, hence check for
++ * udev->reset_resume
++ */
++ } else if (udev->actconfig && !udev->reset_resume) {
++ if (!hub_is_superspeed(udev->parent)) {
++ le16_to_cpus(&devstatus);
++ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
++ status = usb_control_msg(udev,
++ usb_sndctrlpipe(udev, 0),
++ USB_REQ_CLEAR_FEATURE,
+ USB_RECIP_DEVICE,
+- USB_DEVICE_REMOTE_WAKEUP, 0,
+- NULL, 0,
+- USB_CTRL_SET_TIMEOUT);
+- if (status)
+- dev_dbg(&udev->dev,
+- "disable remote wakeup, status %d\n",
+- status);
++ USB_DEVICE_REMOTE_WAKEUP, 0,
++ NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ } else {
++ status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
++ &devstatus);
++ le16_to_cpus(&devstatus);
++ if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
++ | USB_INTRF_STAT_FUNC_RW))
++ status =
++ usb_disable_function_remotewakeup(udev);
+ }
++
++ if (status)
++ dev_dbg(&udev->dev,
++ "disable remote wakeup, status %d\n",
++ status);
+ status = 0;
+ }
+ return status;
+@@ -4511,9 +4612,14 @@ static void hub_events(void)
+ * SS.Inactive state.
+ */
+ if (hub_port_warm_reset_required(hub, portstatus)) {
++ int status;
++
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- hub_port_reset(hub, i, NULL,
++ status = hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
++ if (status < 0)
++ hub_port_disable(hub, i, 1);
++ connect_change = 0;
+ }
+
+ if (connect_change)
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 0ab7da2..583150b 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1808,29 +1808,8 @@ free_interfaces:
+ goto free_interfaces;
+ }
+
+- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
+- NULL, 0, USB_CTRL_SET_TIMEOUT);
+- if (ret < 0) {
+- /* All the old state is gone, so what else can we do?
+- * The device is probably useless now anyway.
+- */
+- cp = NULL;
+- }
+-
+- dev->actconfig = cp;
+- if (!cp) {
+- usb_set_device_state(dev, USB_STATE_ADDRESS);
+- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+- /* Leave LPM disabled while the device is unconfigured. */
+- mutex_unlock(hcd->bandwidth_mutex);
+- usb_autosuspend_device(dev);
+- goto free_interfaces;
+- }
+- mutex_unlock(hcd->bandwidth_mutex);
+- usb_set_device_state(dev, USB_STATE_CONFIGURED);
+-
+- /* Initialize the new interface structures and the
++ /*
++ * Initialize the new interface structures and the
+ * hc/hcd/usbcore interface/endpoint state.
+ */
+ for (i = 0; i < nintf; ++i) {
+@@ -1874,6 +1853,35 @@ free_interfaces:
+ }
+ kfree(new_interfaces);
+
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++ USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
++ NULL, 0, USB_CTRL_SET_TIMEOUT);
++ if (ret < 0 && cp) {
++ /*
++ * All the old state is gone, so what else can we do?
++ * The device is probably useless now anyway.
++ */
++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
++ for (i = 0; i < nintf; ++i) {
++ usb_disable_interface(dev, cp->interface[i], true);
++ put_device(&cp->interface[i]->dev);
++ cp->interface[i] = NULL;
++ }
++ cp = NULL;
++ }
++
++ dev->actconfig = cp;
++ mutex_unlock(hcd->bandwidth_mutex);
++
++ if (!cp) {
++ usb_set_device_state(dev, USB_STATE_ADDRESS);
++
++ /* Leave LPM disabled while the device is unconfigured. */
++ usb_autosuspend_device(dev);
++ return ret;
++ }
++ usb_set_device_state(dev, USB_STATE_CONFIGURED);
++
+ if (cp->string == NULL &&
+ !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
+ cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index eb0fd10..b6c4084 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1619,6 +1619,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
+
+ if (epnum == 0 || epnum == 1) {
+ dep->endpoint.maxpacket = 512;
++ dep->endpoint.maxburst = 1;
+ dep->endpoint.ops = &dwc3_gadget_ep0_ops;
+ if (!epnum)
+ dwc->gadget.ep0 = &dep->endpoint;
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index afdbb1c..4ad1f1c 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -126,10 +126,7 @@ static const char ep0name[] = "ep0";
+ static const char *const ep_name[] = {
+ ep0name, /* everyone has ep0 */
+
+- /* act like a net2280: high speed, six configurable endpoints */
+- "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
+-
+- /* or like pxa250: fifteen fixed function endpoints */
++ /* act like a pxa250: fifteen fixed function endpoints */
+ "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
+ "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
+ "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
+@@ -137,6 +134,10 @@ static const char *const ep_name[] = {
+
+ /* or like sa1100: two fixed function endpoints */
+ "ep1out-bulk", "ep2in-bulk",
++
++ /* and now some generic EPs so we have enough in multi config */
++ "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
++ "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
+ };
+ #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
+
+diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
+index 30b908f..672c66a 100644
+--- a/drivers/usb/gadget/f_ecm.c
++++ b/drivers/usb/gadget/f_ecm.c
+@@ -808,9 +808,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (ecm->notify)
+ ecm->notify->driver_data = NULL;
+- if (ecm->port.out_ep->desc)
++ if (ecm->port.out_ep)
+ ecm->port.out_ep->driver_data = NULL;
+- if (ecm->port.in_ep->desc)
++ if (ecm->port.in_ep)
+ ecm->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
+index 1a7b2dd..a9cf2052 100644
+--- a/drivers/usb/gadget/f_eem.c
++++ b/drivers/usb/gadget/f_eem.c
+@@ -319,10 +319,9 @@ fail:
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+
+- /* we might as well release our claims on endpoints */
+- if (eem->port.out_ep->desc)
++ if (eem->port.out_ep)
+ eem->port.out_ep->driver_data = NULL;
+- if (eem->port.in_ep->desc)
++ if (eem->port.in_ep)
+ eem->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
+index 2f7e8f2..1bf9596 100644
+--- a/drivers/usb/gadget/f_midi.c
++++ b/drivers/usb/gadget/f_midi.c
+@@ -416,6 +416,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
+ midi->id = NULL;
+
+ usb_free_descriptors(f->descriptors);
++ usb_free_descriptors(f->hs_descriptors);
+ kfree(midi);
+ }
+
+diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
+index aab8ede..d7811ae 100644
+--- a/drivers/usb/gadget/f_ncm.c
++++ b/drivers/usb/gadget/f_ncm.c
+@@ -1259,9 +1259,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (ncm->notify)
+ ncm->notify->driver_data = NULL;
+- if (ncm->port.out_ep->desc)
++ if (ncm->port.out_ep)
+ ncm->port.out_ep->driver_data = NULL;
+- if (ncm->port.in_ep->desc)
++ if (ncm->port.in_ep)
+ ncm->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
+index 8ee9268..a6c19a4 100644
+--- a/drivers/usb/gadget/f_phonet.c
++++ b/drivers/usb/gadget/f_phonet.c
+@@ -531,7 +531,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
+
+ req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
+ if (!req)
+- goto err;
++ goto err_req;
+
+ req->complete = pn_rx_complete;
+ fp->out_reqv[i] = req;
+@@ -540,14 +540,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
+ /* Outgoing USB requests */
+ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL);
+ if (!fp->in_req)
+- goto err;
++ goto err_req;
+
+ INFO(cdev, "USB CDC Phonet function\n");
+ INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name,
+ fp->out_ep->name, fp->in_ep->name);
+ return 0;
+
++err_req:
++ for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
++ usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
+ err:
++
+ if (fp->out_ep)
+ fp->out_ep->driver_data = NULL;
+ if (fp->in_ep)
+diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
+index b1681e4..47953fe 100644
+--- a/drivers/usb/gadget/f_rndis.c
++++ b/drivers/usb/gadget/f_rndis.c
+@@ -803,9 +803,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (rndis->notify)
+ rndis->notify->driver_data = NULL;
+- if (rndis->port.out_ep->desc)
++ if (rndis->port.out_ep)
+ rndis->port.out_ep->driver_data = NULL;
+- if (rndis->port.in_ep->desc)
++ if (rndis->port.in_ep)
+ rndis->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
+index 21ab474..e5bb966 100644
+--- a/drivers/usb/gadget/f_subset.c
++++ b/drivers/usb/gadget/f_subset.c
+@@ -370,9 +370,9 @@ fail:
+ usb_free_descriptors(f->hs_descriptors);
+
+ /* we might as well release our claims on endpoints */
+- if (geth->port.out_ep->desc)
++ if (geth->port.out_ep)
+ geth->port.out_ep->driver_data = NULL;
+- if (geth->port.in_ep->desc)
++ if (geth->port.in_ep)
+ geth->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
+index 2a8bf06..10f13c1 100644
+--- a/drivers/usb/gadget/f_uvc.c
++++ b/drivers/usb/gadget/f_uvc.c
+@@ -417,7 +417,6 @@ uvc_register_video(struct uvc_device *uvc)
+ return -ENOMEM;
+
+ video->parent = &cdev->gadget->dev;
+- video->minor = -1;
+ video->fops = &uvc_v4l2_fops;
+ video->release = video_device_release;
+ strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+@@ -577,23 +576,12 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+
+ INFO(cdev, "uvc_function_unbind\n");
+
+- if (uvc->vdev) {
+- if (uvc->vdev->minor == -1)
+- video_device_release(uvc->vdev);
+- else
+- video_unregister_device(uvc->vdev);
+- uvc->vdev = NULL;
+- }
+-
+- if (uvc->control_ep)
+- uvc->control_ep->driver_data = NULL;
+- if (uvc->video.ep)
+- uvc->video.ep->driver_data = NULL;
++ video_unregister_device(uvc->vdev);
++ uvc->control_ep->driver_data = NULL;
++ uvc->video.ep->driver_data = NULL;
+
+- if (uvc->control_req) {
+- usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+- kfree(uvc->control_buf);
+- }
++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
++ kfree(uvc->control_buf);
+
+ kfree(f->descriptors);
+ kfree(f->hs_descriptors);
+@@ -740,7 +728,22 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ return 0;
+
+ error:
+- uvc_function_unbind(c, f);
++ if (uvc->vdev)
++ video_device_release(uvc->vdev);
++
++ if (uvc->control_ep)
++ uvc->control_ep->driver_data = NULL;
++ if (uvc->video.ep)
++ uvc->video.ep->driver_data = NULL;
++
++ if (uvc->control_req) {
++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
++ kfree(uvc->control_buf);
++ }
++
++ kfree(f->descriptors);
++ kfree(f->hs_descriptors);
++ kfree(f->ss_descriptors);
+ return ret;
+ }
+
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index c788022..a5d4c41 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -623,7 +623,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
+ status = STS_PCD;
+ }
+ }
+- /* FIXME autosuspend idle root hubs */
++
++ /* If a resume is in progress, make sure it can finish */
++ if (ehci->resuming_ports)
++ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
++
+ spin_unlock_irqrestore (&ehci->lock, flags);
+ return status ? retval : 0;
+ }
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
+index d7fe287..50fcc42 100644
+--- a/drivers/usb/host/ehci-omap.c
++++ b/drivers/usb/host/ehci-omap.c
+@@ -374,7 +374,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+-MODULE_ALIAS("platform:omap-ehci");
++MODULE_ALIAS("platform:ehci-omap");
+ MODULE_AUTHOR("Texas Instruments, Inc.");
+ MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 2cb7d37..f42b68e 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -334,7 +334,8 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == 0x1E26 ||
+ pdev->device == 0x8C2D ||
+- pdev->device == 0x8C26);
++ pdev->device == 0x8C26 ||
++ pdev->device == 0x9C26);
+ }
+
+ static void ehci_enable_xhci_companion(void)
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 528a540..838e571 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+ }
+
+ static const unsigned char
+-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+
+ /* carryover low/fullspeed bandwidth that crosses uframe boundries */
+ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 39f9e4a..7893351 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -723,6 +723,7 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ }
+
+ #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31
++#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31
+
+ bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
+ {
+@@ -736,7 +737,8 @@ bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
+ {
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI;
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI);
+ }
+
+ bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+@@ -778,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ "defaulting to EHCI.\n");
+ dev_warn(&xhci_pdev->dev,
+ "USB 3.0 devices will work at USB 2.0 speeds.\n");
++ usb_disable_xhci_ports(xhci_pdev);
+ return;
+ }
+
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index e4db350..3fe069f 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -447,6 +447,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ return IRQ_NONE;
+ uhci_writew(uhci, status, USBSTS); /* Clear it */
+
++ spin_lock(&uhci->lock);
++ if (unlikely(!uhci->is_initialized)) /* not yet configured */
++ goto done;
++
+ if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
+ if (status & USBSTS_HSE)
+ dev_err(uhci_dev(uhci), "host system error, "
+@@ -455,7 +459,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ dev_err(uhci_dev(uhci), "host controller process "
+ "error, something bad happened!\n");
+ if (status & USBSTS_HCH) {
+- spin_lock(&uhci->lock);
+ if (uhci->rh_state >= UHCI_RH_RUNNING) {
+ dev_err(uhci_dev(uhci),
+ "host controller halted, "
+@@ -473,15 +476,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ * pending unlinks */
+ mod_timer(&hcd->rh_timer, jiffies);
+ }
+- spin_unlock(&uhci->lock);
+ }
+ }
+
+- if (status & USBSTS_RD)
++ if (status & USBSTS_RD) {
++ spin_unlock(&uhci->lock);
+ usb_hcd_poll_rh_status(hcd);
+- else {
+- spin_lock(&uhci->lock);
++ } else {
+ uhci_scan_schedule(uhci);
++ done:
+ spin_unlock(&uhci->lock);
+ }
+
+@@ -662,9 +665,9 @@ static int uhci_start(struct usb_hcd *hcd)
+ */
+ mb();
+
++ spin_lock_irq(&uhci->lock);
+ configure_hc(uhci);
+ uhci->is_initialized = 1;
+- spin_lock_irq(&uhci->lock);
+ start_rh(uhci);
+ spin_unlock_irq(&uhci->lock);
+ return 0;
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index d5eb357..abb9772 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -762,12 +762,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ break;
+ case USB_PORT_FEAT_LINK_STATE:
+ temp = xhci_readl(xhci, port_array[wIndex]);
++
++ /* Disable port */
++ if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
++ xhci_dbg(xhci, "Disable port %d\n", wIndex);
++ temp = xhci_port_state_to_neutral(temp);
++ /*
++ * Clear all change bits, so that we get a new
++ * connection event.
++ */
++ temp |= PORT_CSC | PORT_PEC | PORT_WRC |
++ PORT_OCC | PORT_RC | PORT_PLC |
++ PORT_CEC;
++ xhci_writel(xhci, temp | PORT_PE,
++ port_array[wIndex]);
++ temp = xhci_readl(xhci, port_array[wIndex]);
++ break;
++ }
++
++ /* Put link in RxDetect (enable port) */
++ if (link_state == USB_SS_PORT_LS_RX_DETECT) {
++ xhci_dbg(xhci, "Enable port %d\n", wIndex);
++ xhci_set_link_state(xhci, port_array, wIndex,
++ link_state);
++ temp = xhci_readl(xhci, port_array[wIndex]);
++ break;
++ }
++
+ /* Software should not attempt to set
+- * port link state above '5' (Rx.Detect) and the port
++ * port link state above '3' (U3) and the port
+ * must be enabled.
+ */
+ if ((temp & PORT_PE) == 0 ||
+- (link_state > USB_SS_PORT_LS_RX_DETECT)) {
++ (link_state > USB_SS_PORT_LS_U3)) {
+ xhci_warn(xhci, "Cannot set link state.\n");
+ goto error;
+ }
+@@ -938,6 +965,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ int max_ports;
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
++ bool reset_change = false;
+
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+@@ -969,6 +997,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+ status = 1;
+ }
++ if ((temp & PORT_RC))
++ reset_change = true;
++ }
++ if (!status && !reset_change) {
++ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return status ? retval : 0;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 487bc08..35616ff 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -205,7 +205,12 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
+
+ next = xhci_segment_alloc(xhci, cycle_state, flags);
+ if (!next) {
+- xhci_free_segments_for_ring(xhci, *first);
++ prev = *first;
++ while (prev) {
++ next = prev->next;
++ xhci_segment_free(xhci, prev);
++ prev = next;
++ }
+ return -ENOMEM;
+ }
+ xhci_link_segments(xhci, prev, next, type);
+@@ -258,7 +263,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ return ring;
+
+ fail:
+- xhci_ring_free(xhci, ring);
++ kfree(ring);
+ return NULL;
+ }
+
+@@ -1245,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+ static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+ {
++ if (ep->desc.bInterval == 0)
++ return 0;
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval, 0, 15);
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 4f1e265..394984f7 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ faked_port_index + 1);
+ if (slot_id && xhci->devs[slot_id])
+ xhci_ring_device(xhci, slot_id);
+- if (bus_state->port_remote_wakeup && (1 << faked_port_index)) {
++ if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
+ bus_state->port_remote_wakeup &=
+ ~(1 << faked_port_index);
+ xhci_test_and_clear_bit(xhci, port_array,
+@@ -1725,6 +1725,15 @@ cleanup:
+ if (bogus_port_status)
+ return;
+
++ /*
++ * xHCI port-status-change events occur when the "or" of all the
++ * status-change bits in the portsc register changes from 0 to 1.
++ * New status changes won't cause an event if any other change
++ * bits are still set. When an event occurs, switch over to
++ * polling to avoid losing status changes.
++ */
++ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ spin_unlock(&xhci->lock);
+ /* Pass this up to the core */
+ usb_hcd_poll_rh_status(hcd);
+@@ -2578,6 +2587,8 @@ cleanup:
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE))
+ xhci_urb_free_priv(xhci, urb_priv);
++ else
++ kfree(urb_priv);
+
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+@@ -3069,11 +3080,11 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ }
+
+ /*
+- * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
+- * the TD (*not* including this TRB).
++ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
++ * packets remaining in the TD (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+- * roundup(TD size in bytes / wMaxPacketSize)
++ * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+@@ -3081,24 +3092,27 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
++ * The last TRB in a TD must have the TD size set to zero.
+ */
+-
+ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+- unsigned int total_packet_count, struct urb *urb)
++ unsigned int total_packet_count, struct urb *urb,
++ unsigned int num_trbs_left)
+ {
+ int packets_transferred;
+
+ /* One TRB with a zero-length data packet. */
+- if (running_total == 0 && trb_buff_len == 0)
++ if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
+ return 0;
+
+ /* All the TRB queueing functions don't count the current TRB in
+ * running_total.
+ */
+ packets_transferred = (running_total + trb_buff_len) /
+- usb_endpoint_maxp(&urb->ep->desc);
++ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+
+- return xhci_td_remainder(total_packet_count - packets_transferred);
++ if ((total_packet_count - packets_transferred) > 31)
++ return 31 << 17;
++ return (total_packet_count - packets_transferred) << 17;
+ }
+
+ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+@@ -3125,7 +3139,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+
+ num_trbs = count_sg_trbs_needed(xhci, urb);
+ num_sgs = urb->num_mapped_sgs;
+- total_packet_count = roundup(urb->transfer_buffer_length,
++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+
+ trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+@@ -3208,7 +3222,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb);
++ trb_buff_len, total_packet_count, urb,
++ num_trbs - 1);
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+@@ -3316,7 +3331,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+- total_packet_count = roundup(urb->transfer_buffer_length,
++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+ /* How much data is in the first TRB? */
+ addr = (u64) urb->transfer_dma;
+@@ -3362,7 +3377,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb);
++ trb_buff_len, total_packet_count, urb,
++ num_trbs - 1);
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+@@ -3625,8 +3641,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ addr = start_addr + urb->iso_frame_desc[i].offset;
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+- total_packet_count = roundup(td_len,
+- usb_endpoint_maxp(&urb->ep->desc));
++ total_packet_count = DIV_ROUND_UP(td_len,
++ GET_MAX_PACKET(
++ usb_endpoint_maxp(&urb->ep->desc)));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+ total_packet_count++;
+@@ -3648,9 +3665,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ td = urb_priv->td[i];
+ for (j = 0; j < trbs_per_td; j++) {
+ u32 remainder = 0;
+- field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
++ field = 0;
+
+ if (first_trb) {
++ field = TRB_TBC(burst_count) |
++ TRB_TLBPC(residue);
+ /* Queue the isoc TRB */
+ field |= TRB_TYPE(TRB_ISOC);
+ /* Assume URB_ISO_ASAP is set */
+@@ -3704,7 +3723,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ } else {
+ remainder = xhci_v1_0_td_remainder(
+ running_total, trb_buff_len,
+- total_packet_count, urb);
++ total_packet_count, urb,
++ (trbs_per_td - j - 1));
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index a6e910b..b6586e3 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -480,7 +480,7 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+ if (strstr(dmi_product_name, "Z420") ||
+ strstr(dmi_product_name, "Z620") ||
+ strstr(dmi_product_name, "Z820") ||
+- strstr(dmi_product_name, "Z1"))
++ strstr(dmi_product_name, "Z1 Workstation"))
+ return true;
+
+ return false;
+@@ -880,6 +880,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
+
++ /* Don't poll the roothubs on bus suspend. */
++ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ del_timer_sync(&hcd->rh_timer);
++
+ spin_lock_irq(&xhci->lock);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+@@ -1064,6 +1069,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ compliance_mode_recovery_timer_init(xhci);
+
++ /* Re-enable port polling. */
++ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ usb_hcd_poll_rh_status(hcd);
++
+ return retval;
+ }
+ #endif /* CONFIG_PM */
+@@ -2253,7 +2263,7 @@ static bool xhci_is_async_ep(unsigned int ep_type)
+
+ static bool xhci_is_sync_in_ep(unsigned int ep_type)
+ {
+- return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP);
++ return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+ }
+
+ static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
+index 8637c1f..c5835e8 100644
+--- a/drivers/usb/musb/cppi_dma.c
++++ b/drivers/usb/musb/cppi_dma.c
+@@ -1314,6 +1314,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
+
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(cppi_interrupt);
+
+ /* Instantiate a software object representing a DMA controller. */
+ struct dma_controller *__init
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 26f1bef..c5630c2 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2402,10 +2402,7 @@ static int __init musb_init(void)
+ if (usb_disabled())
+ return 0;
+
+- pr_info("%s: version " MUSB_VERSION ", "
+- "?dma?"
+- ", "
+- "otg (peripheral+host)",
++ pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
+ musb_driver_name);
+ return platform_driver_register(&musb_driver);
+ }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 360bdeb..1f939cb 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -590,6 +590,7 @@ static struct usb_device_id id_table_combined [] = {
+ /*
+ * ELV devices:
+ */
++ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+@@ -676,6 +677,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+@@ -881,6 +883,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
++ /* Crucible Devices */
++ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+@@ -1886,24 +1890,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
+ {
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on && usb_control_msg(port->serial->dev,
++ /* Disable flow control */
++ if (!on) {
++ if (usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 0, priv->interface, NULL, 0,
+ WDR_TIMEOUT) < 0) {
+- dev_err(&port->dev, "error from flowcontrol urb\n");
++ dev_err(&port->dev, "error from flowcontrol urb\n");
+ }
+- /* drop RTS and DTR */
+- if (on)
+- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+- else
+- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
++ else
++ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ /*
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 049b6e7..9d359e1 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -147,6 +147,11 @@
+ #define XSENS_CONVERTER_6_PID 0xD38E
+ #define XSENS_CONVERTER_7_PID 0xD38F
+
++/**
++ * Zolix (www.zolix.com.cb) product ids
++ */
++#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
++
+ /*
+ * NDI (www.ndigital.com) product ids
+ */
+@@ -204,7 +209,7 @@
+
+ /*
+ * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+- * All of these devices use FTDI's vendor ID (0x0403).
++ * Almost all of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
+ *
+ * The previously included PID for the UO 100 module was incorrect.
+@@ -212,6 +217,8 @@
+ *
+ * Armin Laeuger originally sent the PID for the UM 100 module.
+ */
++#define FTDI_ELV_VID 0x1B1F /* ELV AG */
++#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
+ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
+ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
+ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+@@ -1259,3 +1266,9 @@
+ * ATI command output: Cinterion MC55i
+ */
+ #define FTDI_CINTERION_MC55I_PID 0xA951
++
++/*
++ * Product: Comet Caller ID decoder
++ * Manufacturer: Crucible Technologies
++ */
++#define FTDI_CT_COMET_PID 0x8e08
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index c088250..7e4ef87 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -507,19 +507,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
+ unsigned int control_state;
+ struct mct_u232_private *priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* drop DTR and RTS */
+- spin_lock_irq(&priv->lock);
+- if (on)
+- priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+- else
+- priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+- control_state = priv->control_state;
+- spin_unlock_irq(&priv->lock);
+- mct_u232_set_modem_ctrl(port->serial, control_state);
+- }
+- mutex_unlock(&port->serial->disc_mutex);
++ spin_lock_irq(&priv->lock);
++ if (on)
++ priv->control_state |= TIOCM_DTR | TIOCM_RTS;
++ else
++ priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
++ control_state = priv->control_state;
++ spin_unlock_irq(&priv->lock);
++
++ mct_u232_set_modem_ctrl(port, control_state);
+ }
+
+ static void mct_u232_close(struct usb_serial_port *port)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 56fed62..b8f2e3b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -243,6 +243,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_DUAL 0x1005
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
++#define TELIT_PRODUCT_LE920 0x1200
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -289,6 +290,7 @@ static void option_instat_callback(struct urb *urb);
+ #define ALCATEL_VENDOR_ID 0x1bbb
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
+ #define ALCATEL_PRODUCT_X220_X500D 0x0017
++#define ALCATEL_PRODUCT_L100V 0x011e
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -430,9 +432,12 @@ static void option_instat_callback(struct urb *urb);
+ #define MEDIATEK_VENDOR_ID 0x0e8d
+ #define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+ #define MEDIATEK_PRODUCT_DC_4COM 0x00a5
++#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
+ #define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+ #define MEDIATEK_PRODUCT_7208_1COM 0x7101
+ #define MEDIATEK_PRODUCT_7208_2COM 0x7102
++#define MEDIATEK_PRODUCT_7103_2COM 0x7103
++#define MEDIATEK_PRODUCT_7106_2COM 0x7106
+ #define MEDIATEK_PRODUCT_FP_1COM 0x0003
+ #define MEDIATEK_PRODUCT_FP_2COM 0x0023
+ #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+@@ -442,6 +447,18 @@ static void option_instat_callback(struct urb *urb);
+ #define CELLIENT_VENDOR_ID 0x2692
+ #define CELLIENT_PRODUCT_MEN200 0x9005
+
++/* Hyundai Petatel Inc. products */
++#define PETATEL_VENDOR_ID 0x1ff4
++#define PETATEL_PRODUCT_NP10T 0x600e
++
++/* TP-LINK Incorporated products */
++#define TPLINK_VENDOR_ID 0x2357
++#define TPLINK_PRODUCT_MA180 0x0201
++
++/* Changhong products */
++#define CHANGHONG_VENDOR_ID 0x2077
++#define CHANGHONG_PRODUCT_CH690 0x7001
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -463,6 +480,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
++ .reserved = BIT(4),
+ };
+
+ static const struct option_blacklist_info zte_0037_blacklist = {
+@@ -523,6 +541,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info telit_le920_blacklist = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -554,8 +577,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -773,6 +802,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
++ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -923,8 +954,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1190,7 +1223,16 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
+- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+@@ -1295,7 +1337,15 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 93232ca..071b529 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
+ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
+ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
+
+ /* Gobi 2000 devices */
+ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index ea69301..cf50c43 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -949,19 +949,17 @@ static void qt2_dtr_rts(struct usb_serial_port *port, int on)
+ struct usb_device *dev = port->serial->dev;
+ struct qt2_port_private *port_priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on && qt2_setregister(dev, port_priv->device_port,
++ /* Disable flow control */
++ if (!on) {
++ if (qt2_setregister(dev, port_priv->device_port,
+ UART_MCR, 0) < 0)
+ dev_warn(&port->dev, "error from flowcontrol urb\n");
+- /* drop RTS and DTR */
+- if (on)
+- update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0);
+- else
+- update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0);
++ else
++ update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index cf6d149..7d43f69 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -863,19 +863,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+ {
+- struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+
+- if (serial->dev) {
+- mutex_lock(&serial->disc_mutex);
+- if (!serial->disconnected)
+- sierra_send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
+- }
++ sierra_send_setup(port);
+ }
+
+ static int sierra_startup(struct usb_serial *serial)
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index fe3a8a0..9a13ea2 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -510,19 +510,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
+ {
+ struct usb_device *dev = port->serial->dev;
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on &&
+- ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
++ /* Disable flow control */
++ if (!on) {
++ if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
+ dev_err(&port->dev, "error from flowcontrol urb\n");
+- /* drop RTS and DTR */
+- if (on)
+- set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+- else
+- clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
++ else
++ clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 667c39c..771adbd 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -688,10 +688,20 @@ static int serial_carrier_raised(struct tty_port *port)
+ static void serial_dtr_rts(struct tty_port *port, int on)
+ {
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+- struct usb_serial_driver *drv = p->serial->type;
++ struct usb_serial *serial = p->serial;
++ struct usb_serial_driver *drv = serial->type;
+
+- if (drv->dtr_rts)
++ if (!drv->dtr_rts)
++ return;
++ /*
++ * Work-around bug in the tty-layer which can result in dtr_rts
++ * being called after a disconnect (and tty_unregister_device
++ * has returned). Remove once bug has been squashed.
++ */
++ mutex_lock(&serial->disc_mutex);
++ if (!serial->disconnected)
+ drv->dtr_rts(p, on);
++ mutex_unlock(&serial->disc_mutex);
+ }
+
+ static const struct tty_port_operations serial_port_ops = {
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index 188b5b3..e9031a4 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -41,7 +41,6 @@ static bool debug;
+
+ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ {
+- struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+
+@@ -51,12 +50,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ return;
+
+ portdata = usb_get_serial_port_data(port);
+- mutex_lock(&serial->disc_mutex);
++ /* FIXME: locking */
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+- if (serial->dev)
+- intfdata->send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
++
++ intfdata->send_setup(port);
+ }
+ EXPORT_SYMBOL(usb_wwan_dtr_rts);
+
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 105d900..7ab9046 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
+ return 0;
+ }
+
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us)
++/* This places the HUAWEI usb dongles in multi-port mode */
++static int usb_stor_huawei_feature_init(struct us_data *us)
+ {
+ int result;
+
+@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
+ US_DEBUGP("Huawei mode set result is %d\n", result);
+ return 0;
+ }
++
++/*
++ * It will send a scsi switch command called rewind' to huawei dongle.
++ * When the dongle receives this command at the first time,
++ * it will reboot immediately. After rebooted, it will ignore this command.
++ * So it is unnecessary to read its response.
++ */
++static int usb_stor_huawei_scsi_init(struct us_data *us)
++{
++ int result = 0;
++ int act_len = 0;
++ struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
++ char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
++
++ bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
++ bcbw->Tag = 0;
++ bcbw->DataTransferLength = 0;
++ bcbw->Flags = bcbw->Lun = 0;
++ bcbw->Length = sizeof(rewind_cmd);
++ memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
++ memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
++
++ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
++ US_BULK_CB_WRAP_LEN, &act_len);
++ US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
++ return result;
++}
++
++/*
++ * It tries to find the supported Huawei USB dongles.
++ * In Huawei, they assign the following product IDs
++ * for all of their mobile broadband dongles,
++ * including the new dongles in the future.
++ * So if the product ID is not included in this list,
++ * it means it is not Huawei's mobile broadband dongles.
++ */
++static int usb_stor_huawei_dongles_pid(struct us_data *us)
++{
++ struct usb_interface_descriptor *idesc;
++ int idProduct;
++
++ idesc = &us->pusb_intf->cur_altsetting->desc;
++ idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
++ /* The first port is CDROM,
++ * means the dongle in the single port mode,
++ * and a switch command is required to be sent. */
++ if (idesc && idesc->bInterfaceNumber == 0) {
++ if ((idProduct == 0x1001)
++ || (idProduct == 0x1003)
++ || (idProduct == 0x1004)
++ || (idProduct >= 0x1401 && idProduct <= 0x1500)
++ || (idProduct >= 0x1505 && idProduct <= 0x1600)
++ || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int usb_stor_huawei_init(struct us_data *us)
++{
++ int result = 0;
++
++ if (usb_stor_huawei_dongles_pid(us)) {
++ if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
++ result = usb_stor_huawei_scsi_init(us);
++ else
++ result = usb_stor_huawei_feature_init(us);
++ }
++ return result;
++}
+diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
+index 529327f..5376d4f 100644
+--- a/drivers/usb/storage/initializers.h
++++ b/drivers/usb/storage/initializers.h
+@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
+ * flash reader */
+ int usb_stor_ucr61s2b_init(struct us_data *us);
+
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us);
++/* This places the HUAWEI usb dongles in multi-port mode */
++int usb_stor_huawei_init(struct us_data *us);
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 2c85530..65a6a75 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 8f98c9a..3be55cf 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1527,335 +1527,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
+ /* Reported by fangxiaozhi <huananhu@huawei.com>
+ * This brings the HUAWEI data card devices into multi-port mode
+ */
+-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
++UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
+ 0),
+
+ /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index d012fe4..c34373e 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
+ .useTransport = use_transport, \
+ }
+
++#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
++ vendor_name, product_name, use_protocol, use_transport, \
++ init_function, Flags) \
++{ \
++ .vendorName = vendor_name, \
++ .productName = product_name, \
++ .useProtocol = use_protocol, \
++ .useTransport = use_transport, \
++ .initFunction = init_function, \
++}
++
+ static struct us_unusual_dev us_unusual_dev_list[] = {
+ # include "unusual_devs.h"
+ { } /* Terminating entry */
+@@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+
+ #ifdef CONFIG_LOCKDEP
+
+diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
+index b969279..a9b5f2e 100644
+--- a/drivers/usb/storage/usual-tables.c
++++ b/drivers/usb/storage/usual-tables.c
+@@ -46,6 +46,20 @@
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
+ .driver_info = ((useType)<<24) }
+
++/* Define the device is matched with Vendor ID and interface descriptors */
++#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
++ vendorName, productName, useProtocol, useTransport, \
++ initFunction, flags) \
++{ \
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++ | USB_DEVICE_ID_MATCH_VENDOR, \
++ .idVendor = (id_vendor), \
++ .bInterfaceClass = (cl), \
++ .bInterfaceSubClass = (sc), \
++ .bInterfaceProtocol = (pr), \
++ .driver_info = (flags) \
++}
++
+ struct usb_device_id usb_storage_usb_ids[] = {
+ # include "unusual_devs.h"
+ { } /* Terminating entry */
+@@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+
+
+ /*
+diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
+index 77d1fdb..716daaa 100644
+--- a/drivers/video/backlight/adp8860_bl.c
++++ b/drivers/video/backlight/adp8860_bl.c
+@@ -783,7 +783,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
+
+ static int adp8860_i2c_resume(struct i2c_client *client)
+ {
+- adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
++ adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
+
+ return 0;
+ }
+diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
+index edf7f91..f58a189 100644
+--- a/drivers/video/backlight/adp8870_bl.c
++++ b/drivers/video/backlight/adp8870_bl.c
+@@ -957,7 +957,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+
+ static int adp8870_i2c_resume(struct i2c_client *client)
+ {
+- adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
++ adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
+
+ return 0;
+ }
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index fdefa8f..0d4c192 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
+ return retval;
+ }
+
++static int do_fbcon_takeover(int show_logo)
++{
++ int err, i;
++
++ if (!num_registered_fb)
++ return -ENODEV;
++
++ if (!show_logo)
++ logo_shown = FBCON_LOGO_DONTSHOW;
++
++ for (i = first_fb_vc; i <= last_fb_vc; i++)
++ con2fb_map[i] = info_idx;
++
++ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
++ fbcon_is_default);
++
++ if (err) {
++ for (i = first_fb_vc; i <= last_fb_vc; i++)
++ con2fb_map[i] = -1;
++ info_idx = -1;
++ } else {
++ fbcon_has_console_bind = 1;
++ }
++
++ return err;
++}
++
+ static int fbcon_takeover(int show_logo)
+ {
+ int err, i;
+@@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
+ }
+
+ /* Setup default font */
+- if (!p->fontdata) {
++ if (!p->fontdata && !vc->vc_font.data) {
+ if (!fontname[0] || !(font = find_font(fontname)))
+ font = get_default_font(info->var.xres,
+ info->var.yres,
+@@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
+ vc->vc_font.height = font->height;
+ vc->vc_font.data = (void *)(p->fontdata = font->data);
+ vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
++ } else {
++ p->fontdata = vc->vc_font.data;
+ }
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ ops->p = &fb_display[fg_console];
+ }
+
+-static void fbcon_free_font(struct display *p)
++static void fbcon_free_font(struct display *p, bool freefont)
+ {
+- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ p->fontdata = NULL;
+ p->userfont = 0;
+@@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ struct fb_info *info;
+ struct fbcon_ops *ops;
+ int idx;
++ bool free_font = true;
+
+- fbcon_free_font(p);
+ idx = con2fb_map[vc->vc_num];
+
+ if (idx == -1)
+@@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ if (!info)
+ goto finished;
+
++ if (info->flags & FBINFO_MISC_FIRMWARE)
++ free_font = false;
+ ops = info->fbcon_par;
+
+ if (!ops)
+@@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ ops->flags &= ~FBCON_FLAGS_INIT;
+ finished:
+
++ fbcon_free_font(p, free_font);
++
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
+@@ -2977,7 +3010,7 @@ static int fbcon_unbind(void)
+ {
+ int ret;
+
+- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
++ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+ if (!ret)
+@@ -3050,7 +3083,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ primary_device = -1;
+
+ if (!num_registered_fb)
+- unregister_con_driver(&fb_con);
++ do_unregister_con_driver(&fb_con);
+
+ return 0;
+ }
+@@ -3115,7 +3148,7 @@ static int fbcon_fb_registered(struct fb_info *info)
+ }
+
+ if (info_idx != -1)
+- ret = fbcon_takeover(1);
++ ret = do_fbcon_takeover(1);
+ } else {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map_boot[i] == idx)
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index d449a74..5855d17 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ unsigned short video_port_status = vga_video_port_reg + 6;
+ int font_select = 0x00, beg, i;
+ char *charmap;
+-
++ bool clear_attribs = false;
+ if (vga_video_type != VIDEO_TYPE_EGAM) {
+ charmap = (char *) VGA_MAP_MEM(colourmap, 0);
+ beg = 0x0e;
+@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+
+ /* if 512 char mode is already enabled don't re-enable it. */
+ if ((set) && (ch512 != vga_512_chars)) {
+- /* attribute controller */
+- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+- struct vc_data *c = vc_cons[i].d;
+- if (c && c->vc_sw == &vga_con)
+- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+- }
+ vga_512_chars = ch512;
+ /* 256-char: enable intensity bit
+ 512-char: disable intensity bit */
+@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ it means, but it works, and it appears necessary */
+ inb_p(video_port_status);
+ vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
++ clear_attribs = true;
+ }
+ raw_spin_unlock_irq(&vga_lock);
++
++ if (clear_attribs) {
++ for (i = 0; i < MAX_NR_CONSOLES; i++) {
++ struct vc_data *c = vc_cons[i].d;
++ if (c && c->vc_sw == &vga_con) {
++ /* force hi font mask to 0, so we always clear
++ the bit on either transition */
++ c->vc_hi_font_mask = 0x00;
++ clear_buffer_attributes(c);
++ c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
++ }
++ }
++ }
+ return 0;
+ }
+
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 0dff12a..afa804f 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1651,7 +1651,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ event.info = fb_info;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
++ console_lock();
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++ console_unlock();
+ unlock_fb_info(fb_info);
+ return 0;
+ }
+@@ -1667,8 +1669,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
++ console_lock();
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++ console_unlock();
+ unlock_fb_info(fb_info);
+
+ if (ret)
+@@ -1683,7 +1687,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ num_registered_fb--;
+ fb_cleanup_device(fb_info);
+ event.info = fb_info;
++ console_lock();
+ fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
++ console_unlock();
+
+ /* this may free fb info */
+ put_fb_info(fb_info);
+@@ -1854,11 +1860,8 @@ int fb_new_modelist(struct fb_info *info)
+ err = 1;
+
+ if (!list_empty(&info->modelist)) {
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ event.info = info;
+ err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+- unlock_fb_info(info);
+ }
+
+ return err;
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
+index a55e366..ef476b0 100644
+--- a/drivers/video/fbsysfs.c
++++ b/drivers/video/fbsysfs.c
+@@ -177,6 +177,8 @@ static ssize_t store_modes(struct device *device,
+ if (i * sizeof(struct fb_videomode) != count)
+ return -EINVAL;
+
++ if (!lock_fb_info(fb_info))
++ return -ENODEV;
+ console_lock();
+ list_splice(&fb_info->modelist, &old_list);
+ fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
+@@ -188,6 +190,7 @@ static ssize_t store_modes(struct device *device,
+ fb_destroy_modelist(&old_list);
+
+ console_unlock();
++ unlock_fb_info(fb_info);
+
+ return 0;
+ }
+diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
+index 458c006..dbce2da 100644
+--- a/drivers/video/fsl-diu-fb.c
++++ b/drivers/video/fsl-diu-fb.c
+@@ -922,7 +922,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+ #define PF_COMP_0_MASK 0x0000000F
+ #define PF_COMP_0_SHIFT 0
+
+-#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
++#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \
+ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
+ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
+ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
+@@ -932,10 +932,10 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+ switch (bits_per_pixel) {
+ case 32:
+ /* 0x88883316 */
+- return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
++ return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8);
+ case 24:
+ /* 0x88082219 */
+- return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
++ return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0);
+ case 16:
+ /* 0x65053118 */
+ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
+diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
+index 49619b4..f2a49ef 100644
+--- a/drivers/video/mxsfb.c
++++ b/drivers/video/mxsfb.c
+@@ -369,7 +369,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
+ loop--;
+ }
+
+- writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
++ reg = readl(host->base + LCDC_VDCTRL4);
++ writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
+
+ clk_disable_unprepare(host->clk);
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 5aa43c3..52bfd07 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -132,6 +132,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
+ unsigned head;
+ int i;
+
++ /*
++ * We require lowmem mappings for the descriptors because
++ * otherwise virt_to_phys will give us bogus addresses in the
++ * virtqueue.
++ */
++ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
++
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
+ if (!desc)
+ return -ENOMEM;
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index b1f60a0..b2db77e 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
+ u->name, (void *)(unsigned long)port);
+ if (rc >= 0)
+ rc = evtchn_make_refcounted(port);
++ else {
++ /* bind failed, should close the port now */
++ struct evtchn_close close;
++ close.port = port;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++ BUG();
++ set_port_user(port, NULL);
++ }
+
+ return rc;
+ }
+@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+ {
+ int irq = irq_from_evtchn(port);
+
++ BUG_ON(irq < 0);
++
+ unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+
+ set_port_user(port, NULL);
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index 0067266..22be735 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -54,10 +54,6 @@
+ /* External tools reserve first few grant table entries. */
+ #define NR_RESERVED_ENTRIES 8
+ #define GNTTAB_LIST_END 0xffffffff
+-#define GREFS_PER_GRANT_FRAME \
+-(grant_table_version == 1 ? \
+-(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
+-(PAGE_SIZE / sizeof(union grant_entry_v2)))
+
+ static grant_ref_t **gnttab_list;
+ static unsigned int nr_grant_frames;
+@@ -152,6 +148,7 @@ static struct gnttab_ops *gnttab_interface;
+ static grant_status_t *grstatus;
+
+ static int grant_table_version;
++static int grefs_per_grant_frame;
+
+ static struct gnttab_free_callback *gnttab_free_callback_list;
+
+@@ -766,12 +763,14 @@ static int grow_gnttab_list(unsigned int more_frames)
+ unsigned int new_nr_grant_frames, extra_entries, i;
+ unsigned int nr_glist_frames, new_nr_glist_frames;
+
++ BUG_ON(grefs_per_grant_frame == 0);
++
+ new_nr_grant_frames = nr_grant_frames + more_frames;
+- extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
++ extra_entries = more_frames * grefs_per_grant_frame;
+
+- nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
++ nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ new_nr_glist_frames =
+- (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
++ (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
+ if (!gnttab_list[i])
+@@ -779,12 +778,12 @@ static int grow_gnttab_list(unsigned int more_frames)
+ }
+
+
+- for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+- i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
++ for (i = grefs_per_grant_frame * nr_grant_frames;
++ i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
+ gnttab_entry(i) = i + 1;
+
+ gnttab_entry(i) = gnttab_free_head;
+- gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
++ gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
+ gnttab_free_count += extra_entries;
+
+ nr_grant_frames = new_nr_grant_frames;
+@@ -904,7 +903,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+
+ static unsigned nr_status_frames(unsigned nr_grant_frames)
+ {
+- return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
++ BUG_ON(grefs_per_grant_frame == 0);
++ return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
+ }
+
+ static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
+@@ -1062,6 +1062,7 @@ static void gnttab_request_version(void)
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ if (rc == 0 && gsv.version == 2) {
+ grant_table_version = 2;
++ grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
+ gnttab_interface = &gnttab_v2_ops;
+ } else if (grant_table_version == 2) {
+ /*
+@@ -1074,17 +1075,17 @@ static void gnttab_request_version(void)
+ panic("we need grant tables version 2, but only version 1 is available");
+ } else {
+ grant_table_version = 1;
++ grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
+ gnttab_interface = &gnttab_v1_ops;
+ }
+ printk(KERN_INFO "Grant tables using version %d layout.\n",
+ grant_table_version);
+ }
+
+-int gnttab_resume(void)
++static int gnttab_setup(void)
+ {
+ unsigned int max_nr_gframes;
+
+- gnttab_request_version();
+ max_nr_gframes = gnttab_max_grant_frames();
+ if (max_nr_gframes < nr_grant_frames)
+ return -ENOSYS;
+@@ -1107,6 +1108,12 @@ int gnttab_resume(void)
+ return 0;
+ }
+
++int gnttab_resume(void)
++{
++ gnttab_request_version();
++ return gnttab_setup();
++}
++
+ int gnttab_suspend(void)
+ {
+ gnttab_interface->unmap_frames();
+@@ -1118,9 +1125,10 @@ static int gnttab_expand(unsigned int req_entries)
+ int rc;
+ unsigned int cur, extra;
+
++ BUG_ON(grefs_per_grant_frame == 0);
+ cur = nr_grant_frames;
+- extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
+- GREFS_PER_GRANT_FRAME);
++ extra = ((req_entries + (grefs_per_grant_frame-1)) /
++ grefs_per_grant_frame);
+ if (cur + extra > gnttab_max_grant_frames())
+ return -ENOSPC;
+
+@@ -1138,21 +1146,23 @@ int gnttab_init(void)
+ unsigned int nr_init_grefs;
+ int ret;
+
++ gnttab_request_version();
+ nr_grant_frames = 1;
+ boot_max_nr_grant_frames = __max_nr_grant_frames();
+
+ /* Determine the maximum number of frames required for the
+ * grant reference free list on the current hypervisor.
+ */
++ BUG_ON(grefs_per_grant_frame == 0);
+ max_nr_glist_frames = (boot_max_nr_grant_frames *
+- GREFS_PER_GRANT_FRAME / RPP);
++ grefs_per_grant_frame / RPP);
+
+ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
+ GFP_KERNEL);
+ if (gnttab_list == NULL)
+ return -ENOMEM;
+
+- nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
++ nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ for (i = 0; i < nr_glist_frames; i++) {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
+ if (gnttab_list[i] == NULL) {
+@@ -1161,12 +1171,12 @@ int gnttab_init(void)
+ }
+ }
+
+- if (gnttab_resume() < 0) {
++ if (gnttab_setup() < 0) {
+ ret = -ENODEV;
+ goto ini_nomem;
+ }
+
+- nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
++ nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
+
+ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
+ gnttab_entry(i) = i + 1;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 790b3cd..772428d 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -176,7 +176,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ goto _error;
+ bprm->argc ++;
+
+- bprm->interp = iname; /* for binfmt_script */
++ /* Update interp in case binfmt_script needs it. */
++ retval = bprm_change_interp(iname, bprm);
++ if (retval < 0)
++ goto _error;
+
+ interp_file = open_exec (iname);
+ retval = PTR_ERR (interp_file);
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index d3b8c1f..df49d48 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ retval = copy_strings_kernel(1, &i_name, bprm);
+ if (retval) return retval;
+ bprm->argc++;
+- bprm->interp = interp;
++ retval = bprm_change_interp(interp, bprm);
++ if (retval < 0)
++ return retval;
+
+ /*
+ * OK, now restart the process with the interpreter's dentry.
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 38e721b..2577cf4 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1047,6 +1047,7 @@ int revalidate_disk(struct gendisk *disk)
+
+ mutex_lock(&bdev->bd_mutex);
+ check_disk_size_change(disk, bdev);
++ bdev->bd_invalidated = 0;
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
+ return ret;
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 1ced2d8..a2e0b94 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -267,6 +267,14 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
+ kfree(req->r_pages);
+ }
+
++static void ceph_unlock_page_vector(struct page **pages, int num_pages)
++{
++ int i;
++
++ for (i = 0; i < num_pages; i++)
++ unlock_page(pages[i]);
++}
++
+ /*
+ * start an async read(ahead) operation. return nr_pages we submitted
+ * a read for on success, or negative error code.
+@@ -308,8 +316,8 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
+ NULL, 0,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ NULL, false, 1, 0);
+- if (!req)
+- return -ENOMEM;
++ if (IS_ERR(req))
++ return PTR_ERR(req);
+
+ /* build page vector */
+ nr_pages = len >> PAGE_CACHE_SHIFT;
+@@ -347,6 +355,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
+ return nr_pages;
+
+ out_pages:
++ ceph_unlock_page_vector(pages, nr_pages);
+ ceph_release_page_vector(pages, nr_pages);
+ out:
+ ceph_osdc_put_request(req);
+@@ -831,8 +840,8 @@ get_more_pages:
+ ci->i_truncate_size,
+ &inode->i_mtime, true, 1, 0);
+
+- if (!req) {
+- rc = -ENOMEM;
++ if (IS_ERR(req)) {
++ rc = PTR_ERR(req);
+ unlock_page(page);
+ break;
+ }
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 620daad..e7d4077 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1349,11 +1349,15 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
+ if (!ci->i_head_snapc)
+ ci->i_head_snapc = ceph_get_snap_context(
+ ci->i_snap_realm->cached_context);
+- dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+- ci->i_head_snapc);
++ dout(" inode %p now dirty snapc %p auth cap %p\n",
++ &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
+ BUG_ON(!list_empty(&ci->i_dirty_item));
+ spin_lock(&mdsc->cap_dirty_lock);
+- list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
++ if (ci->i_auth_cap)
++ list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
++ else
++ list_add(&ci->i_dirty_item,
++ &mdsc->cap_dirty_migrating);
+ spin_unlock(&mdsc->cap_dirty_lock);
+ if (ci->i_flushing_caps == 0) {
+ ihold(inode);
+@@ -2388,7 +2392,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
+ &atime);
+
+ /* max size increase? */
+- if (max_size != ci->i_max_size) {
++ if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
+ dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
+ ci->i_max_size = max_size;
+ if (max_size >= ci->i_wanted_max_size) {
+@@ -2745,6 +2749,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
+
+ /* make sure we re-request max_size, if necessary */
+ spin_lock(&ci->i_ceph_lock);
++ ci->i_wanted_max_size = 0; /* reset */
+ ci->i_requested_max_size = 0;
+ spin_unlock(&ci->i_ceph_lock);
+ }
+@@ -2840,8 +2845,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ case CEPH_CAP_OP_IMPORT:
+ handle_cap_import(mdsc, inode, h, session,
+ snaptrace, snaptrace_len);
+- ceph_check_caps(ceph_inode(inode), 0, session);
+- goto done_unlocked;
+ }
+
+ /* the rest require a cap */
+@@ -2858,6 +2861,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ switch (op) {
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT:
++ case CEPH_CAP_OP_IMPORT:
+ handle_cap_grant(inode, h, session, cap, msg->middle);
+ goto done_unlocked;
+
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index ecebbc0..5840d2a 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -536,8 +536,8 @@ more:
+ do_sync,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ &mtime, false, 2, page_align);
+- if (!req)
+- return -ENOMEM;
++ if (IS_ERR(req))
++ return PTR_ERR(req);
+
+ if (file->f_flags & O_DIRECT) {
+ pages = ceph_get_direct_page_vector(data, num_pages, false);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 4b5762e..81613bc 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -1466,7 +1466,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 to;
+- int wrbuffer_refs, wake = 0;
++ int wrbuffer_refs, finish = 0;
+
+ retry:
+ spin_lock(&ci->i_ceph_lock);
+@@ -1498,15 +1498,18 @@ retry:
+ truncate_inode_pages(inode->i_mapping, to);
+
+ spin_lock(&ci->i_ceph_lock);
+- ci->i_truncate_pending--;
+- if (ci->i_truncate_pending == 0)
+- wake = 1;
++ if (to == ci->i_truncate_size) {
++ ci->i_truncate_pending = 0;
++ finish = 1;
++ }
+ spin_unlock(&ci->i_ceph_lock);
++ if (!finish)
++ goto retry;
+
+ if (wrbuffer_refs == 0)
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+- if (wake)
+- wake_up_all(&ci->i_cap_wq);
++
++ wake_up_all(&ci->i_cap_wq);
+ }
+
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 1bcf712..0d9864f 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1876,9 +1876,14 @@ finish:
+ static void __wake_requests(struct ceph_mds_client *mdsc,
+ struct list_head *head)
+ {
+- struct ceph_mds_request *req, *nreq;
++ struct ceph_mds_request *req;
++ LIST_HEAD(tmp_list);
++
++ list_splice_init(head, &tmp_list);
+
+- list_for_each_entry_safe(req, nreq, head, r_wait) {
++ while (!list_empty(&tmp_list)) {
++ req = list_entry(tmp_list.next,
++ struct ceph_mds_request, r_wait);
+ list_del_init(&req->r_wait);
+ __do_request(mdsc, req);
+ }
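
The __wake_requests() change above first detaches the whole wait list onto a private list and only then walks it, so a handler that puts a request back on the original list cannot make the walk revisit it. A small userspace sketch of the same detach-then-drain pattern on a toy singly linked list (all names here are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

struct req {
    struct req *next;
    int id;
};

static struct req *wait_list;   /* the shared list handlers may re-add to */

static void push(struct req **head, struct req *r) { r->next = *head; *head = r; }

static void handle_req(struct req *r)
{
    if (r->id == 1) {                       /* pretend this one cannot progress yet */
        printf("requeueing req %d\n", r->id);
        push(&wait_list, r);
    } else {
        printf("completed req %d\n", r->id);
        free(r);
    }
}

/* Drain only the entries present on entry: detach the list first,
 * analogous to list_splice_init() onto a local list head. */
static void wake_requests(void)
{
    struct req *tmp = wait_list;
    wait_list = NULL;

    while (tmp) {
        struct req *r = tmp;
        tmp = tmp->next;
        handle_req(r);                      /* may push r back onto wait_list */
    }
}

int main(void)
{
    for (int i = 1; i <= 3; i++) {
        struct req *r = malloc(sizeof(*r));
        r->id = i;
        push(&wait_list, r);
    }
    wake_requests();
    while (wait_list) {                     /* free whatever was requeued */
        struct req *r = wait_list;
        wait_list = r->next;
        free(r);
    }
    return 0;
}
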
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index b982239..2f6212e 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -388,8 +388,6 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
+ seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
+ if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
+ seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
+- if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
+- seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
+ if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
+ seq_printf(m, ",osdkeepalivetimeout=%d",
+ opt->osd_keepalive_timeout);
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index ce5cbd7..210fce2 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -226,6 +226,8 @@ compose_mount_options_out:
+ compose_mount_options_err:
+ kfree(mountdata);
+ mountdata = ERR_PTR(rc);
++ kfree(*devname);
++ *devname = NULL;
+ goto compose_mount_options_out;
+ }
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index eedec84..3b032dd 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1285,7 +1285,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
+ * otherwise we might miss an event that happens between the
+ * f_op->poll() call and the new event set registering.
+ */
+- epi->event.events = event->events;
++ epi->event.events = event->events; /* need barrier below */
+ pt._key = event->events;
+ epi->event.data = event->data; /* protected by mtx */
+ if (epi->event.events & EPOLLWAKEUP) {
+@@ -1296,6 +1296,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
+ }
+
+ /*
++ * The following barrier has two effects:
++ *
++ * 1) Flush epi changes above to other CPUs. This ensures
++ * we do not miss events from ep_poll_callback if an
++ * event occurs immediately after we call f_op->poll().
++ * We need this because we did not take ep->lock while
++ * changing epi above (but ep_poll_callback does take
++ * ep->lock).
++ *
++ * 2) We also need to ensure we do not miss _past_ events
++ * when calling f_op->poll(). This barrier also
++ * pairs with the barrier in wq_has_sleeper (see
++ * comments for wq_has_sleeper).
++ *
++ * This barrier will now guarantee ep_poll_callback or f_op->poll
++ * (or both) will notice the readiness of an item.
++ */
++ smp_mb();
++
++ /*
+ * Get current event bits. We can safely use the file* here because
+ * its usage count has been increased by the caller of this function.
+ */
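
The comment added above describes the usual missed-wakeup pairing: one side publishes its update and then checks the other side's state, the other side does the same in the opposite order, and a full barrier sits between each store and the following load. A compact userspace sketch of that pairing using C11 fences in place of smp_mb() (the variable and function names are illustrative, not the epoll internals):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int interest;       /* stands in for epi->event.events */
static atomic_int event_pending;  /* stands in for the readiness f_op->poll() reports */

/* Modifier side: publish the new interest, then look for an already-pending event. */
static bool modify_then_poll(int new_interest)
{
    atomic_store_explicit(&interest, new_interest, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* pairs with the fence below */
    return atomic_load_explicit(&event_pending, memory_order_relaxed) != 0;
}

/* Callback side: record the event, then check whether anyone is interested. */
static bool raise_then_check(void)
{
    atomic_store_explicit(&event_pending, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* pairs with the fence above */
    return atomic_load_explicit(&interest, memory_order_relaxed) != 0;
}

int main(void)
{
    /* Single-threaded smoke test. The point of the paired fences is that when
     * the two functions race on different CPUs (with a nonzero new_interest),
     * at least one of them must return true, so the event cannot be lost by
     * both sides at once. */
    printf("%d %d\n", raise_then_check(), modify_then_poll(1));
    return 0;
}
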
+diff --git a/fs/exec.c b/fs/exec.c
+index fab2c6d..59896ae 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1202,9 +1202,24 @@ void free_bprm(struct linux_binprm *bprm)
+ mutex_unlock(&current->signal->cred_guard_mutex);
+ abort_creds(bprm->cred);
+ }
++ /* If a binfmt changed the interp, free it. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
+ kfree(bprm);
+ }
+
++int bprm_change_interp(char *interp, struct linux_binprm *bprm)
++{
++ /* If a binfmt changed the interp, free it first. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
++ bprm->interp = kstrdup(interp, GFP_KERNEL);
++ if (!bprm->interp)
++ return -ENOMEM;
++ return 0;
++}
++EXPORT_SYMBOL(bprm_change_interp);
++
+ /*
+ * install the new credentials for this executable
+ */
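
bprm_change_interp() above follows a common owned-string pattern: free the previous copy only when it is not the shared default, duplicate the new value, and report -ENOMEM if the duplication fails. A userspace sketch of that pattern (the struct, field, and path names are invented for the illustration):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct binprm_like {
    char *filename;  /* not owned: caller-provided storage */
    char *interp;    /* owned only once it no longer points at filename */
};

static int change_interp(struct binprm_like *b, const char *interp)
{
    if (b->interp != b->filename)   /* free only what we allocated earlier */
        free(b->interp);
    b->interp = strdup(interp);
    return b->interp ? 0 : -ENOMEM;
}

static void release(struct binprm_like *b)
{
    if (b->interp != b->filename)   /* mirrors the new check in free_bprm() */
        free(b->interp);
}

int main(void)
{
    char name[] = "/usr/local/bin/some-script";
    struct binprm_like b = { .filename = name, .interp = name };

    if (change_interp(&b, "/bin/sh") == 0)
        release(&b);
    return 0;
}
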
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index a5c29bb..8535c45 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -410,8 +410,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
+
+ retry:
+ handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+- if (IS_ERR(handle))
+- return PTR_ERR(handle);
++ if (IS_ERR(handle)) {
++ error = PTR_ERR(handle);
++ goto release_and_out;
++ }
+ error = ext4_set_acl(handle, inode, type, acl);
+ ext4_journal_stop(handle);
+ if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 741bb94..31be24d 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2176,13 +2176,14 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
+ * removes index from the index block.
+ */
+ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+- struct ext4_ext_path *path)
++ struct ext4_ext_path *path, int depth)
+ {
+ int err;
+ ext4_fsblk_t leaf;
+
+ /* free index block */
+- path--;
++ depth--;
++ path = path + depth;
+ leaf = ext4_idx_pblock(path->p_idx);
+ if (unlikely(path->p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+@@ -2207,6 +2208,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+
+ ext4_free_blocks(handle, inode, NULL, leaf, 1,
+ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
++
++ while (--depth >= 0) {
++ if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
++ break;
++ path--;
++ err = ext4_ext_get_access(handle, inode, path);
++ if (err)
++ break;
++ path->p_idx->ei_block = (path+1)->p_idx->ei_block;
++ err = ext4_ext_dirty(handle, inode, path);
++ if (err)
++ break;
++ }
+ return err;
+ }
+
+@@ -2540,7 +2554,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ /* if this leaf is free, then we should
+ * remove it from index block above */
+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
+- err = ext4_ext_rm_idx(handle, inode, path + depth);
++ err = ext4_ext_rm_idx(handle, inode, path, depth);
+
+ out:
+ return err;
+@@ -2741,7 +2755,7 @@ cont:
+ /* index is empty, remove it;
+ * handle must be already prepared by the
+ * truncatei_leaf() */
+- err = ext4_ext_rm_idx(handle, inode, path + i);
++ err = ext4_ext_rm_idx(handle, inode, path, i);
+ }
+ /* root level has p_bh == NULL, brelse() eats this */
+ brelse(path[i].p_bh);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index cc2d77c..c7c6e09 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -753,7 +753,6 @@ got:
+
+ BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
+ err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
+- brelse(block_bitmap_bh);
+
+ /* recheck and clear flag under lock if we still need to */
+ ext4_lock_group(sb, group);
+@@ -766,6 +765,7 @@ got:
+ ext4_group_desc_csum_set(sb, group, gdp);
+ }
+ ext4_unlock_group(sb, group);
++ brelse(block_bitmap_bh);
+
+ if (err)
+ goto fail;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 2ce16af..7e424eb 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1501,6 +1501,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
+
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
++
++ pagevec_init(&pvec, 0);
+ while (index <= end) {
+ nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+ if (nr_pages == 0)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 2b5fb60..f4665ec 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1643,9 +1643,7 @@ static int parse_options(char *options, struct super_block *sb,
+ unsigned int *journal_ioprio,
+ int is_remount)
+ {
+-#ifdef CONFIG_QUOTA
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+-#endif
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int token;
+@@ -1694,6 +1692,16 @@ static int parse_options(char *options, struct super_block *sb,
+ }
+ }
+ #endif
++ if (test_opt(sb, DIOREAD_NOLOCK)) {
++ int blocksize =
++ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
++
++ if (blocksize < PAGE_CACHE_SIZE) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "dioread_nolock if block size != PAGE_SIZE");
++ return 0;
++ }
++ }
+ return 1;
+ }
+
+@@ -2184,7 +2192,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ __func__, inode->i_ino, inode->i_size);
+ jbd_debug(2, "truncating inode %lu to %lld bytes\n",
+ inode->i_ino, inode->i_size);
++ mutex_lock(&inode->i_mutex);
+ ext4_truncate(inode);
++ mutex_unlock(&inode->i_mutex);
+ nr_truncates++;
+ } else {
+ ext4_msg(sb, KERN_DEBUG,
+@@ -3409,15 +3419,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ clear_opt(sb, DELALLOC);
+ }
+
+- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+- if (test_opt(sb, DIOREAD_NOLOCK)) {
+- if (blocksize < PAGE_SIZE) {
+- ext4_msg(sb, KERN_ERR, "can't mount with "
+- "dioread_nolock if block size != PAGE_SIZE");
+- goto failed_mount;
+- }
+- }
+-
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+@@ -3459,6 +3460,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
+ goto failed_mount;
+
++ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+@@ -4694,7 +4696,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ }
+
+ ext4_setup_system_zone(sb);
+- if (sbi->s_journal == NULL)
++ if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+ ext4_commit_super(sb, 1);
+
+ unlock_super(sb);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index fb1ab953..2fb20f5 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -209,7 +209,8 @@ repeat:
+ if (!new_transaction)
+ goto alloc_transaction;
+ write_lock(&journal->j_state_lock);
+- if (!journal->j_running_transaction) {
++ if (!journal->j_running_transaction &&
++ !journal->j_barrier_count) {
+ jbd2_get_transaction(journal, new_transaction);
+ new_transaction = NULL;
+ }
+diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
+index 0c96eb5..0331072 100644
+--- a/fs/jffs2/nodemgmt.c
++++ b/fs/jffs2/nodemgmt.c
+@@ -417,14 +417,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ spin_unlock(&c->erase_completion_lock);
+
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+- if (ret)
+- return ret;
++
+ /* Just lock it again and continue. Nothing much can change because
+ we hold c->alloc_sem anyway. In fact, it's not entirely clear why
+ we hold c->erase_completion_lock in the majority of this function...
+ but that's a question for another (more caffeine-rich) day. */
+ spin_lock(&c->erase_completion_lock);
+
++ if (ret)
++ return ret;
++
+ waste = jeb->free_size;
+ jffs2_link_node_ref(c, jeb,
+ (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 05d2912..a5657ff 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,6 +551,9 @@ again:
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ if (status < 0)
+ break;
++ /* Resend the blocking lock request after a server reboot */
++ if (resp->status == nlm_lck_denied_grace_period)
++ continue;
+ if (resp->status != nlm_lck_blocked)
+ break;
+ }
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 1093968..62f18f7 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -1240,6 +1240,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = {
+ static struct pnfs_layoutdriver_type blocklayout_type = {
+ .id = LAYOUT_BLOCK_VOLUME,
+ .name = "LAYOUT_BLOCK_VOLUME",
++ .owner = THIS_MODULE,
+ .read_pagelist = bl_read_pagelist,
+ .write_pagelist = bl_write_pagelist,
+ .alloc_layout_hdr = bl_alloc_layout_hdr,
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 0e7cd89..b59f1d8 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -614,8 +614,7 @@ EXPORT_SYMBOL_GPL(nfs_create_rpc_client);
+ */
+ static void nfs_destroy_server(struct nfs_server *server)
+ {
+- if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) ||
+- !(server->flags & NFS_MOUNT_LOCAL_FCNTL))
++ if (server->nlm_host)
+ nlmclnt_done(server->nlm_host);
+ }
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 627f108..e210a66 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1155,11 +1155,14 @@ static int nfs_dentry_delete(const struct dentry *dentry)
+
+ }
+
++/* Ensure that we revalidate inode->i_nlink */
+ static void nfs_drop_nlink(struct inode *inode)
+ {
+ spin_lock(&inode->i_lock);
+- if (inode->i_nlink > 0)
+- drop_nlink(inode);
++ /* drop the inode if we're reasonably sure this is the last link */
++ if (inode->i_nlink == 1)
++ clear_nlink(inode);
++ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
+ spin_unlock(&inode->i_lock);
+ }
+
+@@ -1174,8 +1177,8 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+- drop_nlink(inode);
+ nfs_complete_unlink(dentry, inode);
++ nfs_drop_nlink(inode);
+ }
+ iput(inode);
+ }
+@@ -1646,10 +1649,8 @@ static int nfs_safe_remove(struct dentry *dentry)
+ if (inode != NULL) {
+ NFS_PROTO(inode)->return_delegation(inode);
+ error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+- /* The VFS may want to delete this inode */
+ if (error == 0)
+ nfs_drop_nlink(inode);
+- nfs_mark_for_revalidate(inode);
+ } else
+ error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+ if (error == -ENOENT)
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index dd057bc..fc8dc20 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -177,11 +177,31 @@ out_nofree:
+ return mnt;
+ }
+
++static int
++nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
++{
++ if (NFS_FH(dentry->d_inode)->size != 0)
++ return nfs_getattr(mnt, dentry, stat);
++ generic_fillattr(dentry->d_inode, stat);
++ return 0;
++}
++
++static int
++nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ if (NFS_FH(dentry->d_inode)->size != 0)
++ return nfs_setattr(dentry, attr);
++ return -EACCES;
++}
++
+ const struct inode_operations nfs_mountpoint_inode_operations = {
+ .getattr = nfs_getattr,
++ .setattr = nfs_setattr,
+ };
+
+ const struct inode_operations nfs_referral_inode_operations = {
++ .getattr = nfs_namespace_getattr,
++ .setattr = nfs_namespace_setattr,
+ };
+
+ static void nfs_expire_automounts(struct work_struct *work)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 7bff871..5e61aac 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5999,13 +5999,26 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+ rpc_call_start(task);
+ }
+
++static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data)
++{
++ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
++ nfs41_sequence_prepare(task, data);
++}
++
+ static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+ .rpc_release = nfs41_sequence_release,
+ };
+
+-static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
++static const struct rpc_call_ops nfs41_sequence_privileged_ops = {
++ .rpc_call_done = nfs41_sequence_call_done,
++ .rpc_call_prepare = nfs41_sequence_prepare_privileged,
++ .rpc_release = nfs41_sequence_release,
++};
++
++static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred,
++ const struct rpc_call_ops *seq_ops)
+ {
+ struct nfs4_sequence_data *calldata;
+ struct rpc_message msg = {
+@@ -6015,7 +6028,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+- .callback_ops = &nfs41_sequence_ops,
++ .callback_ops = seq_ops,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+ };
+
+@@ -6042,7 +6055,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+ return 0;
+- task = _nfs41_proc_sequence(clp, cred);
++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else
+@@ -6056,7 +6069,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+ struct rpc_task *task;
+ int ret;
+
+- task = _nfs41_proc_sequence(clp, cred);
++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index ea6d111..75bd459 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -640,6 +640,7 @@ static struct pnfs_layoutdriver_type objlayout_type = {
+ .flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR,
+
++ .owner = THIS_MODULE,
+ .alloc_layout_hdr = objlayout_alloc_layout_hdr,
+ .free_layout_hdr = objlayout_free_layout_hdr,
+
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index d8d7396..2a31c7f 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1095,7 +1095,7 @@ static int nfs_get_option_str(substring_t args[], char **option)
+ {
+ kfree(*option);
+ *option = match_strdup(args);
+- return !option;
++ return !*option;
+ }
+
+ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
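
The one-character fix above matters because the return value is how the helper signals allocation failure: option is the address of a field and is never NULL, so !option could never report that match_strdup() failed, while !*option does. A tiny sketch of the corrected shape (hypothetical names, plain strdup in place of match_strdup):

#include <stdlib.h>
#include <string.h>

/* Returns nonzero on allocation failure, mirroring the helper's contract. */
static int get_option_str(const char *arg, char **option)
{
    free(*option);
    *option = strdup(arg);
    return !*option;   /* "!option" would always be 0 and hide the failure */
}

int main(void)
{
    char *opt = NULL;
    int err = get_option_str("sec=krb5", &opt);

    free(opt);
    return err;
}
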
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index c9c1c0a..ac784eb 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -194,6 +194,7 @@ static __be32
+ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+ {
+ struct svc_fh *resfh;
++ int accmode;
+ __be32 status;
+
+ resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
+@@ -253,9 +254,10 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
+ /* set reply cache */
+ fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
+ &resfh->fh_handle);
+- if (!open->op_created)
+- status = do_open_permission(rqstp, resfh, open,
+- NFSD_MAY_NOP);
++ accmode = NFSD_MAY_NOP;
++ if (open->op_created)
++ accmode |= NFSD_MAY_OWNER_OVERRIDE;
++ status = do_open_permission(rqstp, resfh, open, accmode);
+ set_change_info(&open->op_cinfo, current_fh);
+ fh_dup2(current_fh, resfh);
+ out:
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 5b3224c..0953c6c 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2341,7 +2341,7 @@ nfsd4_init_slabs(void)
+ if (openowner_slab == NULL)
+ goto out_nomem;
+ lockowner_slab = kmem_cache_create("nfsd4_lockowners",
+- sizeof(struct nfs4_openowner), 0, 0, NULL);
++ sizeof(struct nfs4_lockowner), 0, 0, NULL);
+ if (lockowner_slab == NULL)
+ goto out_nomem;
+ file_slab = kmem_cache_create("nfsd4_files",
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 6322df3..1e83186 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2946,11 +2946,16 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ len = maxcount;
+ v = 0;
+ while (len > 0) {
+- pn = resp->rqstp->rq_resused++;
++ pn = resp->rqstp->rq_resused;
++ if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */
++ maxcount -= len;
++ break;
++ }
+ resp->rqstp->rq_vec[v].iov_base =
+ page_address(resp->rqstp->rq_respages[pn]);
+ resp->rqstp->rq_vec[v].iov_len =
+ len < PAGE_SIZE ? len : PAGE_SIZE;
++ resp->rqstp->rq_resused++;
+ v++;
+ len -= PAGE_SIZE;
+ }
+@@ -2996,6 +3001,8 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
+ return nfserr;
+ if (resp->xbuf->page_len)
+ return nfserr_resource;
++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused])
++ return nfserr_resource;
+
+ page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
+
+@@ -3045,6 +3052,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ return nfserr;
+ if (resp->xbuf->page_len)
+ return nfserr_resource;
++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused])
++ return nfserr_resource;
+
+ RESERVE_SPACE(NFS4_VERIFIER_SIZE);
+ savep = p;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 240473c..0d5e021 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -650,7 +650,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ }
+
+ /* Store reply in cache. */
+- nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
++ nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
+ return 1;
+ }
+
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index a9269f1..e1b40c7 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1485,13 +1485,19 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ case NFS3_CREATE_EXCLUSIVE:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+- && dchild->d_inode->i_size == 0 )
++ && dchild->d_inode->i_size == 0 ) {
++ if (created)
++ *created = 1;
+ break;
++ }
+ case NFS4_CREATE_EXCLUSIVE4_1:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+- && dchild->d_inode->i_size == 0 )
++ && dchild->d_inode->i_size == 0 ) {
++ if (created)
++ *created = 1;
+ goto set_attr;
++ }
+ /* fallthru */
+ case NFS3_CREATE_GUARDED:
+ err = nfserr_exist;
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index fdb1807..f385935 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
+ if (ret < 0)
+ printk(KERN_ERR "NILFS: GC failed during preparation: "
+ "cannot read source blocks: err=%d\n", ret);
+- else
++ else {
++ if (nilfs_sb_need_update(nilfs))
++ set_nilfs_discontinued(nilfs);
+ ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
++ }
+
+ nilfs_remove_all_gcinodes(nilfs);
+ clear_nilfs_gc_running(nilfs);
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 8445fbc..6f292dd 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+
+ /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+- if (unlikely(!(mask & IN_ALL_EVENTS)))
+- return -EINVAL;
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+@@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+
+ /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+- if (unlikely(!(mask & IN_ALL_EVENTS)))
+- return -EINVAL;
+
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_i_mark))
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 4f7795f..88577eb 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ * everything is up to the caller :) */
+ status = ocfs2_should_refresh_lock_res(lockres);
+ if (status < 0) {
++ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
+ goto bail;
+ }
+@@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+
+ ocfs2_complete_lock_res_refresh(lockres, status);
+
+- if (status < 0)
++ if (status < 0) {
++ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
++ }
+ ocfs2_track_lock_refresh(lockres);
+ }
+ bail:
+diff --git a/fs/splice.c b/fs/splice.c
+index 41514dd..c69fbb7 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -696,8 +696,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
+ return -EINVAL;
+
+ more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+- if (sd->len < sd->total_len)
++
++ if (sd->len < sd->total_len && pipe->nrbufs > 1)
+ more |= MSG_SENDPAGE_NOTLAST;
++
+ return file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
+ }
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index aa23346..585ee1c 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -574,6 +574,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
+ int lastblock = 0;
++ bool isBeyondEOF;
+
+ *err = 0;
+ *new = 0;
+@@ -653,7 +654,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ /* Are we beyond EOF? */
+ if (etype == -1) {
+ int ret;
+-
++ isBeyondEOF = 1;
+ if (count) {
+ if (c)
+ laarr[0] = laarr[1];
+@@ -696,6 +697,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ endnum = c + 1;
+ lastblock = 1;
+ } else {
++ isBeyondEOF = 0;
+ endnum = startnum = ((count > 2) ? 2 : count);
+
+ /* if the current extent is in position 0,
+@@ -738,10 +740,13 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ goal, err);
+ if (!newblocknum) {
+ brelse(prev_epos.bh);
++ brelse(cur_epos.bh);
++ brelse(next_epos.bh);
+ *err = -ENOSPC;
+ return 0;
+ }
+- iinfo->i_lenExtents += inode->i_sb->s_blocksize;
++ if (isBeyondEOF)
++ iinfo->i_lenExtents += inode->i_sb->s_blocksize;
+ }
+
+ /* if the extent the requsted block is located in contains multiple
+@@ -768,6 +773,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
+
+ brelse(prev_epos.bh);
++ brelse(cur_epos.bh);
++ brelse(next_epos.bh);
+
+ newblock = udf_get_pblock(inode->i_sb, newblocknum,
+ iinfo->i_location.partitionReferenceNum, 0);
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index e562dd4..1236b8c 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -86,11 +86,11 @@ xfs_destroy_ioend(
+ }
+
+ if (ioend->io_iocb) {
++ inode_dio_done(ioend->io_inode);
+ if (ioend->io_isasync) {
+ aio_complete(ioend->io_iocb, ioend->io_error ?
+ ioend->io_error : ioend->io_result, 0);
+ }
+- inode_dio_done(ioend->io_inode);
+ }
+
+ mempool_free(ioend, xfs_ioend_pool);
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index ed6642a..25f01d0 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -78,6 +78,14 @@ struct mmu_gather_batch {
+ #define MAX_GATHER_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+
++/*
++ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
++ * lockups for non-preemptible kernels on huge machines when a lot of memory
++ * is zapped during unmapping.
++ * 10K pages freed at once should be safe even without a preemption point.
++ */
++#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
++
+ /* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+@@ -96,6 +104,7 @@ struct mmu_gather {
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[MMU_GATHER_BUNDLE];
++ unsigned int batch_count;
+ };
+
+ #define HAVE_GENERIC_MMU_GATHER
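
The new MAX_GATHER_BATCH_COUNT limit above can be made concrete with a quick calculation. Assuming a 64-bit build with 4 KiB pages and a 16-byte batch header (both assumptions, since the batch struct layout is not shown in this hunk), each batch holds about 510 page pointers, so the cap works out to roughly 19 batches, i.e. just under 10,000 pages freed per gather before a flush is forced:

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096;   /* assumed page size */
    unsigned long header    = 16;     /* assumed size of the batch header */
    unsigned long per_batch = (page_size - header) / sizeof(void *);  /* ~510 on 64-bit */
    unsigned long batches   = 10000UL / per_batch;                    /* ~19 */

    printf("%lu pages/batch, %lu batches, ~%lu pages per gather\n",
           per_batch, batches, per_batch * batches);
    return 0;
}
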
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index 366422b..eb53e15 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -128,6 +128,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
+ unsigned long stack_top,
+ int executable_stack);
+ extern int bprm_mm_init(struct linux_binprm *bprm);
++extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+ extern int copy_strings_kernel(int argc, const char *const *argv,
+ struct linux_binprm *bprm);
+ extern int prepare_bprm_creds(struct linux_binprm *bprm);
+diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
+index 4262478..317aff8 100644
+--- a/include/linux/ceph/libceph.h
++++ b/include/linux/ceph/libceph.h
+@@ -43,7 +43,6 @@ struct ceph_options {
+ struct ceph_entity_addr my_addr;
+ int mount_timeout;
+ int osd_idle_ttl;
+- int osd_timeout;
+ int osd_keepalive_timeout;
+
+ /*
+@@ -63,7 +62,6 @@ struct ceph_options {
+ * defaults
+ */
+ #define CEPH_MOUNT_TIMEOUT_DEFAULT 60
+-#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
+ #define CEPH_OSD_KEEPALIVE_DEFAULT 5
+ #define CEPH_OSD_IDLE_TTL_DEFAULT 60
+
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 7201ce4..f59e942 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */
+ int con_is_bound(const struct consw *csw);
+ int register_con_driver(const struct consw *csw, int first, int last);
+ int unregister_con_driver(const struct consw *csw);
++int do_unregister_con_driver(const struct consw *csw);
+ int take_over_console(const struct consw *sw, int first, int last, int deflt);
++int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
+ void give_up_console(const struct consw *sw);
+ #ifdef CONFIG_HW_CONSOLE
+ int con_debug_enter(struct vc_data *vc);
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 5782114..eee8b0b 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -539,18 +539,30 @@ extern int __init efi_setup_pcdp_console(char *);
+ #endif
+
+ /*
+- * We play games with efi_enabled so that the compiler will, if possible, remove
+- * EFI-related code altogether.
++ * We play games with efi_enabled so that the compiler will, if
++ * possible, remove EFI-related code altogether.
+ */
++#define EFI_BOOT 0 /* Were we booted from EFI? */
++#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
++#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
++#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
++#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
++#define EFI_64BIT 5 /* Is the firmware 64-bit? */
++
+ #ifdef CONFIG_EFI
+ # ifdef CONFIG_X86
+- extern int efi_enabled;
+- extern bool efi_64bit;
++extern int efi_enabled(int facility);
+ # else
+-# define efi_enabled 1
++static inline int efi_enabled(int facility)
++{
++ return 1;
++}
+ # endif
+ #else
+-# define efi_enabled 0
++static inline int efi_enabled(int facility)
++{
++ return 0;
++}
+ #endif
+
+ /*
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index d09af4b..ee89932 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -75,28 +75,62 @@ static inline bool cgroup_freezing(struct task_struct *task)
+ */
+
+
+-/* Tell the freezer not to count the current task as freezable. */
++/**
++ * freezer_do_not_count - tell freezer to ignore %current
++ *
++ * Tell freezers to ignore the current task when determining whether the
++ * target frozen state is reached. IOW, the current task will be
++ * considered frozen enough by freezers.
++ *
++ * The caller shouldn't do anything which isn't allowed for a frozen task
++ * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
++ * wrap a scheduling operation and nothing much else.
++ */
+ static inline void freezer_do_not_count(void)
+ {
+ current->flags |= PF_FREEZER_SKIP;
+ }
+
+-/*
+- * Tell the freezer to count the current task as freezable again and try to
+- * freeze it.
++/**
++ * freezer_count - tell freezer to stop ignoring %current
++ *
++ * Undo freezer_do_not_count(). It tells freezers that %current should be
++ * considered again and tries to freeze if freezing condition is already in
++ * effect.
+ */
+ static inline void freezer_count(void)
+ {
+ current->flags &= ~PF_FREEZER_SKIP;
++ /*
++ * If freezing is in progress, the following paired with smp_mb()
++ * in freezer_should_skip() ensures that either we see %true
++ * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
++ */
++ smp_mb();
+ try_to_freeze();
+ }
+
+-/*
+- * Check if the task should be counted as freezable by the freezer
++/**
++ * freezer_should_skip - whether to skip a task when determining frozen
++ * state is reached
++ * @p: task in question
++ *
++ * This function is used by freezers after establishing %true freezing() to
++ * test whether a task should be skipped when determining the target frozen
++ * state is reached. IOW, if this function returns %true, @p is considered
++ * frozen enough.
+ */
+-static inline int freezer_should_skip(struct task_struct *p)
++static inline bool freezer_should_skip(struct task_struct *p)
+ {
+- return !!(p->flags & PF_FREEZER_SKIP);
++ /*
++ * The following smp_mb() paired with the one in freezer_count()
++ * ensures that either freezer_count() sees %true freezing() or we
++ * see cleared %PF_FREEZER_SKIP and return %false. This makes it
++ * impossible for a task to slip frozen state testing after
++ * clearing %PF_FREEZER_SKIP.
++ */
++ smp_mb();
++ return p->flags & PF_FREEZER_SKIP;
+ }
+
+ /*
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 561e130..9b0c614 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -327,7 +327,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ struct vlan_hdr *vhdr)
+ {
+ __be16 proto;
+- unsigned char *rawp;
++ unsigned short *rawp;
+
+ /*
+ * Was a VLAN packet, grab the encapsulated protocol, which the layer
+@@ -340,8 +340,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ return;
+ }
+
+- rawp = skb->data;
+- if (*(unsigned short *) rawp == 0xFFFF)
++ rawp = (unsigned short *)(vhdr + 1);
++ if (*rawp == 0xFFFF)
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell
+ * breaks the protocol design and runs IPX over 802.3 without
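
The fix above reads the raw-802.3 IPX marker from the two bytes that immediately follow the VLAN header instead of from the start of the buffer, which is not guaranteed to point there. A small userspace sketch of the corrected check (the frame layout and names are simplified for illustration):

#include <stdint.h>
#include <stdio.h>

struct vlan_hdr_like {
    uint16_t tci;
    uint16_t encapsulated_proto;
};

struct test_frame {
    struct vlan_hdr_like hdr;
    uint8_t payload[4];
};

static int payload_is_ipx_raw(const struct vlan_hdr_like *vhdr)
{
    const uint8_t *payload = (const uint8_t *)(vhdr + 1);  /* first bytes after the header */

    return payload[0] == 0xFF && payload[1] == 0xFF;       /* 802.3 "raw" IPX checksum marker */
}

int main(void)
{
    struct test_frame f = { .hdr = { 0, 0 }, .payload = { 0xFF, 0xFF, 0x12, 0x34 } };

    printf("%d\n", payload_is_ipx_raw(&f.hdr));             /* prints 1 */
    return 0;
}
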
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index 1d1b1e1..ee2baf0 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index b5d1384..70473da 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -362,7 +362,7 @@ static inline void ClearPageCompound(struct page *page)
+ * pages on the LRU and/or pagecache.
+ */
+ TESTPAGEFLAG(Compound, compound)
+-__PAGEFLAG(Head, compound)
++__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
+
+ /*
+ * PG_reclaim is used in combination with PG_compound to mark the
+@@ -374,8 +374,14 @@ __PAGEFLAG(Head, compound)
+ * PG_compound & PG_reclaim => Tail page
+ * PG_compound & ~PG_reclaim => Head page
+ */
++#define PG_head_mask ((1L << PG_compound))
+ #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+
++static inline int PageHead(struct page *page)
++{
++ return ((page->flags & PG_head_tail_mask) == PG_head_mask);
++}
++
+ static inline int PageTail(struct page *page)
+ {
+ return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
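
The open-coded PageHead() above distinguishes head from tail pages purely by masking: a head page has only the compound bit set, a tail page has both compound and reclaim set. A bit-arithmetic sketch with made-up bit positions:

#include <stdio.h>

#define PG_COMPOUND (1UL << 0)   /* illustrative bit positions */
#define PG_RECLAIM  (1UL << 1)

#define PG_HEAD_MASK       (PG_COMPOUND)
#define PG_HEAD_TAIL_MASK  (PG_COMPOUND | PG_RECLAIM)

static int page_head(unsigned long flags)
{
    return (flags & PG_HEAD_TAIL_MASK) == PG_HEAD_MASK;
}

static int page_tail(unsigned long flags)
{
    return (flags & PG_HEAD_TAIL_MASK) == PG_HEAD_TAIL_MASK;
}

int main(void)
{
    unsigned long head = PG_COMPOUND;
    unsigned long tail = PG_COMPOUND | PG_RECLAIM;

    printf("head: %d %d\n", page_head(head), page_tail(head));   /* 1 0 */
    printf("tail: %d %d\n", page_head(tail), page_tail(tail));   /* 0 1 */
    return 0;
}
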
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 8d3c427..50a6cbe 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1566,6 +1566,7 @@
+ #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+ #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+ #define PCI_DEVICE_ID_RICOH_R5C822 0x0822
++#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822
+ #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
+ #define PCI_DEVICE_ID_RICOH_R5C832 0x0832
+ #define PCI_DEVICE_ID_RICOH_R5C843 0x0843
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 23bddac..d2bbc12 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2684,7 +2684,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ extern void recalc_sigpending_and_wake(struct task_struct *t);
+ extern void recalc_sigpending(void);
+
+-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
++
++static inline void signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
++}
++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
++}
+
+ /*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
+index a54b825..6f8b026 100644
+--- a/include/linux/usb/audio.h
++++ b/include/linux/usb/audio.h
+@@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de
+ int protocol)
+ {
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+- return desc->baSourceID[desc->bNrInPins + control_size];
++ return *(uac_processing_unit_bmControls(desc, protocol)
++ + control_size);
+ }
+
+ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+ {
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+- return &desc->baSourceID[desc->bNrInPins + control_size + 1];
++ return uac_processing_unit_bmControls(desc, protocol)
++ + control_size + 1;
+ }
+
+ /* 4.5.2 Class-Specific AS Interface Descriptor */
+diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
+index d1d732c..17de73a 100644
+--- a/include/linux/usb/ch9.h
++++ b/include/linux/usb/ch9.h
+@@ -152,6 +152,12 @@
+ #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
+ #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
+
++/*
++ * Interface status, Figure 9-5 USB 3.0 spec
++ */
++#define USB_INTRF_STAT_FUNC_RW_CAP 1
++#define USB_INTRF_STAT_FUNC_RW 2
++
+ #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
+
+ /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
+diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
+index 50ae7d0..e8d6571 100644
+--- a/include/linux/vt_kern.h
++++ b/include/linux/vt_kern.h
+@@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
+ int con_get_cmap(unsigned char __user *cmap);
+ void scrollback(struct vc_data *vc, int lines);
+ void scrollfront(struct vc_data *vc, int lines);
++void clear_buffer_attributes(struct vc_data *vc);
+ void update_region(struct vc_data *vc, unsigned long start, int count);
+ void redraw_screen(struct vc_data *vc, int is_switch);
+ #define update_screen(x) redraw_screen(x, 0)
+@@ -130,6 +131,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
+ int vt_waitactive(int n);
+ void change_console(struct vc_data *new_vc);
+ void reset_vc(struct vc_data *vc);
++extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
++ int deflt);
+ extern int unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
+ int vty_init(const struct file_operations *console_fops);
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index 9e34c87..f071f50 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -28,16 +28,16 @@
+
+ struct inet_hashinfo;
+
+-/* I have no idea if this is a good hash for v6 or not. -DaveM */
+ static inline unsigned int inet6_ehashfn(struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport)
+ {
+- u32 ports = (lport ^ (__force u16)fport);
++ u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+
+ return jhash_3words((__force u32)laddr->s6_addr32[3],
+- (__force u32)faddr->s6_addr32[3],
+- ports, inet_ehash_secret + net_hash_mix(net));
++ ipv6_addr_jhash(faddr),
++ ports,
++ inet_ehash_secret + net_hash_mix(net));
+ }
+
+ static inline int inet6_sk_ehashfn(const struct sock *sk)
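
Part of the hashing change above replaces the XOR of the two ports with a packed 32-bit word. XOR is symmetric, so swapping the local and foreign ports produced the same input to the hash, while packing keeps the two distinguishable. A two-function illustration (plain host-order integers here, whereas the kernel mixes a host-order local port with a big-endian foreign port):

#include <stdint.h>
#include <stdio.h>

static uint32_t ports_xor(uint16_t lport, uint16_t fport)  { return lport ^ fport; }
static uint32_t ports_pack(uint16_t lport, uint16_t fport) { return ((uint32_t)lport << 16) | fport; }

int main(void)
{
    printf("xor:  %u vs %u\n", ports_xor(80, 1234), ports_xor(1234, 80));   /* equal */
    printf("pack: %u vs %u\n", ports_pack(80, 1234), ports_pack(1234, 80)); /* differ */
    return 0;
}
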
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index ba1d361..1832927 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -318,6 +318,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent,
+ const unsigned long max_rto);
+
+ extern void inet_csk_destroy_sock(struct sock *sk);
++extern void inet_csk_prepare_forced_close(struct sock *sk);
+
+ /*
+ * LISTEN is a special case for poll..
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 613cfa4..8eac4a9 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -203,6 +203,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
+ extern int inet_sk_rebuild_header(struct sock *sk);
+
+ extern u32 inet_ehash_secret;
++extern u32 ipv6_hash_secret;
+ extern void build_ehash_secret(void);
+
+ static inline unsigned int inet_ehashfn(struct net *net,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 01c34b3..640591f 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -15,6 +15,7 @@
+
+ #include <linux/ipv6.h>
+ #include <linux/hardirq.h>
++#include <linux/jhash.h>
+ #include <net/if_inet6.h>
+ #include <net/ndisc.h>
+ #include <net/flow.h>
+@@ -432,6 +433,17 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+ #endif
+ }
+
++/* more secured version of ipv6_addr_hash() */
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++{
++ u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
++
++ return jhash_3words(v,
++ (__force u32)a->s6_addr32[2],
++ (__force u32)a->s6_addr32[3],
++ ipv6_hash_secret);
++}
++
+ static inline bool ipv6_addr_loopback(const struct in6_addr *a)
+ {
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+diff --git a/include/net/sock.h b/include/net/sock.h
+index adb7da2..450a2af 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1012,7 +1012,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
+ sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
+ }
+
+-inline void sk_refcnt_debug_release(const struct sock *sk)
++static inline void sk_refcnt_debug_release(const struct sock *sk)
+ {
+ if (atomic_read(&sk->sk_refcnt) != 1)
+ printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 5be8937..fca8bbe 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -734,6 +734,8 @@ struct se_subsystem_dev {
+ };
+
+ struct se_device {
++#define SE_DEV_LINK_MAGIC 0xfeeddeef
++ u32 dev_link_magic;
+ /* RELATIVE TARGET PORT IDENTIFER Counter */
+ u16 dev_rpti_counter;
+ /* Used for SAM Task Attribute ordering */
+@@ -820,6 +822,8 @@ struct se_port_stat_grps {
+ };
+
+ struct se_lun {
++#define SE_LUN_LINK_MAGIC 0xffff7771
++ u32 lun_link_magic;
+ /* See transport_lun_status_table */
+ enum transport_lun_status_table lun_status;
+ u32 lun_access;
+diff --git a/init/main.c b/init/main.c
+index d61ec54..5973f47 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -603,7 +603,7 @@ asmlinkage void __init start_kernel(void)
+ pidmap_init();
+ anon_vma_init();
+ #ifdef CONFIG_X86
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_enter_virtual_mode();
+ #endif
+ thread_info_cache_init();
+@@ -631,7 +631,7 @@ asmlinkage void __init start_kernel(void)
+ acpi_early_init(); /* before LAPIC and SMP init */
+ sfi_init_late();
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_free_boot_services();
+
+ ftrace_init();
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index ff2bce5..2c0d5d0 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2635,9 +2635,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
+ dentry->d_fsdata = cgrp;
+ inc_nlink(parent->d_inode);
+ rcu_assign_pointer(cgrp->dentry, dentry);
+- dget(dentry);
+ }
+- dput(dentry);
+
+ return error;
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 19eb089..8879430 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2471,8 +2471,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
+- WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
+-
+ rcu_read_lock();
+
+ ret = -ESRCH;
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index 83e368b..a9642d5 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -142,8 +142,6 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
+- WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
+-
+ rcu_read_lock();
+
+ ret = -ESRCH;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 6db7a5e..cdd5607 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ * and expiry check is done in the hrtimer_interrupt or in the softirq.
+ */
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base,
+- int wakeup)
++ struct hrtimer_clock_base *base)
+ {
+- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+- if (wakeup) {
+- raw_spin_unlock(&base->cpu_base->lock);
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+- raw_spin_lock(&base->cpu_base->lock);
+- } else
+- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-
+- return 1;
+- }
+-
+- return 0;
++ return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
+ }
+
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base,
+- int wakeup)
++ struct hrtimer_clock_base *base)
+ {
+ return 0;
+ }
+@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+- hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
++ && hrtimer_enqueue_reprogram(timer, new_base)) {
++ if (wakeup) {
++ /*
++ * We need to drop cpu_base->lock to avoid a
++ * lock ordering issue vs. rq->lock.
++ */
++ raw_spin_unlock(&new_base->cpu_base->lock);
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ local_irq_restore(flags);
++ return ret;
++ } else {
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++ }
+
+ unlock_hrtimer_base(timer, &flags);
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 4c69326..e48caf8 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -716,6 +716,7 @@ static void
+ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+ {
+ cpumask_var_t mask;
++ bool valid = true;
+
+ if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+ return;
+@@ -730,10 +731,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+ }
+
+ raw_spin_lock_irq(&desc->lock);
+- cpumask_copy(mask, desc->irq_data.affinity);
++ /*
++ * This code is triggered unconditionally. Check the affinity
++ * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
++ */
++ if (desc->irq_data.affinity)
++ cpumask_copy(mask, desc->irq_data.affinity);
++ else
++ valid = false;
+ raw_spin_unlock_irq(&desc->lock);
+
+- set_cpus_allowed_ptr(current, mask);
++ if (valid)
++ set_cpus_allowed_ptr(current, mask);
+ free_cpumask_var(mask);
+ }
+ #else
+@@ -936,6 +945,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ */
+ get_task_struct(t);
+ new->thread = t;
++ /*
++ * Tell the thread to set its affinity. This is
++ * important for shared interrupt handlers as we do
++ * not invoke setup_affinity() for the secondary
++ * handlers as everything is already set up. Even for
++ * interrupts marked with IRQF_NOBALANCING this is
++ * correct as we want the thread to move to the cpu(s)
++ * on which the requesting code placed the interrupt.
++ */
++ set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ }
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 611cd60..7b5f012 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+
+ /*
+ * All handlers must agree on IRQF_SHARED, so we test just the
+- * first. Check for action->next as well.
++ * first.
+ */
+ action = desc->action;
+ if (!action || !(action->flags & IRQF_SHARED) ||
+- (action->flags & __IRQF_TIMER) ||
+- (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+- !action->next)
++ (action->flags & __IRQF_TIMER))
+ goto out;
+
+ /* Already running on another processor */
+@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ do {
+ if (handle_irq_event(desc) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
++ /* Make sure that there is still a valid action */
+ action = desc->action;
+ } while ((desc->istate & IRQS_PENDING) && action);
+ desc->istate &= ~IRQS_POLL_INPROGRESS;
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 125cb67..acbb79c 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1422,8 +1422,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ while (!signal_pending(current)) {
+ if (timer.it.cpu.expires.sched == 0) {
+ /*
+- * Our timer fired and was reset.
++ * Our timer fired and was reset, below
++ * deletion can not fail.
+ */
++ posix_cpu_timer_del(&timer);
+ spin_unlock_irq(&timer.it_lock);
+ return 0;
+ }
+@@ -1441,9 +1443,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ * We were interrupted by a signal.
+ */
+ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+- posix_cpu_timer_set(&timer, 0, &zero_it, it);
++ error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
++ if (!error) {
++ /*
++ * Timer is now unarmed, deletion can not fail.
++ */
++ posix_cpu_timer_del(&timer);
++ }
+ spin_unlock_irq(&timer.it_lock);
+
++ while (error == TIMER_RETRY) {
++ /*
++ * We need to handle case when timer was or is in the
++ * middle of firing. In other cases we already freed
++ * resources.
++ */
++ spin_lock_irq(&timer.it_lock);
++ error = posix_cpu_timer_del(&timer);
++ spin_unlock_irq(&timer.it_lock);
++ }
++
+ if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+ /*
+ * It actually did fire already.
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index a232bb5..b96de86b4 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
+ * TASK_KILLABLE sleeps.
+ */
+ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
+- signal_wake_up(child, task_is_traced(child));
++ ptrace_signal_wake_up(child, true);
+
+ spin_unlock(&child->sighand->siglock);
+ }
+
++/* Ensure that nothing can wake it up, even SIGKILL */
++static bool ptrace_freeze_traced(struct task_struct *task)
++{
++ bool ret = false;
++
++ /* Lockless, nobody but us can set this flag */
++ if (task->jobctl & JOBCTL_LISTENING)
++ return ret;
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++ task->state = __TASK_TRACED;
++ ret = true;
++ }
++ spin_unlock_irq(&task->sighand->siglock);
++
++ return ret;
++}
++
++static void ptrace_unfreeze_traced(struct task_struct *task)
++{
++ if (task->state != __TASK_TRACED)
++ return;
++
++ WARN_ON(!task->ptrace || task->parent != current);
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (__fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++ else
++ task->state = TASK_TRACED;
++ spin_unlock_irq(&task->sighand->siglock);
++}
++
+ /**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ * be changed by us so it's not changing right after this.
+ */
+ read_lock(&tasklist_lock);
+- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
++ if (child->ptrace && child->parent == current) {
++ WARN_ON(child->state == __TASK_TRACED);
+ /*
+ * child->sighand can't be NULL, release_task()
+ * does ptrace_unlink() before __exit_signal().
+ */
+- spin_lock_irq(&child->sighand->siglock);
+- WARN_ON_ONCE(task_is_stopped(child));
+- if (ignore_state || (task_is_traced(child) &&
+- !(child->jobctl & JOBCTL_LISTENING)))
++ if (ignore_state || ptrace_freeze_traced(child))
+ ret = 0;
+- spin_unlock_irq(&child->sighand->siglock);
+ }
+ read_unlock(&tasklist_lock);
+
+- if (!ret && !ignore_state)
+- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
++ if (!ret && !ignore_state) {
++ if (!wait_task_inactive(child, __TASK_TRACED)) {
++ /*
++ * This can only happen if may_ptrace_stop() fails and
++ * ptrace_stop() changes ->state back to TASK_RUNNING,
++ * so we should not worry about leaking __TASK_TRACED.
++ */
++ WARN_ON(child->state == __TASK_TRACED);
++ ret = -ESRCH;
++ }
++ }
+
+- /* All systems go.. */
+ return ret;
+ }
+
+@@ -310,7 +349,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ */
+ if (task_is_stopped(task) &&
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+- signal_wake_up(task, 1);
++ signal_wake_up_state(task, __TASK_STOPPED);
+
+ spin_unlock(&task->sighand->siglock);
+
+@@ -727,7 +766,7 @@ int ptrace_request(struct task_struct *child, long request,
+ * tracee into STOP.
+ */
+ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+- signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
++ ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+@@ -753,7 +792,7 @@ int ptrace_request(struct task_struct *child, long request,
+ * start of this trap and now. Trigger re-trap.
+ */
+ if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+- signal_wake_up(child, true);
++ ptrace_signal_wake_up(child, true);
+ ret = 0;
+ }
+ unlock_task_sighand(child, &flags);
+@@ -890,6 +929,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ goto out_put_task_struct;
+
+ ret = arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
+
+ out_put_task_struct:
+ put_task_struct(child);
+@@ -1029,8 +1070,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
+- if (!ret)
++ if (!ret) {
+ ret = compat_arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
++ }
+
+ out_put_task_struct:
+ put_task_struct(child);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 34d4588..73f35d4 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -763,6 +763,7 @@ static void __init __reserve_region_with_split(struct resource *root,
+ struct resource *parent = root;
+ struct resource *conflict;
+ struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
++ struct resource *next_res = NULL;
+
+ if (!res)
+ return;
+@@ -772,21 +773,46 @@ static void __init __reserve_region_with_split(struct resource *root,
+ res->end = end;
+ res->flags = IORESOURCE_BUSY;
+
+- conflict = __request_resource(parent, res);
+- if (!conflict)
+- return;
++ while (1) {
+
+- /* failed, split and try again */
+- kfree(res);
++ conflict = __request_resource(parent, res);
++ if (!conflict) {
++ if (!next_res)
++ break;
++ res = next_res;
++ next_res = NULL;
++ continue;
++ }
+
+- /* conflict covered whole area */
+- if (conflict->start <= start && conflict->end >= end)
+- return;
++ /* conflict covered whole area */
++ if (conflict->start <= res->start &&
++ conflict->end >= res->end) {
++ kfree(res);
++ WARN_ON(next_res);
++ break;
++ }
++
++ /* failed, split and try again */
++ if (conflict->start > res->start) {
++ end = res->end;
++ res->end = conflict->start - 1;
++ if (conflict->end < end) {
++ next_res = kzalloc(sizeof(*next_res),
++ GFP_ATOMIC);
++ if (!next_res) {
++ kfree(res);
++ break;
++ }
++ next_res->name = name;
++ next_res->start = conflict->end + 1;
++ next_res->end = end;
++ next_res->flags = IORESOURCE_BUSY;
++ }
++ } else {
++ res->start = conflict->end + 1;
++ }
++ }
+
+- if (conflict->start > start)
+- __reserve_region_with_split(root, start, conflict->start-1, name);
+- if (conflict->end < end)
+- __reserve_region_with_split(root, conflict->end+1, end, name);
+ }
+
+ void __init reserve_region_with_split(struct resource *root,
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 1a48cdb..5eb6c96 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1686,7 +1686,8 @@ out:
+ */
+ int wake_up_process(struct task_struct *p)
+ {
+- return try_to_wake_up(p, TASK_ALL, 0);
++ WARN_ON(task_is_stopped_or_traced(p));
++ return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index e0b7ba9..71e2fcc 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+ static int do_balance_runtime(struct rt_rq *rt_rq)
+ {
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
++ struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
+ int i, weight, more = 0;
+ u64 rt_period;
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index be4f856..67b64be 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -678,23 +678,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
+ */
+-void signal_wake_up(struct task_struct *t, int resume)
++void signal_wake_up_state(struct task_struct *t, unsigned int state)
+ {
+- unsigned int mask;
+-
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+-
+ /*
+- * For SIGKILL, we want to wake it up in the stopped/traced/killable
++ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+ * executing another processor and just now entering stopped state.
+ * By using wake_up_state, we ensure the process will wake up and
+ * handle its death signal.
+ */
+- mask = TASK_INTERRUPTIBLE;
+- if (resume)
+- mask |= TASK_WAKEKILL;
+- if (!wake_up_state(t, mask))
++ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+ kick_process(t);
+ }
+
+@@ -842,7 +836,7 @@ static void ptrace_trap_notify(struct task_struct *t)
+ assert_spin_locked(&t->sighand->siglock);
+
+ task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+- signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
++ ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+ }
+
+ /*
+@@ -1797,6 +1791,10 @@ static inline int may_ptrace_stop(void)
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_state != NULL. Otherwise it
+ * is safe to enter schedule().
++ *
++ * This is almost outdated, a task with the pending SIGKILL can't
++ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
++ * after SIGKILL was already dequeued.
+ */
+ if (unlikely(current->mm->core_state) &&
+ unlikely(current->mm == current->parent->mm))
+@@ -1922,6 +1920,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+ if (gstop_done)
+ do_notify_parent_cldstop(current, false, why);
+
++ /* tasklist protects us from ptrace_freeze_traced() */
+ __set_current_state(TASK_RUNNING);
+ if (clear_code)
+ current->exit_code = 0;
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 29dd40a..69f38bd 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -33,6 +33,7 @@ struct call_function_data {
+ struct call_single_data csd;
+ atomic_t refs;
+ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_ipi;
+ };
+
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
+@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ cpu_to_node(cpu)))
+ return notifier_from_errno(-ENOMEM);
++ if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
++ cpu_to_node(cpu)))
++ return notifier_from_errno(-ENOMEM);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_cpumask_var(cfd->cpumask);
++ free_cpumask_var(cfd->cpumask_ipi);
+ break;
+ #endif
+ };
+@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
+ return;
+ }
+
++ /*
++ * After we put an entry into the list, data->cpumask
++ * may be cleared again when another CPU sends another IPI for
++ * a SMP function call, so data->cpumask will be zero.
++ */
++ cpumask_copy(data->cpumask_ipi, data->cpumask);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
+ /*
+ * Place entry at the _HEAD_ of the list, so that any cpu still
+@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
+ smp_mb();
+
+ /* Send a message to all CPUs in the map */
+- arch_send_call_function_ipi_mask(data->cpumask);
++ arch_send_call_function_ipi_mask(data->cpumask_ipi);
+
+ /* Optionally wait for the CPUs to complete */
+ if (wait)
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
+index eb51d76..3f42652 100644
+--- a/kernel/timeconst.pl
++++ b/kernel/timeconst.pl
+@@ -369,10 +369,8 @@ if ($hz eq '--can') {
+ die "Usage: $0 HZ\n";
+ }
+
+- @val = @{$canned_values{$hz}};
+- if (!defined(@val)) {
+- @val = compute_values($hz);
+- }
++ $cv = $canned_values{$hz};
++ @val = defined($cv) ? @$cv : compute_values($hz);
+ output($hz, @val);
+ }
+ exit 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 781ecc2..7f8a8df 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3863,7 +3863,7 @@ static int ftrace_module_notify(struct notifier_block *self,
+
+ struct notifier_block ftrace_module_nb = {
+ .notifier_call = ftrace_module_notify,
+- .priority = 0,
++ .priority = INT_MAX, /* Run before anything that can use kprobes */
+ };
+
+ extern unsigned long __start_mcount_loc[];
+diff --git a/lib/atomic64.c b/lib/atomic64.c
+index 9785378..08a4f06 100644
+--- a/lib/atomic64.c
++++ b/lib/atomic64.c
+@@ -31,7 +31,11 @@
+ static union {
+ raw_spinlock_t lock;
+ char pad[L1_CACHE_BYTES];
+-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
++} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
++ [0 ... (NR_LOCKS - 1)] = {
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
++ },
++};
+
+ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+ {
+@@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ return ret;
+ }
+ EXPORT_SYMBOL(atomic64_add_unless);
+-
+-static int init_atomic64_lock(void)
+-{
+- int i;
+-
+- for (i = 0; i < NR_LOCKS; ++i)
+- raw_spin_lock_init(&atomic64_lock[i].lock);
+- return 0;
+-}
+-
+-pure_initcall(init_atomic64_lock);
+diff --git a/lib/digsig.c b/lib/digsig.c
+index 8c0e629..dc2be7e 100644
+--- a/lib/digsig.c
++++ b/lib/digsig.c
+@@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,
+ memset(out1, 0, head);
+ memcpy(out1 + head, p, l);
+
++ kfree(p);
++
+ err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
+ if (err)
+ goto err;
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 7fcd3a5..214944a 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -956,7 +956,7 @@ static int compact_node(int nid)
+ }
+
+ /* Compact all nodes in the system */
+-static int compact_nodes(void)
++static void compact_nodes(void)
+ {
+ int nid;
+
+@@ -965,8 +965,6 @@ static int compact_nodes(void)
+
+ for_each_online_node(nid)
+ compact_node(nid);
+-
+- return COMPACT_COMPLETE;
+ }
+
+ /* The written value is actually unused, all memory is compacted */
+@@ -977,7 +975,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+ {
+ if (write)
+- return compact_nodes();
++ compact_nodes();
+
+ return 0;
+ }
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 9b75a04..69f317d 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -17,6 +17,7 @@
+ #include <linux/fadvise.h>
+ #include <linux/writeback.h>
+ #include <linux/syscalls.h>
++#include <linux/swap.h>
+
+ #include <asm/unistd.h>
+
+@@ -120,9 +121,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
+ start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
+ end_index = (endbyte >> PAGE_CACHE_SHIFT);
+
+- if (end_index >= start_index)
+- invalidate_mapping_pages(mapping, start_index,
++ if (end_index >= start_index) {
++ unsigned long count = invalidate_mapping_pages(mapping,
++ start_index, end_index);
++
++ /*
++ * If fewer pages were invalidated than expected then
++ * it is possible that some of the pages were on
++ * a per-cpu pagevec for a remote CPU. Drain all
++ * pagevecs and try again.
++ */
++ if (count < (end_index - start_index + 1)) {
++ lru_add_drain_all();
++ invalidate_mapping_pages(mapping, start_index,
+ end_index);
++ }
++ }
+ break;
+ default:
+ ret = -EINVAL;
+diff --git a/mm/memory.c b/mm/memory.c
+index 5736170..29ffb5c 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -182,10 +182,14 @@ static int tlb_next_batch(struct mmu_gather *tlb)
+ return 1;
+ }
+
++ if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
++ return 0;
++
+ batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+ if (!batch)
+ return 0;
+
++ tlb->batch_count++;
+ batch->next = NULL;
+ batch->nr = 0;
+ batch->max = MAX_GATHER_BATCH;
+@@ -214,6 +218,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+ tlb->local.nr = 0;
+ tlb->local.max = ARRAY_SIZE(tlb->__pages);
+ tlb->active = &tlb->local;
++ tlb->batch_count = 0;
+
+ #ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 01350d3..5e0fea1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2370,8 +2370,7 @@ void numa_default_policy(void)
+ */
+
+ /*
+- * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
+- * Used only for mpol_parse_str() and mpol_to_str()
++ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
+ */
+ #define MPOL_LOCAL MPOL_MAX
+ static const char * const policy_modes[] =
+@@ -2386,28 +2385,21 @@ static const char * const policy_modes[] =
+
+ #ifdef CONFIG_TMPFS
+ /**
+- * mpol_parse_str - parse string to mempolicy
++ * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
+ * @str: string containing mempolicy to parse
+ * @mpol: pointer to struct mempolicy pointer, returned on success.
+- * @no_context: flag whether to "contextualize" the mempolicy
++ * @unused: redundant argument, to be removed later.
+ *
+ * Format of input:
+ * <mode>[=<flags>][:<nodelist>]
+ *
+- * if @no_context is true, save the input nodemask in w.user_nodemask in
+- * the returned mempolicy. This will be used to "clone" the mempolicy in
+- * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
+- * mount option. Note that if 'static' or 'relative' mode flags were
+- * specified, the input nodemask will already have been saved. Saving
+- * it again is redundant, but safe.
+- *
+ * On success, returns 0, else 1
+ */
+-int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
++int mpol_parse_str(char *str, struct mempolicy **mpol, int unused)
+ {
+ struct mempolicy *new = NULL;
+ unsigned short mode;
+- unsigned short uninitialized_var(mode_flags);
++ unsigned short mode_flags;
+ nodemask_t nodes;
+ char *nodelist = strchr(str, ':');
+ char *flags = strchr(str, '=');
+@@ -2495,24 +2487,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+ if (IS_ERR(new))
+ goto out;
+
+- if (no_context) {
+- /* save for contextualization */
+- new->w.user_nodemask = nodes;
+- } else {
+- int ret;
+- NODEMASK_SCRATCH(scratch);
+- if (scratch) {
+- task_lock(current);
+- ret = mpol_set_nodemask(new, &nodes, scratch);
+- task_unlock(current);
+- } else
+- ret = -ENOMEM;
+- NODEMASK_SCRATCH_FREE(scratch);
+- if (ret) {
+- mpol_put(new);
+- goto out;
+- }
+- }
++ /*
++ * Save nodes for mpol_to_str() to show the tmpfs mount options
++ * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
++ */
++ if (mode != MPOL_PREFERRED)
++ new->v.nodes = nodes;
++ else if (nodelist)
++ new->v.preferred_node = first_node(nodes);
++ else
++ new->flags |= MPOL_F_LOCAL;
++
++ /*
++ * Save nodes for contextualization: this will be used to "clone"
++ * the mempolicy in a specific context [cpuset] at a later time.
++ */
++ new->w.user_nodemask = nodes;
++
+ err = 0;
+
+ out:
+@@ -2532,13 +2523,13 @@ out:
+ * @buffer: to contain formatted mempolicy string
+ * @maxlen: length of @buffer
+ * @pol: pointer to mempolicy to be formatted
+- * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
++ * @unused: redundant argument, to be removed later.
+ *
+ * Convert a mempolicy into a string.
+ * Returns the number of characters in buffer (if positive)
+ * or an error (negative)
+ */
+-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
++int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused)
+ {
+ char *p = buffer;
+ int l;
+@@ -2564,7 +2555,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ case MPOL_PREFERRED:
+ nodes_clear(nodes);
+ if (flags & MPOL_F_LOCAL)
+- mode = MPOL_LOCAL; /* pseudo-policy */
++ mode = MPOL_LOCAL;
+ else
+ node_set(pol->v.preferred_node, nodes);
+ break;
+@@ -2572,10 +2563,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+- if (no_context)
+- nodes = pol->w.user_nodemask;
+- else
+- nodes = pol->v.nodes;
++ nodes = pol->v.nodes;
+ break;
+
+ default:
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 862b608..8d1ca2d 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+
++/* global SRCU for all MMs */
++static struct srcu_struct srcu;
++
+ /*
+ * This function can't run concurrently against mmu_notifier_register
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,58 +29,61 @@
+ * in parallel despite there being no task using this mm any more,
+ * through the vmas outside of the exit_mmap context, such as with
+ * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+ * can't go away from under us as exit_mmap holds an mm_count pin
+ * itself.
+ */
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
+- struct hlist_node *n;
++ int id;
+
+ /*
+- * RCU here will block mmu_notifier_unregister until
+- * ->release returns.
++ * srcu_read_lock() here will block synchronize_srcu() in
++ * mmu_notifier_unregister() until all registered
++ * ->release() callouts this function makes have
++ * returned.
+ */
+- rcu_read_lock();
+- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- rcu_read_unlock();
+-
++ id = srcu_read_lock(&srcu);
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
++
+ /*
+- * We arrived before mmu_notifier_unregister so
+- * mmu_notifier_unregister will do nothing other than
+- * to wait ->release to finish and
+- * mmu_notifier_unregister to return.
++ * Unlink. This will prevent mmu_notifier_unregister()
++ * from also making the ->release() callout.
+ */
+ hlist_del_init_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
++ /*
++ * Clear sptes. (see 'release' description in mmu_notifier.h)
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * synchronize_rcu here prevents mmu_notifier_release to
+- * return to exit_mmap (which would proceed freeing all pages
+- * in the mm) until the ->release method returns, if it was
+- * invoked by mmu_notifier_unregister.
+- *
+- * The mmu_notifier_mm can't go away from under us because one
+- * mm_count is hold by exit_mmap.
++ * All callouts to ->release() which we have done are complete.
++ * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
++ */
++ srcu_read_unlock(&srcu, id);
++
++ /*
++ * mmu_notifier_unregister() may have unlinked a notifier and may
++ * still be calling out to it. Additionally, other notifiers
++ * may have been active via vmtruncate() et. al. Block here
++ * to ensure that all notifier callouts for this mm have been
++ * completed and the sptes are really cleaned up before returning
++ * to exit_mmap().
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+ }
+
+ /*
+@@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->clear_flush_young)
+ young |= mn->ops->clear_flush_young(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->test_young) {
+ young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ break;
+ }
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->change_pte)
+ mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ else if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_start)
+ mn->ops->invalidate_range_start(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_end)
+ mn->ops->invalidate_range_end(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+
+ BUG_ON(atomic_read(&mm->mm_users) <= 0);
+
++ /*
++ * Verify that mmu_notifier_init() already run and the global srcu is
++ * initialized.
++ */
++ BUG_ON(!srcu.per_cpu_ref);
++
+ ret = -ENOMEM;
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+ /*
+ * This releases the mm_count pin automatically and frees the mm
+ * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+ * calling mmu_notifier_unregister. ->release or any other notifier
+ * method may be invoked concurrently with mmu_notifier_unregister,
+ * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- /*
+- * RCU here will force exit_mmap to wait ->release to finish
+- * before freeing the pages.
+- */
+- rcu_read_lock();
++ int id;
+
+ /*
+- * exit_mmap will block in mmu_notifier_release to
+- * guarantee ->release is called before freeing the
+- * pages.
++ * Ensure we synchronize up with __mmu_notifier_release().
+ */
++ id = srcu_read_lock(&srcu);
++
++ hlist_del_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+- rcu_read_unlock();
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+- hlist_del_rcu(&mn->hlist);
++ /*
++ * Allow __mmu_notifier_release() to complete.
++ */
++ srcu_read_unlock(&srcu, id);
++ } else
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+- }
+
+ /*
+- * Wait any running method to finish, of course including
+- * ->release if it was run by mmu_notifier_relase instead of us.
++ * Wait for any running method to finish, including ->release() if it
++ * was run by __mmu_notifier_release() instead of us.
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+ mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++ return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 5ad5ce2..7a5f842 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
+ zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+ }
+ /*
++ * Unreclaimable memory (kernel memory or anonymous memory
++ * without swap) can bring down the dirtyable pages below
++ * the zone's dirty balance reserve and the above calculation
++ * will underflow. However we still want to add in nodes
++ * which are below threshold (negative values) to get a more
++ * accurate calculation but make sure that the total never
++ * underflows.
++ */
++ if ((long)x < 0)
++ x = 0;
++
++ /*
+ * Make sure that the number of highmem pages is never larger
+ * than the number of the total dirtyable memory. This can only
+ * occur in very strange VM situations but we want to make sure
+@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
+ {
+ unsigned long x;
+
+- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+- dirty_balance_reserve;
++ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
++ x -= min(x, dirty_balance_reserve);
+
+ if (!vm_highmem_is_dirtyable)
+ x -= highmem_dirtyable_memory(x);
+@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
+ * highmem zone can hold its share of dirty pages, so we don't
+ * care about vm_highmem_is_dirtyable here.
+ */
+- return zone_page_state(zone, NR_FREE_PAGES) +
+- zone_reclaimable_pages(zone) -
+- zone->dirty_balance_reserve;
++ unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
++ zone_reclaimable_pages(zone);
++
++ /* don't allow this to underflow */
++ nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
++ return nr_pages;
+ }
+
+ /**
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d2d8f54..fa27e78 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4315,10 +4315,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
+ * round what is now in bits to nearest long in bits, then return it in
+ * bytes.
+ */
+-static unsigned long __init usemap_size(unsigned long zonesize)
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
+ {
+ unsigned long usemapsize;
+
++ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
+ usemapsize = roundup(zonesize, pageblock_nr_pages);
+ usemapsize = usemapsize >> pageblock_order;
+ usemapsize *= NR_PAGEBLOCK_BITS;
+@@ -4328,17 +4329,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
+ }
+
+ static void __init setup_usemap(struct pglist_data *pgdat,
+- struct zone *zone, unsigned long zonesize)
++ struct zone *zone,
++ unsigned long zone_start_pfn,
++ unsigned long zonesize)
+ {
+- unsigned long usemapsize = usemap_size(zonesize);
++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
+ zone->pageblock_flags = NULL;
+ if (usemapsize)
+ zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+ usemapsize);
+ }
+ #else
+-static inline void setup_usemap(struct pglist_data *pgdat,
+- struct zone *zone, unsigned long zonesize) {}
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
++ unsigned long zone_start_pfn, unsigned long zonesize) {}
+ #endif /* CONFIG_SPARSEMEM */
+
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+@@ -4461,7 +4464,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ continue;
+
+ set_pageblock_order();
+- setup_usemap(pgdat, zone, size);
++ setup_usemap(pgdat, zone, zone_start_pfn, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+ BUG_ON(ret);
+@@ -5455,7 +5458,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
+ pfn &= (PAGES_PER_SECTION-1);
+ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
+ #else
+- pfn = pfn - zone->zone_start_pfn;
++ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
+ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
+ #endif /* CONFIG_SPARSEMEM */
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 6607fee..1a497d1 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2543,6 +2543,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ unsigned long inodes;
+ int error = -EINVAL;
+
++ config.mpol = NULL;
+ if (shmem_parse_options(data, &config, true))
+ return error;
+
+@@ -2567,8 +2568,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ sbinfo->max_inodes = config.max_inodes;
+ sbinfo->free_inodes = config.max_inodes - inodes;
+
+- mpol_put(sbinfo->mpol);
+- sbinfo->mpol = config.mpol; /* transfers initial ref */
++ /*
++ * Preserve previous mempolicy unless mpol remount option was specified.
++ */
++ if (config.mpol) {
++ mpol_put(sbinfo->mpol);
++ sbinfo->mpol = config.mpol; /* transfers initial ref */
++ }
+ out:
+ spin_unlock(&sbinfo->stat_lock);
+ return error;
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 469daab..1476f26 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -119,7 +119,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
+ unsigned int msecs;
+
+ msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+- msecs += (random32() % 2 * BATADV_JITTER);
++ msecs += random32() % (2 * BATADV_JITTER);
+
+ return jiffies + msecs_to_jiffies(msecs);
+ }
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 0b997c8..aeb0962 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1789,6 +1789,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ for (i = 0; i < NUM_REASSEMBLY; i++)
+ kfree_skb(hdev->reassembly[i]);
+
++ cancel_work_sync(&hdev->power_on);
++
+ if (!test_bit(HCI_INIT, &hdev->flags) &&
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ hci_dev_lock(hdev);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 715d7e3..67d1893 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2387,7 +2387,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
+- if (ev->ncmd) {
++ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (!skb_queue_empty(&hdev->cmd_q))
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index ccd985d..03652f3 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,
+ hid->version = req->version;
+ hid->country = req->country;
+
+- strncpy(hid->name, req->name, 128);
++ strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
+ strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
+
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 1a17850..32893a0 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -467,7 +467,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
+ long timeo;
+ int err = 0;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+@@ -504,7 +504,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 2ac8d50..b7320fc 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+
+ skb_pull(skb, sizeof(code));
+
++ /*
++ * The SMP context must be initialized for all other PDUs except
++ * pairing and security requests. If we get any other PDU when
++ * not initialized simply disconnect (done if this function
++ * returns an error).
++ */
++ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
++ !conn->smp_chan) {
++ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
++ kfree_skb(skb);
++ return -ENOTSUPP;
++ }
++
+ switch (code) {
+ case SMP_CMD_PAIRING_REQ:
+ reason = smp_cmd_pairing_req(conn, skb);
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 68e8f36..fe43bc7 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -265,6 +265,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
+ struct net_device *dev = skb->dev;
+ u32 len;
+
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ goto inhdr_error;
++
+ iph = ip_hdr(skb);
+ opt = &(IPCB(skb)->opt);
+
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index fd30a60..a8aa2d5 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -16,6 +16,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/llc.h>
+ #include <linux/slab.h>
++#include <linux/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/llc.h>
+ #include <net/llc_pdu.h>
+@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
+
+ skb->dev = p->dev;
+ skb->protocol = htons(ETH_P_802_2);
++ skb->priority = TC_PRIO_CONTROL;
+
+ skb_reserve(skb, LLC_RESERVE);
+ memcpy(__skb_put(skb, length), data, length);
+diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
+index a802029..ee71ea2 100644
+--- a/net/ceph/ceph_common.c
++++ b/net/ceph/ceph_common.c
+@@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
+
+ /* start with defaults */
+ opt->flags = CEPH_OPT_DEFAULT;
+- opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
+ opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
+ opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
+ opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
+@@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
+
+ /* misc */
+ case Opt_osdtimeout:
+- opt->osd_timeout = intval;
++ pr_warning("ignoring deprecated osdtimeout option\n");
+ break;
+ case Opt_osdkeepalivetimeout:
+ opt->osd_keepalive_timeout = intval;
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 3ef1759..e9f2159 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
+ {
+ /* reset connection, out_queue, msg_ and connect_seq */
+ /* discard existing out_queue and msg_seq */
++ dout("reset_connection %p\n", con);
+ ceph_msg_remove_list(&con->out_queue);
+ ceph_msg_remove_list(&con->out_sent);
+
+@@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
+ mutex_lock(&con->mutex);
+ dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
+
+- BUG_ON(con->state != CON_STATE_CLOSED);
++ WARN_ON(con->state != CON_STATE_CLOSED);
+ con->state = CON_STATE_PREOPEN;
+
+ con->peer_name.type = (__u8) entity_type;
+@@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
+ return 0;
+ }
+
+-static void fail_protocol(struct ceph_connection *con)
+-{
+- reset_connection(con);
+- BUG_ON(con->state != CON_STATE_NEGOTIATING);
+- con->state = CON_STATE_CLOSED;
+-}
+-
+ static int process_connect(struct ceph_connection *con)
+ {
+ u64 sup_feat = con->msgr->supported_features;
+@@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
+ ceph_pr_addr(&con->peer_addr.in_addr),
+ sup_feat, server_feat, server_feat & ~sup_feat);
+ con->error_msg = "missing required protocol features";
+- fail_protocol(con);
++ reset_connection(con);
+ return -1;
+
+ case CEPH_MSGR_TAG_BADPROTOVER:
+@@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
+ le32_to_cpu(con->out_connect.protocol_version),
+ le32_to_cpu(con->in_reply.protocol_version));
+ con->error_msg = "protocol version mismatch";
+- fail_protocol(con);
++ reset_connection(con);
+ return -1;
+
+ case CEPH_MSGR_TAG_BADAUTHORIZER:
+@@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
+ ceph_pr_addr(&con->peer_addr.in_addr),
+ req_feat, server_feat, req_feat & ~server_feat);
+ con->error_msg = "missing required protocol features";
+- fail_protocol(con);
++ reset_connection(con);
+ return -1;
+ }
+
+- BUG_ON(con->state != CON_STATE_NEGOTIATING);
++ WARN_ON(con->state != CON_STATE_NEGOTIATING);
+ con->state = CON_STATE_OPEN;
+
+ con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
+@@ -2132,7 +2126,6 @@ more:
+ if (ret < 0)
+ goto out;
+
+- BUG_ON(con->state != CON_STATE_CONNECTING);
+ con->state = CON_STATE_NEGOTIATING;
+
+ /*
+@@ -2160,7 +2153,7 @@ more:
+ goto more;
+ }
+
+- BUG_ON(con->state != CON_STATE_OPEN);
++ WARN_ON(con->state != CON_STATE_OPEN);
+
+ if (con->in_base_pos < 0) {
+ /*
+@@ -2262,6 +2255,35 @@ static void queue_con(struct ceph_connection *con)
+ }
+ }
+
++static bool con_sock_closed(struct ceph_connection *con)
++{
++ if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
++ return false;
++
++#define CASE(x) \
++ case CON_STATE_ ## x: \
++ con->error_msg = "socket closed (con state " #x ")"; \
++ break;
++
++ switch (con->state) {
++ CASE(CLOSED);
++ CASE(PREOPEN);
++ CASE(CONNECTING);
++ CASE(NEGOTIATING);
++ CASE(OPEN);
++ CASE(STANDBY);
++ default:
++ pr_warning("%s con %p unrecognized state %lu\n",
++ __func__, con, con->state);
++ con->error_msg = "unrecognized con state";
++ BUG();
++ break;
++ }
++#undef CASE
++
++ return true;
++}
++
+ /*
+ * Do some work on a connection. Drop a connection ref when we're done.
+ */
+@@ -2273,24 +2295,8 @@ static void con_work(struct work_struct *work)
+
+ mutex_lock(&con->mutex);
+ restart:
+- if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
+- switch (con->state) {
+- case CON_STATE_CONNECTING:
+- con->error_msg = "connection failed";
+- break;
+- case CON_STATE_NEGOTIATING:
+- con->error_msg = "negotiation failed";
+- break;
+- case CON_STATE_OPEN:
+- con->error_msg = "socket closed";
+- break;
+- default:
+- dout("unrecognized con state %d\n", (int)con->state);
+- con->error_msg = "unrecognized con state";
+- BUG();
+- }
++ if (con_sock_closed(con))
+ goto fault;
+- }
+
+ if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
+ dout("con_work %p backing off\n", con);
+@@ -2356,12 +2362,12 @@ fault:
+ static void ceph_fault(struct ceph_connection *con)
+ __releases(con->mutex)
+ {
+- pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
++ pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
+ ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
+ dout("fault %p state %lu to peer %s\n",
+ con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
+
+- BUG_ON(con->state != CON_STATE_CONNECTING &&
++ WARN_ON(con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN);
+
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index f7b56e2..eb9a444 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -221,7 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+ kref_init(&req->r_kref);
+ init_completion(&req->r_completion);
+ init_completion(&req->r_safe_completion);
+- rb_init_node(&req->r_node);
++ RB_CLEAR_NODE(&req->r_node);
+ INIT_LIST_HEAD(&req->r_unsafe_item);
+ INIT_LIST_HEAD(&req->r_linger_item);
+ INIT_LIST_HEAD(&req->r_linger_osd);
+@@ -464,6 +464,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+ {
+ struct ceph_osd_req_op ops[3];
+ struct ceph_osd_request *req;
++ int r;
+
+ ops[0].op = opcode;
+ ops[0].extent.truncate_seq = truncate_seq;
+@@ -482,10 +483,12 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+ use_mempool,
+ GFP_NOFS, NULL, NULL);
+ if (!req)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ /* calculate max write size */
+- calc_layout(osdc, vino, layout, off, plen, req, ops);
++ r = calc_layout(osdc, vino, layout, off, plen, req, ops);
++ if (r < 0)
++ return ERR_PTR(r);
+ req->r_file_layout = *layout; /* keep a copy */
+
+ /* in case it differs from natural (file) alignment that
+@@ -578,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
+
+ dout("__kick_osd_requests osd%d\n", osd->o_osd);
+ err = __reset_osd(osdc, osd);
+- if (err == -EAGAIN)
++ if (err)
+ return;
+
+ list_for_each_entry(req, &osd->o_requests, r_osd_item) {
+@@ -605,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
+ }
+ }
+
+-static void kick_osd_requests(struct ceph_osd_client *osdc,
+- struct ceph_osd *kickosd)
+-{
+- mutex_lock(&osdc->request_mutex);
+- __kick_osd_requests(osdc, kickosd);
+- mutex_unlock(&osdc->request_mutex);
+-}
+-
+ /*
+ * If the osd connection drops, we need to resubmit all requests.
+ */
+@@ -626,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
+ dout("osd_reset osd%d\n", osd->o_osd);
+ osdc = osd->o_osdc;
+ down_read(&osdc->map_sem);
+- kick_osd_requests(osdc, osd);
++ mutex_lock(&osdc->request_mutex);
++ __kick_osd_requests(osdc, osd);
++ mutex_unlock(&osdc->request_mutex);
+ send_queued(osdc);
+ up_read(&osdc->map_sem);
+ }
+@@ -645,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
+ atomic_set(&osd->o_ref, 1);
+ osd->o_osdc = osdc;
+ osd->o_osd = onum;
++ RB_CLEAR_NODE(&osd->o_node);
+ INIT_LIST_HEAD(&osd->o_requests);
+ INIT_LIST_HEAD(&osd->o_linger_requests);
+ INIT_LIST_HEAD(&osd->o_osd_lru);
+@@ -748,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ if (list_empty(&osd->o_requests) &&
+ list_empty(&osd->o_linger_requests)) {
+ __remove_osd(osdc, osd);
++ ret = -ENODEV;
+ } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
+ &osd->o_con.peer_addr,
+ sizeof(osd->o_con.peer_addr)) == 0 &&
+@@ -874,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
+ req->r_osd = NULL;
+ }
+
++ list_del_init(&req->r_req_lru_item);
+ ceph_osdc_put_request(req);
+
+- list_del_init(&req->r_req_lru_item);
+ if (osdc->num_requests == 0) {
+ dout(" no requests, canceling timeout\n");
+ __cancel_osd_timeout(osdc);
+@@ -908,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+ {
+ dout("__unregister_linger_request %p\n", req);
++ list_del_init(&req->r_linger_item);
+ if (req->r_osd) {
+- list_del_init(&req->r_linger_item);
+ list_del_init(&req->r_linger_osd);
+
+ if (list_empty(&req->r_osd->o_requests) &&
+@@ -1088,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
+ {
+ struct ceph_osd_client *osdc =
+ container_of(work, struct ceph_osd_client, timeout_work.work);
+- struct ceph_osd_request *req, *last_req = NULL;
++ struct ceph_osd_request *req;
+ struct ceph_osd *osd;
+- unsigned long timeout = osdc->client->options->osd_timeout * HZ;
+ unsigned long keepalive =
+ osdc->client->options->osd_keepalive_timeout * HZ;
+- unsigned long last_stamp = 0;
+ struct list_head slow_osds;
+ dout("timeout\n");
+ down_read(&osdc->map_sem);
+@@ -1103,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
+ mutex_lock(&osdc->request_mutex);
+
+ /*
+- * reset osds that appear to be _really_ unresponsive. this
+- * is a failsafe measure.. we really shouldn't be getting to
+- * this point if the system is working properly. the monitors
+- * should mark the osd as failed and we should find out about
+- * it from an updated osd map.
+- */
+- while (timeout && !list_empty(&osdc->req_lru)) {
+- req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
+- r_req_lru_item);
+-
+- /* hasn't been long enough since we sent it? */
+- if (time_before(jiffies, req->r_stamp + timeout))
+- break;
+-
+- /* hasn't been long enough since it was acked? */
+- if (req->r_request->ack_stamp == 0 ||
+- time_before(jiffies, req->r_request->ack_stamp + timeout))
+- break;
+-
+- BUG_ON(req == last_req && req->r_stamp == last_stamp);
+- last_req = req;
+- last_stamp = req->r_stamp;
+-
+- osd = req->r_osd;
+- BUG_ON(!osd);
+- pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
+- req->r_tid, osd->o_osd);
+- __kick_osd_requests(osdc, osd);
+- }
+-
+- /*
+ * ping osds that are a bit slow. this ensures that if there
+ * is a break in the TCP connection we will notice, and reopen
+ * a connection with that osd (from the fault callback).
+@@ -1304,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
+ * Requeue requests whose mapping to an OSD has changed. If requests map to
+ * no osd, request a new map.
+ *
+- * Caller should hold map_sem for read and request_mutex.
++ * Caller should hold map_sem for read.
+ */
+ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ {
+@@ -1318,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ for (p = rb_first(&osdc->requests); p; ) {
+ req = rb_entry(p, struct ceph_osd_request, r_node);
+ p = rb_next(p);
++
++ /*
++ * For linger requests that have not yet been
++ * registered, move them to the linger list; they'll
++ * be sent to the osd in the loop below. Unregister
++ * the request before re-registering it as a linger
++ * request to ensure the __map_request() below
++ * will decide it needs to be sent.
++ */
++ if (req->r_linger && list_empty(&req->r_linger_item)) {
++ dout("%p tid %llu restart on osd%d\n",
++ req, req->r_tid,
++ req->r_osd ? req->r_osd->o_osd : -1);
++ __unregister_request(osdc, req);
++ __register_linger_request(osdc, req);
++ continue;
++ }
++
+ err = __map_request(osdc, req, force_resend);
+ if (err < 0)
+ continue; /* error */
+@@ -1332,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ req->r_flags |= CEPH_OSD_FLAG_RETRY;
+ }
+ }
+- if (req->r_linger && list_empty(&req->r_linger_item)) {
+- /*
+- * register as a linger so that we will
+- * re-submit below and get a new tid
+- */
+- dout("%p tid %llu restart on osd%d\n",
+- req, req->r_tid,
+- req->r_osd ? req->r_osd->o_osd : -1);
+- __register_linger_request(osdc, req);
+- __unregister_request(osdc, req);
+- }
+ }
+
+ list_for_each_entry_safe(req, nreq, &osdc->req_linger,
+@@ -1350,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
+
+ err = __map_request(osdc, req, force_resend);
++ dout("__map_request returned %d\n", err);
+ if (err == 0)
+ continue; /* no change and no osd was specified */
+ if (err < 0)
+@@ -1362,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+
+ dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
+- __unregister_linger_request(osdc, req);
+ __register_request(osdc, req);
++ __unregister_linger_request(osdc, req);
+ }
+ mutex_unlock(&osdc->request_mutex);
+
+@@ -1371,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
+ dout("%d requests for down osds, need new map\n", needmap);
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+ }
++ reset_changed_osds(osdc);
+ }
+
+
+@@ -1427,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+ osdc->osdmap = newmap;
+ }
+ kick_requests(osdc, 0);
+- reset_changed_osds(osdc);
+ } else {
+ dout("ignoring incremental map %u len %d\n",
+ epoch, maplen);
+@@ -1597,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
+ event->data = data;
+ event->osdc = osdc;
+ INIT_LIST_HEAD(&event->osd_node);
++ RB_CLEAR_NODE(&event->node);
+ kref_init(&event->kref); /* one ref for us */
+ kref_get(&event->kref); /* one ref for the caller */
+ init_completion(&event->completion);
+@@ -1928,8 +1903,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
+ CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
+ NULL, 0, truncate_seq, truncate_size, NULL,
+ false, 1, page_align);
+- if (!req)
+- return -ENOMEM;
++ if (IS_ERR(req))
++ return PTR_ERR(req);
+
+ /* it may be a short read due to an object boundary */
+ req->r_pages = pages;
+@@ -1971,8 +1946,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
+ snapc, do_sync,
+ truncate_seq, truncate_size, mtime,
+ nofail, 1, page_align);
+- if (!req)
+- return -ENOMEM;
++ if (IS_ERR(req))
++ return PTR_ERR(req);
+
+ /* it may be a short write due to an object boundary */
+ req->r_pages = pages;
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index 5433fb0..f552aa4 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -645,10 +645,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
+ ceph_decode_32_safe(p, end, max, bad);
+ while (max--) {
+ ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
++ err = -ENOMEM;
+ pi = kzalloc(sizeof(*pi), GFP_NOFS);
+ if (!pi)
+ goto bad;
+ pi->id = ceph_decode_32(p);
++ err = -EINVAL;
+ ev = ceph_decode_8(p); /* encoding version */
+ if (ev > CEPH_PG_POOL_VERSION) {
+ pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
+@@ -664,8 +666,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
+ __insert_pg_pool(&map->pg_pools, pi);
+ }
+
+- if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+- goto bad;
++ if (version >= 5) {
++ err = __decode_pool_names(p, end, map);
++ if (err < 0) {
++ dout("fail to decode pool names");
++ goto bad;
++ }
++ }
+
+ ceph_decode_32_safe(p, end, map->pool_max, bad);
+
+@@ -745,7 +752,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
+ return map;
+
+ bad:
+- dout("osdmap_decode fail\n");
++ dout("osdmap_decode fail err %d\n", err);
+ ceph_osdmap_destroy(map);
+ return ERR_PTR(err);
+ }
+@@ -839,6 +846,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ if (ev > CEPH_PG_POOL_VERSION) {
+ pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
+ ev, CEPH_PG_POOL_VERSION);
++ err = -EINVAL;
+ goto bad;
+ }
+ pi = __lookup_pg_pool(&map->pg_pools, pool);
+@@ -855,8 +863,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ if (err < 0)
+ goto bad;
+ }
+- if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+- goto bad;
++ if (version >= 5) {
++ err = __decode_pool_names(p, end, map);
++ if (err < 0)
++ goto bad;
++ }
+
+ /* old_pool */
+ ceph_decode_32_safe(p, end, len, bad);
+@@ -932,15 +943,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ (void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+ /* insert */
+- if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
+- err = -EINVAL;
++ err = -EINVAL;
++ if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
+ goto bad;
+- }
++ err = -ENOMEM;
+ pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
+- if (!pg) {
+- err = -ENOMEM;
++ if (!pg)
+ goto bad;
+- }
+ pg->pgid = pgid;
+ pg->len = pglen;
+ for (j = 0; j < pglen; j++)
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 0337e2b..368f9c3 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ skb_queue_walk(queue, skb) {
+ *peeked = skb->peeked;
+ if (flags & MSG_PEEK) {
+- if (*off >= skb->len) {
++ if (*off >= skb->len && skb->len) {
+ *off -= skb->len;
+ continue;
+ }
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index e356b8d..d7881b2 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1797,10 +1797,13 @@ static ssize_t pktgen_thread_write(struct file *file,
+ return -EFAULT;
+ i += len;
+ mutex_lock(&pktgen_thread_lock);
+- pktgen_add_device(t, f);
++ ret = pktgen_add_device(t, f);
+ mutex_unlock(&pktgen_thread_lock);
+- ret = count;
+- sprintf(pg_result, "OK: add_device=%s", f);
++ if (!ret) {
++ ret = count;
++ sprintf(pg_result, "OK: add_device=%s", f);
++ } else
++ sprintf(pg_result, "ERROR: can not add device %s", f);
+ goto out;
+ }
+
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index 9d8755e..a2ad320 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -121,6 +121,9 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ if (nlmsg_len(nlh) < sizeof(*req))
+ return -EINVAL;
+
++ if (req->sdiag_family >= AF_MAX)
++ return -EINVAL;
++
+ hndl = sock_diag_lock_handler(req->sdiag_family);
+ if (hndl == NULL)
+ err = -ENOENT;
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 176ecdb..4f9f5eb 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -439,8 +439,8 @@ exit:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ return NULL;
+ put_and_exit:
+- bh_unlock_sock(newsk);
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ dccp_done(newsk);
+ goto exit;
+ }
+
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 56840b2..6e05981 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -585,7 +585,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
+
+ if (__inet_inherit_port(sk, newsk) < 0) {
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ dccp_done(newsk);
+ goto out;
+ }
+ __inet6_hash(newsk, NULL);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index fe4582c..26222ed 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -228,8 +228,12 @@ EXPORT_SYMBOL(inet_listen);
+ u32 inet_ehash_secret __read_mostly;
+ EXPORT_SYMBOL(inet_ehash_secret);
+
++u32 ipv6_hash_secret __read_mostly;
++EXPORT_SYMBOL(ipv6_hash_secret);
++
+ /*
+- * inet_ehash_secret must be set exactly once
++ * inet_ehash_secret must be set exactly once, and to a non nul value
++ * ipv6_hash_secret must be set exactly once.
+ */
+ void build_ehash_secret(void)
+ {
+@@ -239,7 +243,8 @@ void build_ehash_secret(void)
+ get_random_bytes(&rnd, sizeof(rnd));
+ } while (rnd == 0);
+
+- cmpxchg(&inet_ehash_secret, 0, rnd);
++ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
++ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ }
+ EXPORT_SYMBOL(build_ehash_secret);
+
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 0405cc8..567c31f 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -679,6 +679,22 @@ void inet_csk_destroy_sock(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_destroy_sock);
+
++/* This function allows to force a closure of a socket after the call to
++ * tcp/dccp_create_openreq_child().
++ */
++void inet_csk_prepare_forced_close(struct sock *sk)
++{
++ /* sk_clone_lock locked the socket and set refcnt to 2 */
++ bh_unlock_sock(sk);
++ sock_put(sk);
++
++ /* The below has to be done to allow calling inet_csk_destroy_sock */
++ sock_set_flag(sk, SOCK_DEAD);
++ percpu_counter_inc(sk->sk_prot->orphan_count);
++ inet_sk(sk)->inet_num = 0;
++}
++EXPORT_SYMBOL(inet_csk_prepare_forced_close);
++
+ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 14bbfcf..e95d72b 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -590,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
+ case IP_TTL:
+ if (optlen < 1)
+ goto e_inval;
+- if (val != -1 && (val < 0 || val > 255))
++ if (val != -1 && (val < 1 || val > 255))
+ goto e_inval;
+ inet->uc_ttl = val;
+ break;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 6232d47..920cb0b 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -328,8 +328,8 @@ void ping_err(struct sk_buff *skb, u32 info)
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ struct inet_sock *inet_sock;
+- int type = icmph->type;
+- int code = icmph->code;
++ int type = icmp_hdr(skb)->type;
++ int code = icmp_hdr(skb)->code;
+ struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ int harderr;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c92c4da..8d02e34 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3531,6 +3531,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
+ }
+ } else {
+ if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
++ if (!tcp_packets_in_flight(tp)) {
++ tcp_enter_frto_loss(sk, 2, flag);
++ return true;
++ }
++
+ /* Prevent sending of new data. */
+ tp->snd_cwnd = min(tp->snd_cwnd,
+ tcp_packets_in_flight(tp));
+@@ -3579,6 +3584,24 @@ static bool tcp_process_frto(struct sock *sk, int flag)
+ return false;
+ }
+
++/* RFC 5961 7 [ACK Throttling] */
++static void tcp_send_challenge_ack(struct sock *sk)
++{
++ /* unprotected vars, we dont care of overwrites */
++ static u32 challenge_timestamp;
++ static unsigned int challenge_count;
++ u32 now = jiffies / HZ;
++
++ if (now != challenge_timestamp) {
++ challenge_timestamp = now;
++ challenge_count = 0;
++ }
++ if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
++ tcp_send_ack(sk);
++ }
++}
++
+ /* This routine deals with incoming acks, but not outgoing ones. */
+ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ {
+@@ -3598,8 +3621,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ /* If the ack is older than previous acks
+ * then we can probably ignore it.
+ */
+- if (before(ack, prior_snd_una))
++ if (before(ack, prior_snd_una)) {
++ /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
++ if (before(ack, prior_snd_una - tp->max_window)) {
++ tcp_send_challenge_ack(sk);
++ return -1;
++ }
+ goto old_ack;
++ }
+
+ /* If the ack includes data we haven't sent yet, discard
+ * this segment (RFC793 Section 3.9).
+@@ -5271,23 +5300,6 @@ out:
+ }
+ #endif /* CONFIG_NET_DMA */
+
+-static void tcp_send_challenge_ack(struct sock *sk)
+-{
+- /* unprotected vars, we dont care of overwrites */
+- static u32 challenge_timestamp;
+- static unsigned int challenge_count;
+- u32 now = jiffies / HZ;
+-
+- if (now != challenge_timestamp) {
+- challenge_timestamp = now;
+- challenge_count = 0;
+- }
+- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+- tcp_send_ack(sk);
+- }
+-}
+-
+ /* Does PAWS and seqno based validation of an incoming segment, flags will
+ * play significant role here.
+ */
+@@ -5340,11 +5352,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+ }
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ /* step 3: check security and precedence [ignored] */
+
+ /* step 4: Check for a SYN
+@@ -5579,6 +5586,11 @@ step5:
+ if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+ goto discard;
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+ /* Process urgent data. */
+@@ -6106,6 +6118,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ } else
+ goto discard;
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index db7bfad..4705caf 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1537,10 +1537,8 @@ exit:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ return NULL;
+ put_and_exit:
+- tcp_clear_xmit_timers(newsk);
+- tcp_cleanup_congestion_control(newsk);
+- bh_unlock_sock(newsk);
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
+ goto exit;
+ }
+ EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index b10374d..fd82a30 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1736,7 +1736,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ continue;
+ if ((rt->rt6i_flags & flags) != flags)
+ continue;
+- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
++ if ((rt->rt6i_flags & noflags) != 0)
+ continue;
+ dst_hold(&rt->dst);
+ break;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 5b2d63e..3d485ec 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1287,10 +1287,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ cork->length = 0;
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
++ exthdrlen = (opt ? opt->opt_flen : 0);
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
+- dst_exthdrlen = rt->dst.header_len;
++ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
+ } else {
+ rt = (struct rt6_info *)cork->dst;
+ fl6 = &inet->cork.fl.u.ip6;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 070a3ce..2ffaa7a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -881,7 +881,7 @@ restart:
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+
+- if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
++ if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
+ nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ else if (!(rt->dst.flags & DST_HOST))
+ nrt = rt6_alloc_clone(rt, &fl6->daddr);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 7e32d42..8b45fb4 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1371,7 +1371,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ #endif
+
+ if (__inet_inherit_port(sk, newsk) < 0) {
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
+ goto out;
+ }
+ __inet6_hash(newsk, NULL);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index a58c0b6..f985911 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -151,7 +151,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ sta = sta_info_get(sdata, mac_addr);
+ else
+ sta = sta_info_get_bss(sdata, mac_addr);
+- if (!sta) {
++ /*
++ * The ASSOC test makes sure the driver is ready to
++ * receive the key. When wpa_supplicant has roamed
++ * using FT, it attempts to set the key before
++ * association has completed, this rejects that attempt
++	 * so it will set the key again after association.
++ *
++ * TODO: accept the key if we have a station entry and
++ * add it to the device after the station.
++ */
++ if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
+ ieee80211_key_free(sdata->local, key);
+ err = -ENOENT;
+ goto out_unlock;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index a5894dd..c55eacc 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -647,8 +647,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
+ sdata_info(sdata,
+ "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
+
+- ieee80211_request_internal_scan(sdata,
+- ifibss->ssid, ifibss->ssid_len, NULL);
++ ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
++ NULL);
+ }
+
+ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
+@@ -746,9 +746,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
+ IEEE80211_SCAN_INTERVAL)) {
+ sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+
+- ieee80211_request_internal_scan(sdata,
+- ifibss->ssid, ifibss->ssid_len,
+- ifibss->fixed_channel ? ifibss->channel : NULL);
++ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++ ifibss->ssid_len, chan);
+ } else {
+ int interval = IEEE80211_SCAN_INTERVAL;
+
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 642a2a3..fcab057 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1239,9 +1239,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+
+ /* scan/BSS handling */
+ void ieee80211_scan_work(struct work_struct *work);
+-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
+- const u8 *ssid, u8 ssid_len,
+- struct ieee80211_channel *chan);
++int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
++ const u8 *ssid, u8 ssid_len,
++ struct ieee80211_channel *chan);
+ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_scan_request *req);
+ void ieee80211_scan_cancel(struct ieee80211_local *local);
+@@ -1267,10 +1267,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_sched_scan_stopped_work(struct work_struct *work);
+
+ /* off-channel helpers */
+-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+- bool offchannel_ps_enable);
+-void ieee80211_offchannel_return(struct ieee80211_local *local,
+- bool offchannel_ps_disable);
++void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
++void ieee80211_offchannel_return(struct ieee80211_local *local);
+ void ieee80211_roc_setup(struct ieee80211_local *local);
+ void ieee80211_start_next_roc(struct ieee80211_local *local);
+ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 2138dc3..37e3028 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+ ieee80211_sta_reset_conn_monitor(sdata);
+ }
+
+-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+- bool offchannel_ps_enable)
++void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
+ {
+ struct ieee80211_sub_if_data *sdata;
+
+@@ -128,8 +127,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ netif_tx_stop_all_queues(sdata->dev);
+- if (offchannel_ps_enable &&
+- (sdata->vif.type == NL80211_IFTYPE_STATION) &&
++ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata);
+ }
+@@ -137,8 +135,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+ mutex_unlock(&local->iflist_mtx);
+ }
+
+-void ieee80211_offchannel_return(struct ieee80211_local *local,
+- bool offchannel_ps_disable)
++void ieee80211_offchannel_return(struct ieee80211_local *local)
+ {
+ struct ieee80211_sub_if_data *sdata;
+
+@@ -151,11 +148,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
+ continue;
+
+ /* Tell AP we're back */
+- if (offchannel_ps_disable &&
+- sdata->vif.type == NL80211_IFTYPE_STATION) {
+- if (sdata->u.mgd.associated)
+- ieee80211_offchannel_ps_disable(sdata);
+- }
++ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
++ sdata->u.mgd.associated)
++ ieee80211_offchannel_ps_disable(sdata);
+
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ /*
+@@ -376,7 +371,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
+ local->tmp_channel = NULL;
+ ieee80211_hw_config(local, 0);
+
+- ieee80211_offchannel_return(local, true);
++ ieee80211_offchannel_return(local);
+ }
+
+ ieee80211_recalc_idle(local);
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 839dd97..8719635 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -310,7 +310,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
+ if (!was_hw_scan) {
+ ieee80211_configure_filter(local);
+ drv_sw_scan_complete(local);
+- ieee80211_offchannel_return(local, true);
++ ieee80211_offchannel_return(local);
+ }
+
+ ieee80211_recalc_idle(local);
+@@ -355,7 +355,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
+ local->next_scan_state = SCAN_DECISION;
+ local->scan_channel_idx = 0;
+
+- ieee80211_offchannel_stop_vifs(local, true);
++ ieee80211_offchannel_stop_vifs(local);
+
+ ieee80211_configure_filter(local);
+
+@@ -680,12 +680,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
+ local->scan_channel = NULL;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+- /*
+- * Re-enable vifs and beaconing. Leave PS
+- * in off-channel state..will put that back
+- * on-channel at the end of scanning.
+- */
+- ieee80211_offchannel_return(local, false);
++ /* disable PS */
++ ieee80211_offchannel_return(local);
+
+ *next_delay = HZ / 5;
+ /* afterwards, resume scan & go to next channel */
+@@ -695,8 +691,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
+ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ unsigned long *next_delay)
+ {
+- /* PS already is in off-channel mode */
+- ieee80211_offchannel_stop_vifs(local, false);
++ ieee80211_offchannel_stop_vifs(local);
+
+ if (local->ops->flush) {
+ drv_flush(local, false);
+@@ -819,9 +814,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
+ return res;
+ }
+
+-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
+- const u8 *ssid, u8 ssid_len,
+- struct ieee80211_channel *chan)
++int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
++ const u8 *ssid, u8 ssid_len,
++ struct ieee80211_channel *chan)
+ {
+ struct ieee80211_local *local = sdata->local;
+ int ret = -EBUSY;
+@@ -835,22 +830,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
+
+ /* fill internal scan request */
+ if (!chan) {
+- int i, nchan = 0;
++ int i, max_n;
++ int n_ch = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+- for (i = 0;
+- i < local->hw.wiphy->bands[band]->n_channels;
+- i++) {
+- local->int_scan_req->channels[nchan] =
++
++ max_n = local->hw.wiphy->bands[band]->n_channels;
++ for (i = 0; i < max_n; i++) {
++ struct ieee80211_channel *tmp_ch =
+ &local->hw.wiphy->bands[band]->channels[i];
+- nchan++;
++
++ if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
++ IEEE80211_CHAN_DISABLED))
++ continue;
++
++ local->int_scan_req->channels[n_ch] = tmp_ch;
++ n_ch++;
+ }
+ }
+
+- local->int_scan_req->n_channels = nchan;
++ if (WARN_ON_ONCE(n_ch == 0))
++ goto unlock;
++
++ local->int_scan_req->n_channels = n_ch;
+ } else {
++ if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
++ IEEE80211_CHAN_DISABLED)))
++ goto unlock;
++
+ local->int_scan_req->channels[0] = chan;
+ local->int_scan_req->n_channels = 1;
+ }
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 31aa8b8..cd7ca8d 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -835,7 +835,7 @@ void sta_info_init(struct ieee80211_local *local)
+
+ void sta_info_stop(struct ieee80211_local *local)
+ {
+- del_timer(&local->sta_cleanup);
++ del_timer_sync(&local->sta_cleanup);
+ sta_info_flush(local, NULL);
+ }
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c5c9e2a..70f7e18 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2443,13 +2443,15 @@ static int packet_release(struct socket *sock)
+
+ packet_flush_mclist(sk);
+
+- memset(&req_u, 0, sizeof(req_u));
+-
+- if (po->rx_ring.pg_vec)
++ if (po->rx_ring.pg_vec) {
++ memset(&req_u, 0, sizeof(req_u));
+ packet_set_ring(sk, &req_u, 1, 0);
++ }
+
+- if (po->tx_ring.pg_vec)
++ if (po->tx_ring.pg_vec) {
++ memset(&req_u, 0, sizeof(req_u));
+ packet_set_ring(sk, &req_u, 1, 1);
++ }
+
+ fanout_release(sk);
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 9d75b77..e9ea2f3 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -874,7 +874,7 @@ ok:
+ q->now = psched_get_time();
+ start_at = jiffies;
+
+- next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
++ next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC;
+
+ for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
+ /* common case optimization - skip event handler quickly */
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index 68a385d..58cd035 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
+ /* Final destructor for endpoint. */
+ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ {
++ int i;
++
+ SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
+
+ /* Free up the HMAC transform. */
+@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ sctp_inq_free(&ep->base.inqueue);
+ sctp_bind_addr_free(&ep->base.bind_addr);
+
++ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
++ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
++
+ /* Remove and free the port */
+ if (sctp_sk(ep->base.sk)->bind_hash)
+ sctp_put_port(ep->base.sk);
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index e7aa177c..e0902c9 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+
+ /* Free the outqueue structure and any related pending chunks.
+ */
+-void sctp_outq_teardown(struct sctp_outq *q)
++static void __sctp_outq_teardown(struct sctp_outq *q)
+ {
+ struct sctp_transport *transport;
+ struct list_head *lchunk, *temp;
+@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ sctp_chunk_free(chunk);
+ }
+
+- q->error = 0;
+-
+ /* Throw away any leftover control chunks. */
+ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+ list_del_init(&chunk->list);
+@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ }
+ }
+
++void sctp_outq_teardown(struct sctp_outq *q)
++{
++ __sctp_outq_teardown(q);
++ sctp_outq_init(q->asoc, q);
++}
++
+ /* Free the outqueue structure and any related pending chunks. */
+ void sctp_outq_free(struct sctp_outq *q)
+ {
+ /* Throw away leftover chunks. */
+- sctp_outq_teardown(q);
++ __sctp_outq_teardown(q);
+
+ /* If we were kmalloc()'d, free the memory. */
+ if (q->malloced)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index cb54123..d32d86d 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+
+ ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
+ out:
+- kfree(authkey);
++ kzfree(authkey);
+ return ret;
+ }
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index fa48c60..346c387 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -234,7 +234,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
+ spin_lock(&sn->rpc_client_lock);
+ list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
+ if (clnt->cl_program->pipe_dir_name == NULL)
+- break;
++ continue;
+ if (rpc_clnt_skip_event(clnt, event))
+ continue;
+ if (atomic_inc_not_zero(&clnt->cl_count) == 0)
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index 21fde99..2a419f1 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -1152,14 +1152,19 @@ static void rpc_kill_sb(struct super_block *sb)
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ mutex_lock(&sn->pipefs_sb_lock);
++ if (sn->pipefs_sb != sb) {
++ mutex_unlock(&sn->pipefs_sb_lock);
++ goto out;
++ }
+ sn->pipefs_sb = NULL;
+ mutex_unlock(&sn->pipefs_sb_lock);
+- put_net(net);
+ dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net,
+ NET_NAME(net));
+ blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
+ RPC_PIPEFS_UMOUNT,
+ sb);
++ put_net(net);
++out:
+ kill_litter_super(sb);
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 128494e..7fc5846 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -919,16 +919,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
+ return task;
+ }
+
++/*
++ * rpc_free_task - release rpc task and perform cleanups
++ *
++ * Note that we free up the rpc_task _after_ rpc_release_calldata()
++ * in order to work around a workqueue dependency issue.
++ *
++ * Tejun Heo states:
++ * "Workqueue currently considers two work items to be the same if they're
++ * on the same address and won't execute them concurrently - ie. it
++ * makes a work item which is queued again while being executed wait
++ * for the previous execution to complete.
++ *
++ * If a work function frees the work item, and then waits for an event
++ * which should be performed by another work item and *that* work item
++ * recycles the freed work item, it can create a false dependency loop.
++ * There really is no reliable way to detect this short of verifying
++ * every memory free."
++ *
++ */
+ static void rpc_free_task(struct rpc_task *task)
+ {
+- const struct rpc_call_ops *tk_ops = task->tk_ops;
+- void *calldata = task->tk_calldata;
++ unsigned short tk_flags = task->tk_flags;
++
++ rpc_release_calldata(task->tk_ops, task->tk_calldata);
+
+- if (task->tk_flags & RPC_TASK_DYNAMIC) {
++ if (tk_flags & RPC_TASK_DYNAMIC) {
+ dprintk("RPC: %5u freeing task\n", task->tk_pid);
+ mempool_free(task, rpc_task_mempool);
+ }
+- rpc_release_calldata(tk_ops, calldata);
+ }
+
+ static void rpc_async_release(struct work_struct *work)
+@@ -938,8 +957,7 @@ static void rpc_async_release(struct work_struct *work)
+
+ static void rpc_release_resources_task(struct rpc_task *task)
+ {
+- if (task->tk_rqstp)
+- xprt_release(task);
++ xprt_release(task);
+ if (task->tk_msg.rpc_cred) {
+ put_rpccred(task->tk_msg.rpc_cred);
+ task->tk_msg.rpc_cred = NULL;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 5d7f61d..2480c01 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1139,10 +1139,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+ void xprt_release(struct rpc_task *task)
+ {
+ struct rpc_xprt *xprt;
+- struct rpc_rqst *req;
++ struct rpc_rqst *req = task->tk_rqstp;
+
+- if (!(req = task->tk_rqstp))
++ if (req == NULL) {
++ if (task->tk_client) {
++ rcu_read_lock();
++ xprt = rcu_dereference(task->tk_client->cl_xprt);
++ if (xprt->snd_task == task)
++ xprt_release_write(xprt, task);
++ rcu_read_unlock();
++ }
+ return;
++ }
+
+ xprt = req->rq_xprt;
+ if (task->tk_ops->rpc_count_stats != NULL)
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index 49a464f..62fa2c5 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+- }
+- else if (rc == -ENODATA)
++ } else if (rc == -ENODATA && inode->i_op->removexattr) {
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
++ }
+ return rc;
+ }
+
+diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
+index 48d7c0a..bd3ba88 100644
+--- a/sound/arm/pxa2xx-ac97-lib.c
++++ b/sound/arm/pxa2xx-ac97-lib.c
+@@ -18,6 +18,7 @@
+ #include <linux/delay.h>
+ #include <linux/module.h>
+ #include <linux/io.h>
++#include <linux/gpio.h>
+
+ #include <sound/ac97_codec.h>
+ #include <sound/pxa2xx-lib.h>
+@@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
+
+ static inline void pxa_ac97_cold_pxa27x(void)
+ {
++ unsigned int timeout;
++
+ GCR &= GCR_COLD_RST; /* clear everything but nCRST */
+ GCR &= ~GCR_COLD_RST; /* then assert nCRST */
+
+@@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
+ clk_enable(ac97conf_clk);
+ udelay(5);
+ clk_disable(ac97conf_clk);
+- GCR = GCR_COLD_RST;
+- udelay(50);
++ GCR = GCR_COLD_RST | GCR_WARM_RST;
++ timeout = 100; /* wait for the codec-ready bit to be set */
++ while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
++ mdelay(1);
+ }
+ #endif
+
+@@ -340,8 +345,21 @@ int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev)
+ }
+
+ if (cpu_is_pxa27x()) {
+- /* Use GPIO 113 as AC97 Reset on Bulverde */
++ /*
++ * This gpio is needed for a work-around to a bug in the ac97
++		 * controller during warm reset. The direction and level are set
++ * here so that it is an output driven high when switching from
++ * AC97_nRESET alt function to generic gpio.
++ */
++ ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
++ "pxa27x ac97 reset");
++ if (ret < 0) {
++ pr_err("%s: gpio_request_one() failed: %d\n",
++ __func__, ret);
++ goto err_conf;
++ }
+ pxa27x_assert_ac97reset(reset_gpio, 0);
++
+ ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
+ if (IS_ERR(ac97conf_clk)) {
+ ret = PTR_ERR(ac97conf_clk);
+@@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
+
+ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
+ {
++ if (cpu_is_pxa27x())
++ gpio_free(reset_gpio);
+ GCR |= GCR_ACLINK_OFF;
+ free_irq(IRQ_AC97, NULL);
+ if (ac97conf_clk) {
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index ee895f3..be8c176 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+
+ spin_lock(&codec->reg_lock);
+ if (!pvoice->running) {
+- spin_unlock_irq(&codec->reg_lock);
++ spin_unlock(&codec->reg_lock);
+ return 0;
+ }
+ outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 8f23374..6874743 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -924,8 +924,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ if (!static_hdmi_pcm && eld->eld_valid) {
+ snd_hdmi_eld_update_pcm_info(eld, hinfo);
+ if (hinfo->channels_min > hinfo->channels_max ||
+- !hinfo->rates || !hinfo->formats)
++ !hinfo->rates || !hinfo->formats) {
++ per_cvt->assigned = 0;
++ hinfo->nid = 0;
++ snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ return -ENODEV;
++ }
+ }
+
+ /* Store the updated parameters */
+@@ -989,6 +993,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
+ codec->addr, pin_nid, eld->monitor_present, eld_valid);
+
++ eld->eld_valid = false;
+ if (eld_valid) {
+ if (!snd_hdmi_get_eld(eld, codec, pin_nid))
+ snd_hdmi_show_eld(eld);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e1b7061..51bbe0d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4719,6 +4719,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB),
+ SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
+ SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
++ SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST),
+ SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
+ SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+ SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+@@ -5415,6 +5416,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+
+ /* All Apple entries are in codec SSIDs */
+ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+@@ -6562,8 +6564,8 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PRE_PROBE) {
+- snd_hda_override_pin_caps(codec, 0x18, 0x00001714);
+- snd_hda_override_pin_caps(codec, 0x19, 0x0000171c);
++ snd_hda_override_pin_caps(codec, 0x18, 0x00000734);
++ snd_hda_override_pin_caps(codec, 0x19, 0x0000073c);
+ }
+ }
+
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 3d4722f..f206117 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1698,7 +1698,7 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658,
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659,
+- "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
++ "HP Pavilion dv7", STAC_HP_DV7_4000),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A,
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B,
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
+index 46b3629..f1dec07 100644
+--- a/sound/pci/rme32.c
++++ b/sound/pci/rme32.c
+@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
+ spin_lock_irq(&rme32->lock);
+ rme32->capture_substream = NULL;
+ rme32->capture_periodsize = 0;
+- spin_unlock(&rme32->lock);
++ spin_unlock_irq(&rme32->lock);
+ return 0;
+ }
+
+diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
+index 5be42bf..4068f24 100644
+--- a/sound/soc/codecs/sigmadsp.c
++++ b/sound/soc/codecs/sigmadsp.c
+@@ -225,7 +225,7 @@ EXPORT_SYMBOL(process_sigma_firmware);
+ static int sigma_action_write_regmap(void *control_data,
+ const struct sigma_action *sa, size_t len)
+ {
+- return regmap_raw_write(control_data, le16_to_cpu(sa->addr),
++ return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
+ sa->payload, len - 2);
+ }
+
+diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
+index a3acb7a..6275a2b 100644
+--- a/sound/soc/codecs/wm2000.c
++++ b/sound/soc/codecs/wm2000.c
+@@ -188,9 +188,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
+
+ ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
+ if (wm2000->speech_clarity)
+- ret &= ~WM2000_SPEECH_CLARITY;
+- else
+ ret |= WM2000_SPEECH_CLARITY;
++ else
++ ret &= ~WM2000_SPEECH_CLARITY;
+ wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
+
+ wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
+diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
+index c8bff6d..9932aac 100644
+--- a/sound/soc/codecs/wm2200.c
++++ b/sound/soc/codecs/wm2200.c
+@@ -897,8 +897,6 @@ static const char *wm2200_mixer_texts[] = {
+ "EQR",
+ "LHPF1",
+ "LHPF2",
+- "LHPF3",
+- "LHPF4",
+ "DSP1.1",
+ "DSP1.2",
+ "DSP1.3",
+@@ -931,7 +929,6 @@ static int wm2200_mixer_values[] = {
+ 0x25,
+ 0x50, /* EQ */
+ 0x51,
+- 0x52,
+ 0x60, /* LHPF1 */
+ 0x61, /* LHPF2 */
+ 0x68, /* DSP1 */
+@@ -993,9 +990,9 @@ SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL,
+
+ SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
+ WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1),
+-SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
++SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_2L,
+ WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1),
+-SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
++SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_3L,
+ WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1),
+
+ SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L,
+@@ -1380,15 +1377,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ case SND_SOC_DAIFMT_DSP_A:
+ fmt_val = 0;
+ break;
+- case SND_SOC_DAIFMT_DSP_B:
+- fmt_val = 1;
+- break;
+ case SND_SOC_DAIFMT_I2S:
+ fmt_val = 2;
+ break;
+- case SND_SOC_DAIFMT_LEFT_J:
+- fmt_val = 3;
+- break;
+ default:
+ dev_err(codec->dev, "Unsupported DAI format %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+@@ -1440,7 +1431,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
+ lrclk);
+ snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
+- WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
++ WM2200_AIF1_FMT_MASK, fmt_val);
+
+ return 0;
+ }
+diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
+index f481729..10d48cd 100644
+--- a/sound/soc/codecs/wm5100.c
++++ b/sound/soc/codecs/wm5100.c
+@@ -1279,15 +1279,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ case SND_SOC_DAIFMT_DSP_A:
+ mask = 0;
+ break;
+- case SND_SOC_DAIFMT_DSP_B:
+- mask = 1;
+- break;
+ case SND_SOC_DAIFMT_I2S:
+ mask = 2;
+ break;
+- case SND_SOC_DAIFMT_LEFT_J:
+- mask = 3;
+- break;
+ default:
+ dev_err(codec->dev, "Unsupported DAI format %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index eeefbce..34b9bb7 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -116,6 +116,7 @@ struct snd_usb_midi {
+ struct list_head list;
+ struct timer_list error_timer;
+ spinlock_t disc_lock;
++ struct rw_semaphore disc_rwsem;
+ struct mutex mutex;
+ u32 usb_id;
+ int next_midi_device;
+@@ -125,8 +126,10 @@ struct snd_usb_midi {
+ struct snd_usb_midi_in_endpoint *in;
+ } endpoints[MIDI_MAX_ENDPOINTS];
+ unsigned long input_triggered;
+- unsigned int opened;
++ bool autopm_reference;
++ unsigned int opened[2];
+ unsigned char disconnected;
++ unsigned char input_running;
+
+ struct snd_kcontrol *roland_load_ctl;
+ };
+@@ -148,7 +151,6 @@ struct snd_usb_midi_out_endpoint {
+ struct snd_usb_midi_out_endpoint* ep;
+ struct snd_rawmidi_substream *substream;
+ int active;
+- bool autopm_reference;
+ uint8_t cable; /* cable number << 4 */
+ uint8_t state;
+ #define STATE_UNKNOWN 0
+@@ -1033,29 +1035,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi)
+ snd_usbmidi_input_start(&umidi->list);
+ }
+
+-static void substream_open(struct snd_rawmidi_substream *substream, int open)
++static int substream_open(struct snd_rawmidi_substream *substream, int dir,
++ int open)
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct snd_kcontrol *ctl;
++ int err;
++
++ down_read(&umidi->disc_rwsem);
++ if (umidi->disconnected) {
++ up_read(&umidi->disc_rwsem);
++ return open ? -ENODEV : 0;
++ }
+
+ mutex_lock(&umidi->mutex);
+ if (open) {
+- if (umidi->opened++ == 0 && umidi->roland_load_ctl) {
+- ctl = umidi->roland_load_ctl;
+- ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+- snd_ctl_notify(umidi->card,
++ if (!umidi->opened[0] && !umidi->opened[1]) {
++ err = usb_autopm_get_interface(umidi->iface);
++ umidi->autopm_reference = err >= 0;
++ if (err < 0 && err != -EACCES) {
++ mutex_unlock(&umidi->mutex);
++ up_read(&umidi->disc_rwsem);
++ return -EIO;
++ }
++ if (umidi->roland_load_ctl) {
++ ctl = umidi->roland_load_ctl;
++ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+- update_roland_altsetting(umidi);
++ update_roland_altsetting(umidi);
++ }
+ }
++ umidi->opened[dir]++;
++ if (umidi->opened[1])
++ snd_usbmidi_input_start(&umidi->list);
+ } else {
+- if (--umidi->opened == 0 && umidi->roland_load_ctl) {
+- ctl = umidi->roland_load_ctl;
+- ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+- snd_ctl_notify(umidi->card,
++ umidi->opened[dir]--;
++ if (!umidi->opened[1])
++ snd_usbmidi_input_stop(&umidi->list);
++ if (!umidi->opened[0] && !umidi->opened[1]) {
++ if (umidi->roland_load_ctl) {
++ ctl = umidi->roland_load_ctl;
++ ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
++ }
++ if (umidi->autopm_reference)
++ usb_autopm_put_interface(umidi->iface);
+ }
+ }
+ mutex_unlock(&umidi->mutex);
++ up_read(&umidi->disc_rwsem);
++ return 0;
+ }
+
+ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+@@ -1063,7 +1094,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct usbmidi_out_port* port = NULL;
+ int i, j;
+- int err;
+
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+ if (umidi->endpoints[i].out)
+@@ -1076,25 +1106,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ snd_BUG();
+ return -ENXIO;
+ }
+- err = usb_autopm_get_interface(umidi->iface);
+- port->autopm_reference = err >= 0;
+- if (err < 0 && err != -EACCES)
+- return -EIO;
++
+ substream->runtime->private_data = port;
+ port->state = STATE_UNKNOWN;
+- substream_open(substream, 1);
+- return 0;
++ return substream_open(substream, 0, 1);
+ }
+
+ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+- struct snd_usb_midi* umidi = substream->rmidi->private_data;
+- struct usbmidi_out_port *port = substream->runtime->private_data;
+-
+- substream_open(substream, 0);
+- if (port->autopm_reference)
+- usb_autopm_put_interface(umidi->iface);
+- return 0;
++ return substream_open(substream, 0, 0);
+ }
+
+ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+@@ -1147,14 +1167,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
+
+ static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream)
+ {
+- substream_open(substream, 1);
+- return 0;
++ return substream_open(substream, 1, 1);
+ }
+
+ static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream)
+ {
+- substream_open(substream, 0);
+- return 0;
++ return substream_open(substream, 1, 0);
+ }
+
+ static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
+@@ -1403,9 +1421,12 @@ void snd_usbmidi_disconnect(struct list_head* p)
+ * a timer may submit an URB. To reliably break the cycle
+ * a flag under lock must be used
+ */
++ down_write(&umidi->disc_rwsem);
+ spin_lock_irq(&umidi->disc_lock);
+ umidi->disconnected = 1;
+ spin_unlock_irq(&umidi->disc_lock);
++ up_write(&umidi->disc_rwsem);
++
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
+ if (ep->out)
+@@ -2060,12 +2081,15 @@ void snd_usbmidi_input_stop(struct list_head* p)
+ unsigned int i, j;
+
+ umidi = list_entry(p, struct snd_usb_midi, list);
++ if (!umidi->input_running)
++ return;
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
+ if (ep->in)
+ for (j = 0; j < INPUT_URBS; ++j)
+ usb_kill_urb(ep->in->urbs[j]);
+ }
++ umidi->input_running = 0;
+ }
+
+ static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep)
+@@ -2090,8 +2114,11 @@ void snd_usbmidi_input_start(struct list_head* p)
+ int i;
+
+ umidi = list_entry(p, struct snd_usb_midi, list);
++ if (umidi->input_running || !umidi->opened[1])
++ return;
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+ snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
++ umidi->input_running = 1;
+ }
+
+ /*
+@@ -2117,6 +2144,7 @@ int snd_usbmidi_create(struct snd_card *card,
+ umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
+ init_timer(&umidi->error_timer);
+ spin_lock_init(&umidi->disc_lock);
++ init_rwsem(&umidi->disc_rwsem);
+ mutex_init(&umidi->mutex);
+ umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
+ le16_to_cpu(umidi->dev->descriptor.idProduct));
+@@ -2229,9 +2257,6 @@ int snd_usbmidi_create(struct snd_card *card,
+ }
+
+ list_add_tail(&umidi->list, midi_list);
+-
+- for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+- snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
+ return 0;
+ }
+
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 298070e..41e8bfb 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1259,16 +1259,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
+ }
+ channels = (hdr->bLength - 7) / csize - 1;
+ bmaControls = hdr->bmaControls;
++ if (hdr->bLength < 7 + csize) {
++ snd_printk(KERN_ERR "usbaudio: unit %u: "
++ "invalid UAC_FEATURE_UNIT descriptor\n",
++ unitid);
++ return -EINVAL;
++ }
+ } else {
+ struct uac2_feature_unit_descriptor *ftr = _ftr;
+ csize = 4;
+ channels = (hdr->bLength - 6) / 4 - 1;
+ bmaControls = ftr->bmaControls;
+- }
+-
+- if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) {
+- snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid);
+- return -EINVAL;
++ if (hdr->bLength < 6 + csize) {
++ snd_printk(KERN_ERR "usbaudio: unit %u: "
++ "invalid UAC_FEATURE_UNIT descriptor\n",
++ unitid);
++ return -EINVAL;
++ }
+ }
+
+ /* parse the source unit */
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index d73ac9b..128eb0c 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1658,7 +1658,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Roland", */
+ /* .product_name = "A-PRO", */
+- .ifnum = 1,
++ .ifnum = 0,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0f58b4b..b8d1ad1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -387,11 +387,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
+ * rules
+ */
+ err = usb_driver_set_configuration(dev, 2);
+- if (err < 0) {
++ if (err < 0)
+ snd_printdd("error usb_driver_set_configuration: %d\n",
+ err);
+- return -ENODEV;
+- }
++ /* Always return an error, so that we stop creating a device
++ that will just be destroyed and recreated with a new
++ configuration */
++ return -ENODEV;
+ } else
+ snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index d617f69..cd197be 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -701,8 +701,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ int r;
+ gfn_t base_gfn;
+ unsigned long npages;
+- unsigned long i;
+- struct kvm_memory_slot *memslot;
++ struct kvm_memory_slot *memslot, *slot;
+ struct kvm_memory_slot old, new;
+ struct kvm_memslots *slots, *old_memslots;
+
+@@ -749,13 +748,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
+
+ /* Check for overlaps */
+ r = -EEXIST;
+- for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+- struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
+-
+- if (s == memslot || !s->npages)
++ kvm_for_each_memslot(slot, kvm->memslots) {
++ if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
+ continue;
+- if (!((base_gfn + npages <= s->base_gfn) ||
+- (base_gfn >= s->base_gfn + s->npages)))
++ if (!((base_gfn + npages <= slot->base_gfn) ||
++ (base_gfn >= slot->base_gfn + slot->npages)))
+ goto out_free;
+ }
+