Subject: [gentoo-commits] linux-patches r2226 - genpatches-2.6/trunk/3.6
From: Mike Pagano (mpagano) @ 2012-10-29 14:35 UTC
  To: gentoo-commits

Author: mpagano
Date: 2012-10-29 14:34:04 +0000 (Mon, 29 Oct 2012)
New Revision: 2226

Added:
   genpatches-2.6/trunk/3.6/1002_linux-3.6.3.patch
   genpatches-2.6/trunk/3.6/1003_linux-3.6.4.patch
Modified:
   genpatches-2.6/trunk/3.6/0000_README
Log:
Linux patches 3.6.3 and 3.6.4

Modified: genpatches-2.6/trunk/3.6/0000_README
===================================================================
--- genpatches-2.6/trunk/3.6/0000_README	2012-10-19 23:31:19 UTC (rev 2225)
+++ genpatches-2.6/trunk/3.6/0000_README	2012-10-29 14:34:04 UTC (rev 2226)
@@ -47,6 +47,14 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.6.2
 
+Patch:  1002_linux-3.6.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.6.3
+
+Patch:  1003_linux-3.6.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.6.4
+
 Patch:  2400_kcopy-patch-for-infiniband-driver.patch
 From:   Alexey Shvetsov <alexxy@gentoo.org>
 Desc:   Zero copy for infiniband psm userspace driver

Added: genpatches-2.6/trunk/3.6/1002_linux-3.6.3.patch
===================================================================
--- genpatches-2.6/trunk/3.6/1002_linux-3.6.3.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.6/1002_linux-3.6.3.patch	2012-10-29 14:34:04 UTC (rev 2226)
@@ -0,0 +1,3132 @@
+diff --git a/Makefile b/Makefile
+index af5d6a9..6cdadf4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 6
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Terrified Chipmunk
+ 
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 2f88d8d..48c19d4 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1413,6 +1413,16 @@ config PL310_ERRATA_769419
+ 	  on systems with an outer cache, the store buffer is drained
+ 	  explicitly.
+ 
++config ARM_ERRATA_775420
++       bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
++       depends on CPU_V7
++       help
++	 This option enables the workaround for the 775420 Cortex-A9 (r2p2,
++	 r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance
++	 operation aborts with MMU exception, it might cause the processor
++	 to deadlock. This workaround puts DSB before executing ISB if
++	 an abort may occur on cache maintenance.
++
+ endmenu
+ 
+ source "arch/arm/common/Kconfig"
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 3d5fc41..bf53047 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -28,7 +28,7 @@
+ 	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs
+ 	ldr	\tmp, [\tmp, #0]
+ 	tst	\tmp, #HWCAP_VFPv3D16
+-	ldceq	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
++	ldceql	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
+ 	addne	\base, \base, #32*4		    @ step over unused register space
+ #else
+ 	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
+@@ -52,7 +52,7 @@
+ 	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs
+ 	ldr	\tmp, [\tmp, #0]
+ 	tst	\tmp, #HWCAP_VFPv3D16
+-	stceq	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
++	stceql	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
+ 	addne	\base, \base, #32*4		    @ step over unused register space
+ #else
+ 	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index 39e3fb3..3b17227 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
+  * isn't mapped, fail with -EFAULT.
+  */
+ 9001:
++#ifdef CONFIG_ARM_ERRATA_775420
++	dsb
++#endif
+ 	mov	r0, #-EFAULT
+ 	mov	pc, lr
+  UNWIND(.fnend		)
+diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
+index dbf1e03..2bc51fb 100644
+--- a/arch/arm/plat-omap/counter_32k.c
++++ b/arch/arm/plat-omap/counter_32k.c
+@@ -55,22 +55,29 @@ static u32 notrace omap_32k_read_sched_clock(void)
+  * nsecs and adds to a monotonically increasing timespec.
+  */
+ static struct timespec persistent_ts;
+-static cycles_t cycles, last_cycles;
++static cycles_t cycles;
+ static unsigned int persistent_mult, persistent_shift;
++static DEFINE_SPINLOCK(read_persistent_clock_lock);
++
+ static void omap_read_persistent_clock(struct timespec *ts)
+ {
+ 	unsigned long long nsecs;
+-	cycles_t delta;
+-	struct timespec *tsp = &persistent_ts;
++	cycles_t last_cycles;
++	unsigned long flags;
++
++	spin_lock_irqsave(&read_persistent_clock_lock, flags);
+ 
+ 	last_cycles = cycles;
+ 	cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
+-	delta = cycles - last_cycles;
+ 
+-	nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
++	nsecs = clocksource_cyc2ns(cycles - last_cycles,
++					persistent_mult, persistent_shift);
++
++	timespec_add_ns(&persistent_ts, nsecs);
++
++	*ts = persistent_ts;
+ 
+-	timespec_add_ns(tsp, nsecs);
+-	*ts = *tsp;
++	spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
+ }
+ 
+ /**
+diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
+index d272857..579f452 100644
+--- a/arch/mips/ath79/clock.c
++++ b/arch/mips/ath79/clock.c
+@@ -17,6 +17,8 @@
+ #include <linux/err.h>
+ #include <linux/clk.h>
+ 
++#include <asm/div64.h>
++
+ #include <asm/mach-ath79/ath79.h>
+ #include <asm/mach-ath79/ar71xx_regs.h>
+ #include "common.h"
+@@ -166,11 +168,34 @@ static void __init ar933x_clocks_init(void)
+ 	ath79_uart_clk.rate = ath79_ref_clk.rate;
+ }
+ 
++static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
++				      u32 frac, u32 out_div)
++{
++	u64 t;
++	u32 ret;
++
++	t = ath79_ref_clk.rate;
++	t *= nint;
++	do_div(t, ref_div);
++	ret = t;
++
++	t = ath79_ref_clk.rate;
++	t *= nfrac;
++	do_div(t, ref_div * frac);
++	ret += t;
++
++	ret /= (1 << out_div);
++	return ret;
++}
++
+ static void __init ar934x_clocks_init(void)
+ {
+-	u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
++	u32 pll, out_div, ref_div, nint, nfrac, frac, clk_ctrl, postdiv;
+ 	u32 cpu_pll, ddr_pll;
+ 	u32 bootstrap;
++	void __iomem *dpll_base;
++
++	dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
+ 
+ 	bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ 	if (bootstrap &	AR934X_BOOTSTRAP_REF_CLK_40)
+@@ -178,33 +203,59 @@ static void __init ar934x_clocks_init(void)
+ 	else
+ 		ath79_ref_clk.rate = 25 * 1000 * 1000;
+ 
+-	pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+-	out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+-		  AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+-	ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+-		  AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+-	nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+-	       AR934X_PLL_CPU_CONFIG_NINT_MASK;
+-	frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+-	       AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+-
+-	cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+-	cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
+-	cpu_pll /= (1 << out_div);
+-
+-	pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+-	out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+-		  AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+-	ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+-		  AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+-	nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+-	       AR934X_PLL_DDR_CONFIG_NINT_MASK;
+-	frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+-	       AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+-
+-	ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+-	ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
+-	ddr_pll /= (1 << out_div);
++	pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
++	if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
++		out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
++			  AR934X_SRIF_DPLL2_OUTDIV_MASK;
++		pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL1_REG);
++		nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
++		       AR934X_SRIF_DPLL1_NINT_MASK;
++		nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
++		ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
++			  AR934X_SRIF_DPLL1_REFDIV_MASK;
++		frac = 1 << 18;
++	} else {
++		pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
++		out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
++			AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
++		ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
++			  AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
++		nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
++		       AR934X_PLL_CPU_CONFIG_NINT_MASK;
++		nfrac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
++			AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
++		frac = 1 << 6;
++	}
++
++	cpu_pll = ar934x_get_pll_freq(ath79_ref_clk.rate, ref_div, nint,
++				      nfrac, frac, out_div);
++
++	pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL2_REG);
++	if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
++		out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
++			  AR934X_SRIF_DPLL2_OUTDIV_MASK;
++		pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL1_REG);
++		nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
++		       AR934X_SRIF_DPLL1_NINT_MASK;
++		nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
++		ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
++			  AR934X_SRIF_DPLL1_REFDIV_MASK;
++		frac = 1 << 18;
++	} else {
++		pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
++		out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
++			  AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
++		ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
++			   AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
++		nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
++		       AR934X_PLL_DDR_CONFIG_NINT_MASK;
++		nfrac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
++			AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
++		frac = 1 << 10;
++	}
++
++	ddr_pll = ar934x_get_pll_freq(ath79_ref_clk.rate, ref_div, nint,
++				      nfrac, frac, out_div);
+ 
+ 	clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+ 
+@@ -240,6 +291,8 @@ static void __init ar934x_clocks_init(void)
+ 
+ 	ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ 	ath79_uart_clk.rate = ath79_ref_clk.rate;
++
++	iounmap(dpll_base);
+ }
+ 
+ void __init ath79_clocks_init(void)
+diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+index dde5044..31a9a7c 100644
+--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+@@ -63,6 +63,8 @@
+ 
+ #define AR934X_WMAC_BASE	(AR71XX_APB_BASE + 0x00100000)
+ #define AR934X_WMAC_SIZE	0x20000
++#define AR934X_SRIF_BASE	(AR71XX_APB_BASE + 0x00116000)
++#define AR934X_SRIF_SIZE	0x1000
+ 
+ /*
+  * DDR_CTRL block
+@@ -399,4 +401,25 @@
+ #define AR933X_GPIO_COUNT		30
+ #define AR934X_GPIO_COUNT		23
+ 
++/*
++ * SRIF block
++ */
++#define AR934X_SRIF_CPU_DPLL1_REG	0x1c0
++#define AR934X_SRIF_CPU_DPLL2_REG	0x1c4
++#define AR934X_SRIF_CPU_DPLL3_REG	0x1c8
++
++#define AR934X_SRIF_DDR_DPLL1_REG	0x240
++#define AR934X_SRIF_DDR_DPLL2_REG	0x244
++#define AR934X_SRIF_DDR_DPLL3_REG	0x248
++
++#define AR934X_SRIF_DPLL1_REFDIV_SHIFT	27
++#define AR934X_SRIF_DPLL1_REFDIV_MASK	0x1f
++#define AR934X_SRIF_DPLL1_NINT_SHIFT	18
++#define AR934X_SRIF_DPLL1_NINT_MASK	0x1ff
++#define AR934X_SRIF_DPLL1_NFRAC_MASK	0x0003ffff
++
++#define AR934X_SRIF_DPLL2_LOCAL_PLL	BIT(30)
++#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT	13
++#define AR934X_SRIF_DPLL2_OUTDIV_MASK	0x7
++
+ #endif /* __ASM_MACH_AR71XX_REGS_H */
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index f4546e9..23817a6 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ 	struct pt_regs *regs = args->regs;
+ 	int trap = (regs->cp0_cause & 0x7c) >> 2;
+ 
++#ifdef CONFIG_KPROBES
++	/*
++	 * Return immediately if the kprobes fault notifier has set
++	 * DIE_PAGE_FAULT.
++	 */
++	if (cmd == DIE_PAGE_FAULT)
++		return NOTIFY_DONE;
++#endif /* CONFIG_KPROBES */
++
+ 	/* Userspace events, ignore. */
+ 	if (user_mode(regs))
+ 		return NOTIFY_DONE;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 58790bd..05afcca 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -142,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+ KBUILD_CFLAGS += $(mflags-y)
+ KBUILD_AFLAGS += $(mflags-y)
+ 
+-archscripts:
++archscripts: scripts_basic
+ 	$(Q)$(MAKE) $(build)=arch/x86/tools relocs
+ 
+ ###
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 1fbe75a..c1461de 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -984,7 +984,16 @@ static void xen_write_cr4(unsigned long cr4)
+ 
+ 	native_write_cr4(cr4);
+ }
+-
++#ifdef CONFIG_X86_64
++static inline unsigned long xen_read_cr8(void)
++{
++	return 0;
++}
++static inline void xen_write_cr8(unsigned long val)
++{
++	BUG_ON(val);
++}
++#endif
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ 	int ret;
+@@ -1153,6 +1162,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ 	.read_cr4_safe = native_read_cr4_safe,
+ 	.write_cr4 = xen_write_cr4,
+ 
++#ifdef CONFIG_X86_64
++	.read_cr8 = xen_read_cr8,
++	.write_cr8 = xen_write_cr8,
++#endif
++
+ 	.wbinvd = native_wbinvd,
+ 
+ 	.read_msr = native_read_msr_safe,
+@@ -1161,6 +1175,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ 	.read_tsc = native_read_tsc,
+ 	.read_pmc = native_read_pmc,
+ 
++	.read_tscp = native_read_tscp,
++
+ 	.iret = xen_iret,
+ 	.irq_enable_sysexit = xen_sysexit,
+ #ifdef CONFIG_X86_64
+diff --git a/block/blk-core.c b/block/blk-core.c
+index ee3cb3a..8471fb7 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -696,7 +696,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ 	q->request_fn		= rfn;
+ 	q->prep_rq_fn		= NULL;
+ 	q->unprep_rq_fn		= NULL;
+-	q->queue_flags		= QUEUE_FLAG_DEFAULT;
++	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
+ 
+ 	/* Override internal queue lock with supplied lock pointer */
+ 	if (lock)
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 7edaccc..a51df96 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -71,9 +71,6 @@ enum ec_command {
+ #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
+ 
+-#define ACPI_EC_STORM_THRESHOLD 8	/* number of false interrupts
+-					   per one transaction */
+-
+ enum {
+ 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
+ 	EC_FLAGS_GPE_STORM,		/* GPE storm detected */
+@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+ MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+ 
++/*
++ * If the number of false interrupts per one transaction exceeds
++ * this threshold, will think there is a GPE storm happened and
++ * will disable the GPE for normal transaction.
++ */
++static unsigned int ec_storm_threshold  __read_mostly = 8;
++module_param(ec_storm_threshold, uint, 0644);
++MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
++
+ /* If we find an EC via the ECDT, we need to keep a ptr to its context */
+ /* External interfaces use first EC only, so remember */
+ typedef int (*acpi_ec_query_func) (void *data);
+@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ 		msleep(1);
+ 		/* It is safe to enable the GPE outside of the transaction. */
+ 		acpi_enable_gpe(NULL, ec->gpe);
+-	} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
++	} else if (t->irq_count > ec_storm_threshold) {
+ 		pr_info(PREFIX "GPE storm detected, "
+ 			"transactions will use polling mode\n");
+ 		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+@@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
+ 	return 0;
+ }
+ 
++/*
++ * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
++ * the GPE storm threshold back to 20
++ */
++static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
++{
++	pr_debug("Setting the EC GPE storm threshold to 20\n");
++	ec_storm_threshold  = 20;
++	return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ 	{
+ 	ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ 	{
+ 	ec_validate_ecdt, "ASUS hardware", {
+ 	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
++	{
++	ec_enlarge_storm_threshold, "CLEVO hardware", {
++	DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++	DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+ 	{},
+ };
+ 
+-
+ int __init acpi_ec_ecdt_probe(void)
+ {
+ 	acpi_status status;
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index 817f0ee..4dc8024 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1186,17 +1186,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+ 		  size_t size, loff_t *off)
+ {
+ 	struct tpm_chip *chip = file->private_data;
+-	size_t in_size = size, out_size;
++	size_t in_size = size;
++	ssize_t out_size;
+ 
+ 	/* cannot perform a write until the read has cleared
+-	   either via tpm_read or a user_read_timer timeout */
+-	while (atomic_read(&chip->data_pending) != 0)
+-		msleep(TPM_TIMEOUT);
+-
+-	mutex_lock(&chip->buffer_mutex);
++	   either via tpm_read or a user_read_timer timeout.
++	   This also prevents splitted buffered writes from blocking here.
++	*/
++	if (atomic_read(&chip->data_pending) != 0)
++		return -EBUSY;
+ 
+ 	if (in_size > TPM_BUFSIZE)
+-		in_size = TPM_BUFSIZE;
++		return -E2BIG;
++
++	mutex_lock(&chip->buffer_mutex);
+ 
+ 	if (copy_from_user
+ 	    (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -1206,6 +1209,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+ 
+ 	/* atomic tpm command send and result receive */
+ 	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++	if (out_size < 0) {
++		mutex_unlock(&chip->buffer_mutex);
++		return out_size;
++	}
+ 
+ 	atomic_set(&chip->data_pending, out_size);
+ 	mutex_unlock(&chip->buffer_mutex);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 2783f69..f8d2287 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -473,8 +473,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
+ 	client->bus_reset_closure = a->bus_reset_closure;
+ 	if (a->bus_reset != 0) {
+ 		fill_bus_reset_event(&bus_reset, client);
+-		ret = copy_to_user(u64_to_uptr(a->bus_reset),
+-				   &bus_reset, sizeof(bus_reset));
++		/* unaligned size of bus_reset is 36 bytes */
++		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+ 	}
+ 	if (ret == 0 && list_empty(&client->link))
+ 		list_add_tail(&client->link, &client->device->client_list);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 274d25d..97d4f4b 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3893,7 +3893,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ 
+ 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+-	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+ 	ret = drm_irq_install(dev);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a3e53c5..f02cfad 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -513,7 +513,7 @@
+  */
+ # define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
+ #define _3D_CHICKEN3	0x02090
+-#define  _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
++#define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
+ 
+ #define MI_MODE		0x0209c
+ # define VS_TIMER_DISPATCH				(1 << 6)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 0c7f4aa..b634f6f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4351,7 +4351,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 	/* default to 8bpc */
+ 	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ 	if (is_dp) {
+-		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
++		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ 			pipeconf |= PIPECONF_BPP_6 |
+ 				    PIPECONF_DITHER_EN |
+ 				    PIPECONF_DITHER_TYPE_SP;
+@@ -4705,7 +4705,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ 	/* determine panel color depth */
+ 	temp = I915_READ(PIPECONF(pipe));
+ 	temp &= ~PIPE_BPC_MASK;
+-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
++	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
+ 	switch (pipe_bpp) {
+ 	case 18:
+ 		temp |= PIPE_6BPC;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 8c73fae..c23c9ea 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3355,8 +3355,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+ 
+ 	/* Bspec says we need to always set all mask bits. */
+-	I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+-		   _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
++	I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
++		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+ 
+ 	/*
+ 	 * According to the spec the following bits should be
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 670e991..d16f50f 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -974,11 +974,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+ {
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+-	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+-	if (tmds) {
+-		if (tmds->i2c_bus)
+-			radeon_i2c_destroy(tmds->i2c_bus);
+-	}
++	/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+ 	kfree(radeon_encoder->enc_priv);
+ 	drm_encoder_cleanup(encoder);
+ 	kfree(radeon_encoder);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 0138a72..a48c215 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3158,7 +3158,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ 				else {
+ 					bad_sectors -= (sector - first_bad);
+ 					if (max_sync > bad_sectors)
+-						max_sync = max_sync;
++						max_sync = bad_sectors;
+ 					continue;
+ 				}
+ 			}
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index a11253a..c429abd 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2914,8 +2914,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ 	if (le16_to_cpu(p->features) & 1)
+ 		*busw = NAND_BUSWIDTH_16;
+ 
+-	chip->options &= ~NAND_CHIPOPTIONS_MSK;
+-	chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
++	chip->options |= NAND_NO_READRDY;
+ 
+ 	pr_info("ONFI flash detected\n");
+ 	return 1;
+@@ -3080,9 +3079,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ 			mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ 		}
+ 	}
+-	/* Get chip options, preserve non chip based options */
+-	chip->options &= ~NAND_CHIPOPTIONS_MSK;
+-	chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
++	/* Get chip options */
++	chip->options |= type->options;
+ 
+ 	/*
+ 	 * Check if chip is not a Samsung device. Do not clear the
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index cb3356c..04668b4 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -175,13 +175,13 @@ struct e1000_info;
+ /*
+  * in the case of WTHRESH, it appears at least the 82571/2 hardware
+  * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
+- * WTHRESH=4, and since we want 64 bytes at a time written back, set
+- * it to 5
++ * WTHRESH=4, so a setting of 5 gives the most efficient bus
++ * utilization but to avoid possible Tx stalls, set it to 1
+  */
+ #define E1000_TXDCTL_DMA_BURST_ENABLE                          \
+ 	(E1000_TXDCTL_GRAN | /* set descriptor granularity */  \
+ 	 E1000_TXDCTL_COUNT_DESC |                             \
+-	 (5 << 16) | /* wthresh must be +1 more than desired */\
++	 (1 << 16) | /* wthresh must be +1 more than desired */\
+ 	 (1 << 8)  | /* hthresh */                             \
+ 	 0x1f)       /* pthresh */
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index d01a099..a46e75e 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
+ 		 * set up some performance related parameters to encourage the
+ 		 * hardware to use the bus more efficiently in bursts, depends
+ 		 * on the tx_int_delay to be enabled,
+-		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
++		 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
+ 		 * hthresh = 1 ==> prefetch when one or more available
+ 		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+ 		 * BEWARE: this seems to work but should be considered first if
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index 03c2d8d..cc7e720 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -117,6 +117,7 @@ enum {
+ struct mcs7830_data {
+ 	u8 multi_filter[8];
+ 	u8 config;
++	u8 link_counter;
+ };
+ 
+ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
+@@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+ {
+ 	u8 *buf = urb->transfer_buffer;
+-	bool link;
++	bool link, link_changed;
++	struct mcs7830_data *data = mcs7830_get_data(dev);
+ 
+ 	if (urb->actual_length < 16)
+ 		return;
+ 
+ 	link = !(buf[1] & 0x20);
+-	if (netif_carrier_ok(dev->net) != link) {
+-		if (link) {
+-			netif_carrier_on(dev->net);
+-			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+-		} else
+-			netif_carrier_off(dev->net);
+-		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+-	}
++	link_changed = netif_carrier_ok(dev->net) != link;
++	if (link_changed) {
++		data->link_counter++;
++		/*
++		   track link state 20 times to guard against erroneous
++		   link state changes reported sometimes by the chip
++		 */
++		if (data->link_counter > 20) {
++			data->link_counter = 0;
++			if (link) {
++				netif_carrier_on(dev->net);
++				usbnet_defer_kevent(dev, EVENT_LINK_RESET);
++			} else
++				netif_carrier_off(dev->net);
++			netdev_dbg(dev->net, "Link Status is: %d\n", link);
++		}
++	} else
++		data->link_counter = 0;
+ }
+ 
+ static const struct driver_info moschip_info = {
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index 76f07d8..1b48414 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+ 
+ 	if (ath_tx_start(hw, skb, &txctl) != 0) {
+ 		ath_dbg(common, XMIT, "CABQ TX failed\n");
+-		dev_kfree_skb_any(skb);
++		ieee80211_free_txskb(hw, skb);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a22df74..61e08e6 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -767,7 +767,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ 
+ 	return;
+ exit:
+-	dev_kfree_skb_any(skb);
++	ieee80211_free_txskb(hw, skb);
+ }
+ 
+ static void ath9k_stop(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 0d4155a..423a9f3 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ 					   struct ath_txq *txq,
+ 					   struct ath_atx_tid *tid,
+-					   struct sk_buff *skb,
+-					   bool dequeue);
++					   struct sk_buff *skb);
+ 
+ enum {
+ 	MCS_HT20,
+@@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+ 		fi = get_frame_info(skb);
+ 		bf = fi->bf;
+ 
+-		if (bf && fi->retries) {
++		if (!bf) {
++			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
++			if (!bf) {
++				ieee80211_free_txskb(sc->hw, skb);
++				continue;
++			}
++		}
++
++		if (fi->retries) {
+ 			list_add_tail(&bf->list, &bf_head);
+ 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+@@ -785,10 +792,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+ 		fi = get_frame_info(skb);
+ 		bf = fi->bf;
+ 		if (!fi->bf)
+-			bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
++			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ 
+-		if (!bf)
++		if (!bf) {
++			__skb_unlink(skb, &tid->buf_q);
++			ieee80211_free_txskb(sc->hw, skb);
+ 			continue;
++		}
+ 
+ 		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
+ 		seqno = bf->bf_state.seqno;
+@@ -1731,9 +1741,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
+ 		return;
+ 	}
+ 
+-	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+-	if (!bf)
++	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++	if (!bf) {
++		ieee80211_free_txskb(sc->hw, skb);
+ 		return;
++	}
+ 
+ 	bf->bf_state.bf_type = BUF_AMPDU;
+ 	INIT_LIST_HEAD(&bf_head);
+@@ -1757,11 +1769,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ 	struct ath_buf *bf;
+ 
+ 	bf = fi->bf;
+-	if (!bf)
+-		bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
+-
+-	if (!bf)
+-		return;
+ 
+ 	INIT_LIST_HEAD(&bf_head);
+ 	list_add_tail(&bf->list, &bf_head);
+@@ -1834,8 +1841,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ 					   struct ath_txq *txq,
+ 					   struct ath_atx_tid *tid,
+-					   struct sk_buff *skb,
+-					   bool dequeue)
++					   struct sk_buff *skb)
+ {
+ 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ 	struct ath_frame_info *fi = get_frame_info(skb);
+@@ -1847,7 +1853,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ 	bf = ath_tx_get_buffer(sc);
+ 	if (!bf) {
+ 		ath_dbg(common, XMIT, "TX buffers are full\n");
+-		goto error;
++		return NULL;
+ 	}
+ 
+ 	ATH_TXBUF_RESET(bf);
+@@ -1876,18 +1882,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ 		ath_err(ath9k_hw_common(sc->sc_ah),
+ 			"dma_mapping_error() on TX\n");
+ 		ath_tx_return_buffer(sc, bf);
+-		goto error;
++		return NULL;
+ 	}
+ 
+ 	fi->bf = bf;
+ 
+ 	return bf;
+-
+-error:
+-	if (dequeue)
+-		__skb_unlink(skb, &tid->buf_q);
+-	dev_kfree_skb_any(skb);
+-	return NULL;
+ }
+ 
+ /* FIXME: tx power */
+@@ -1916,9 +1916,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
+ 		 */
+ 		ath_tx_send_ampdu(sc, tid, skb, txctl);
+ 	} else {
+-		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+-		if (!bf)
++		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++		if (!bf) {
++			if (txctl->paprd)
++				dev_kfree_skb_any(skb);
++			else
++				ieee80211_free_txskb(sc->hw, skb);
+ 			return;
++		}
+ 
+ 		bf->bf_state.bfs_paprd = txctl->paprd;
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 5b30132..41b74ba 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1403,7 +1403,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ 	ctio->u.status1.scsi_status =
+ 	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ 	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+-	((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
++	ctio->u.status1.sense_data[0] = resp_code;
+ 
+ 	qla2x00_start_iocbs(ha, ha->req);
+ }
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 182d5a5..f4cc413 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2054,7 +2054,7 @@ static void unmap_region(sector_t lba, unsigned int len)
+ 		block = lba + alignment;
+ 		rem = do_div(block, granularity);
+ 
+-		if (rem == 0 && lba + granularity <= end && block < map_size) {
++		if (rem == 0 && lba + granularity < end && block < map_size) {
+ 			clear_bit(block, map_storep);
+ 			if (scsi_debug_lbprz)
+ 				memset(fake_storep +
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 528d52b..0144078 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ 	/*
+ 	 * At this point, all outstanding requests in the adapter
+ 	 * should have been flushed out and return to us
++	 * There is a potential race here where the host may be in
++	 * the process of responding when we return from here.
++	 * Just wait for all in-transit packets to be accounted for
++	 * before we return from here.
+ 	 */
++	storvsc_wait_to_drain(stor_device);
+ 
+ 	return SUCCESS;
+ }
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 3e79a2f..7554d78 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ 	struct scatterlist sg;
+ 	unsigned long flags;
+ 
+-	sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
++	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+ 
+ 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 97c0f78..dd4fce2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3271,7 +3271,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ 		len += 1;
+ 
+ 		if ((len + payload_len) > buffer_len) {
+-			spin_unlock(&tiqn->tiqn_tpg_lock);
+ 			end_of_buf = 1;
+ 			goto eob;
+ 		}
+@@ -3424,6 +3423,7 @@ static int iscsit_send_reject(
+ 	hdr->opcode		= ISCSI_OP_REJECT;
+ 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+ 	hton24(hdr->dlength, ISCSI_HDR_LEN);
++	hdr->ffffffff		= 0xffffffff;
+ 	cmd->stat_sn		= conn->stat_sn++;
+ 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+ 	hdr->exp_cmdsn	= cpu_to_be32(conn->sess->exp_cmd_sn);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 8a908b2..a90294f 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -25,10 +25,10 @@
+ #define NA_DATAOUT_TIMEOUT_RETRIES	5
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MAX	15
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MIN	1
+-#define NA_NOPIN_TIMEOUT		5
++#define NA_NOPIN_TIMEOUT		15
+ #define NA_NOPIN_TIMEOUT_MAX		60
+ #define NA_NOPIN_TIMEOUT_MIN		3
+-#define NA_NOPIN_RESPONSE_TIMEOUT	5
++#define NA_NOPIN_RESPONSE_TIMEOUT	30
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MAX	60
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MIN	3
+ #define NA_RANDOM_DATAIN_PDU_OFFSETS	0
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index a38a3f8..de9ea32 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -677,6 +677,12 @@ int iscsit_ta_generate_node_acls(
+ 	pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ 		tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+ 
++	if (flag == 1 && a->cache_dynamic_acls == 0) {
++		pr_debug("Explicitly setting cache_dynamic_acls=1 when "
++			"generate_node_acls=1\n");
++		a->cache_dynamic_acls = 1;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -716,6 +722,12 @@ int iscsit_ta_cache_dynamic_acls(
+ 		return -EINVAL;
+ 	}
+ 
++	if (a->generate_node_acls == 1 && flag == 0) {
++		pr_debug("Skipping cache_dynamic_acls=0 when"
++			" generate_node_acls=1\n");
++		return 0;
++	}
++
+ 	a->cache_dynamic_acls = flag;
+ 	pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ 		" ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 801efa8..06aca11 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3132,6 +3132,7 @@ static int __init target_core_init_configfs(void)
+ 				GFP_KERNEL);
+ 	if (!target_cg->default_groups) {
+ 		pr_err("Unable to allocate target_cg->default_groups\n");
++		ret = -ENOMEM;
+ 		goto out_global;
+ 	}
+ 
+@@ -3147,6 +3148,7 @@ static int __init target_core_init_configfs(void)
+ 				GFP_KERNEL);
+ 	if (!hba_cg->default_groups) {
+ 		pr_err("Unable to allocate hba_cg->default_groups\n");
++		ret = -ENOMEM;
+ 		goto out_global;
+ 	}
+ 	config_group_init_type_name(&alua_group,
+@@ -3162,6 +3164,7 @@ static int __init target_core_init_configfs(void)
+ 			GFP_KERNEL);
+ 	if (!alua_cg->default_groups) {
+ 		pr_err("Unable to allocate alua_cg->default_groups\n");
++		ret = -ENOMEM;
+ 		goto out_global;
+ 	}
+ 
+@@ -3173,14 +3176,17 @@ static int __init target_core_init_configfs(void)
+ 	 * Add core/alua/lu_gps/default_lu_gp
+ 	 */
+ 	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+-	if (IS_ERR(lu_gp))
++	if (IS_ERR(lu_gp)) {
++		ret = -ENOMEM;
+ 		goto out_global;
++	}
+ 
+ 	lu_gp_cg = &alua_lu_gps_group;
+ 	lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ 			GFP_KERNEL);
+ 	if (!lu_gp_cg->default_groups) {
+ 		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
++		ret = -ENOMEM;
+ 		goto out_global;
+ 	}
+ 
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index cbb5aaf..5c5ed7a 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -125,6 +125,19 @@ static struct se_device *fd_create_virtdevice(
+ 	 * of pure timestamp updates.
+ 	 */
+ 	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
++	/*
++	 * Optionally allow fd_buffered_io=1 to be enabled for people
++	 * who want use the fs buffer cache as an WriteCache mechanism.
++	 *
++	 * This means that in event of a hard failure, there is a risk
++	 * of silent data-loss if the SCSI client has *not* performed a
++	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
++	 * to write-out the entire device cache.
++	 */
++	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
++		flags &= ~O_DSYNC;
++	}
+ 
+ 	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
+ 	if (IS_ERR(file)) {
+@@ -188,6 +201,12 @@ static struct se_device *fd_create_virtdevice(
+ 	if (!dev)
+ 		goto fail;
+ 
++	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
++			" with FDBD_HAS_BUFFERED_IO_WCE\n");
++		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
++	}
++
+ 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ 	fd_dev->fd_queue_depth = dev->queue_depth;
+ 
+@@ -407,6 +426,7 @@ enum {
+ static match_table_t tokens = {
+ 	{Opt_fd_dev_name, "fd_dev_name=%s"},
+ 	{Opt_fd_dev_size, "fd_dev_size=%s"},
++	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ 	{Opt_err, NULL}
+ };
+ 
+@@ -418,7 +438,7 @@ static ssize_t fd_set_configfs_dev_params(
+ 	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ 	char *orig, *ptr, *arg_p, *opts;
+ 	substring_t args[MAX_OPT_ARGS];
+-	int ret = 0, token;
++	int ret = 0, arg, token;
+ 
+ 	opts = kstrdup(page, GFP_KERNEL);
+ 	if (!opts)
+@@ -459,6 +479,19 @@ static ssize_t fd_set_configfs_dev_params(
+ 					" bytes\n", fd_dev->fd_dev_size);
+ 			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ 			break;
++		case Opt_fd_buffered_io:
++			match_int(args, &arg);
++			if (arg != 1) {
++				pr_err("bogus fd_buffered_io=%d value\n", arg);
++				ret = -EINVAL;
++				goto out;
++			}
++
++			pr_debug("FILEIO: Using buffered I/O"
++				" operations for struct fd_dev\n");
++
++			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
++			break;
+ 		default:
+ 			break;
+ 		}
+@@ -490,8 +523,10 @@ static ssize_t fd_show_configfs_dev_params(
+ 	ssize_t bl = 0;
+ 
+ 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+-	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
+-		fd_dev->fd_dev_name, fd_dev->fd_dev_size);
++	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
++		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
++		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
++		"Buffered-WCE" : "O_DSYNC");
+ 	return bl;
+ }
+ 
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 70ce7fd..876ae53 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -14,6 +14,7 @@
+ 
+ #define FBDF_HAS_PATH		0x01
+ #define FBDF_HAS_SIZE		0x02
++#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+ 
+ struct fd_dev {
+ 	u32		fbd_flags;
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 388a922..9229bd9 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -600,30 +600,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+-	unsigned char *buf, *map_buf;
++	unsigned char *rbuf;
+ 	unsigned char *cdb = cmd->t_task_cdb;
++	unsigned char buf[SE_INQUIRY_BUF];
+ 	int p, ret;
+ 
+-	map_buf = transport_kmap_data_sg(cmd);
+-	/*
+-	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+-	 * know we actually allocated a full page.  Otherwise, if the
+-	 * data buffer is too small, allocate a temporary buffer so we
+-	 * don't have to worry about overruns in all our INQUIRY
+-	 * emulation handling.
+-	 */
+-	if (cmd->data_length < SE_INQUIRY_BUF &&
+-	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+-		buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+-		if (!buf) {
+-			transport_kunmap_data_sg(cmd);
+-			cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+-			return -ENOMEM;
+-		}
+-	} else {
+-		buf = map_buf;
+-	}
+-
+ 	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+ 		buf[0] = 0x3f; /* Not connected */
+ 	else
+@@ -655,11 +636,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
+ 	ret = -EINVAL;
+ 
+ out:
+-	if (buf != map_buf) {
+-		memcpy(map_buf, buf, cmd->data_length);
+-		kfree(buf);
++	rbuf = transport_kmap_data_sg(cmd);
++	if (rbuf) {
++		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
++		transport_kunmap_data_sg(cmd);
+ 	}
+-	transport_kunmap_data_sg(cmd);
+ 
+ 	if (!ret)
+ 		target_complete_cmd(cmd, GOOD);
+@@ -803,7 +784,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ 	unsigned char *rbuf;
+ 	int type = dev->transport->get_device_type(dev);
+ 	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+-	int offset = ten ? 8 : 4;
++	u32 offset = ten ? 8 : 4;
+ 	int length = 0;
+ 	unsigned char buf[SE_MODE_PAGE_BUF];
+ 
+@@ -836,6 +817,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ 		offset -= 2;
+ 		buf[0] = (offset >> 8) & 0xff;
+ 		buf[1] = offset & 0xff;
++		offset += 2;
+ 
+ 		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ 		    (cmd->se_deve &&
+@@ -845,13 +827,10 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ 		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ 		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+ 			spc_modesense_dpofua(&buf[3], type);
+-
+-		if ((offset + 2) > cmd->data_length)
+-			offset = cmd->data_length;
+-
+ 	} else {
+ 		offset -= 1;
+ 		buf[0] = offset & 0xff;
++		offset += 1;
+ 
+ 		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ 		    (cmd->se_deve &&
+@@ -861,14 +840,13 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ 		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ 		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+ 			spc_modesense_dpofua(&buf[2], type);
+-
+-		if ((offset + 1) > cmd->data_length)
+-			offset = cmd->data_length;
+ 	}
+ 
+ 	rbuf = transport_kmap_data_sg(cmd);
+-	memcpy(rbuf, buf, offset);
+-	transport_kunmap_data_sg(cmd);
++	if (rbuf) {
++		memcpy(rbuf, buf, min(offset, cmd->data_length));
++		transport_kunmap_data_sg(cmd);
++	}
+ 
+ 	target_complete_cmd(cmd, GOOD);
+ 	return 0;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 84cbf29..a13f7e1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3475,6 +3475,19 @@ int con_debug_enter(struct vc_data *vc)
+ 			kdb_set(2, setargs);
+ 		}
+ 	}
++	if (vc->vc_cols < 999) {
++		int colcount;
++		char cols[4];
++		const char *setargs[3] = {
++			"set",
++			"COLUMNS",
++			cols,
++		};
++		if (kdbgetintenv(setargs[0], &colcount)) {
++			snprintf(cols, 4, "%i", vc->vc_cols);
++			kdb_set(2, setargs);
++		}
++	}
+ #endif /* CONFIG_KGDB_KDB */
+ 	return ret;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index f763ed7..e8007b8 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1551,6 +1551,9 @@ static const struct usb_device_id acm_ids[] = {
+ 					   Maybe we should define a new
+ 					   quirk for this. */
+ 	},
++	{ USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
++	.driver_info = NO_UNION_NORMAL,
++	},
+ 	{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 1e35963..660fd53 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1699,7 +1699,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
+ 	int		retval;
+ 	struct resource	*res;
+ 
+-	if (!dev->platform_data) {
++	if (!dev->platform_data && !pdev->dev.of_node) {
+ 		/* small (so we copy it) but critical! */
+ 		DBG("missing platform_data\n");
+ 		return -ENODEV;
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index d8dedc7..3639371 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -366,6 +366,17 @@ static int vfio_intx_enable(struct vfio_pci_device *vdev)
+ 		return -ENOMEM;
+ 
+ 	vdev->num_ctx = 1;
++
++	/*
++	 * If the virtual interrupt is masked, restore it.  Devices
++	 * supporting DisINTx can be masked at the hardware level
++	 * here, non-PCI-2.3 devices will have to wait until the
++	 * interrupt is enabled.
++	 */
++	vdev->ctx[0].masked = vdev->virq_disabled;
++	if (vdev->pci_2_3)
++		pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++
+ 	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+ 
+ 	return 0;
+@@ -400,25 +411,26 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
+ 		return PTR_ERR(trigger);
+ 	}
+ 
++	vdev->ctx[0].trigger = trigger;
++
+ 	if (!vdev->pci_2_3)
+ 		irqflags = 0;
+ 
+ 	ret = request_irq(pdev->irq, vfio_intx_handler,
+ 			  irqflags, vdev->ctx[0].name, vdev);
+ 	if (ret) {
++		vdev->ctx[0].trigger = NULL;
+ 		kfree(vdev->ctx[0].name);
+ 		eventfd_ctx_put(trigger);
+ 		return ret;
+ 	}
+ 
+-	vdev->ctx[0].trigger = trigger;
+-
+ 	/*
+ 	 * INTx disable will stick across the new irq setup,
+ 	 * disable_irq won't.
+ 	 */
+ 	spin_lock_irqsave(&vdev->irqlock, flags);
+-	if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
++	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+ 		disable_irq_nosync(pdev->irq);
+ 	spin_unlock_irqrestore(&vdev->irqlock, flags);
+ 
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 8af6414..38fcfff 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -647,7 +647,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
+ 	result = fb_sys_write(info, buf, count, ppos);
+ 
+ 	if (result > 0) {
+-		int start = max((int)(offset / info->fix.line_length) - 1, 0);
++		int start = max((int)(offset / info->fix.line_length), 0);
+ 		int lines = min((u32)((result / info->fix.line_length) + 1),
+ 				(u32)info->var.yres);
+ 
+diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
+index af8f26b..db1e392 100644
+--- a/drivers/video/via/via_clock.c
++++ b/drivers/video/via/via_clock.c
+@@ -25,6 +25,7 @@
+ 
+ #include <linux/kernel.h>
+ #include <linux/via-core.h>
++#include <asm/olpc.h>
+ #include "via_clock.h"
+ #include "global.h"
+ #include "debug.h"
+@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
+ 	printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
+ }
+ 
++static void noop_set_clock_state(u8 state)
++{
++}
++
+ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ {
+ 	switch (gfx_chip) {
+@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ 		break;
+ 
+ 	}
++
++	if (machine_is_olpc()) {
++		/* The OLPC XO-1.5 cannot suspend/resume reliably if the
++		 * IGA1/IGA2 clocks are set as on or off (memory rot
++		 * occasionally happens during suspend under such
++		 * configurations).
++		 *
++		 * The only known stable scenario is to leave this bits as-is,
++		 * which in their default states are documented to enable the
++		 * clock only when it is needed.
++		 */
++		clock->set_primary_clock_state = noop_set_clock_state;
++		clock->set_secondary_clock_state = noop_set_clock_state;
++	}
+ }
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index bce15cf..ca373d1 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -47,6 +47,7 @@
+ #include <xen/xenbus.h>
+ #include <xen/xen.h>
+ #include "xenbus_comms.h"
++#include <asm/xen/hypervisor.h>
+ 
+ struct xs_stored_msg {
+ 	struct list_head list;
+@@ -617,7 +618,24 @@ static struct xenbus_watch *find_watch(const char *token)
+ 
+ 	return NULL;
+ }
++/*
++ * Certain older XenBus toolstack cannot handle reading values that are
++ * not populated. Some Xen 3.4 installation are incapable of doing this
++ * so if we are running on anything older than 4 do not attempt to read
++ * control/platform-feature-xs_reset_watches.
++ */
++static bool xen_strict_xenbus_quirk()
++{
++	uint32_t eax, ebx, ecx, edx, base;
++
++	base = xen_cpuid_base();
++	cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+ 
++	if ((eax >> 16) < 4)
++		return true;
++	return false;
++
++}
+ static void xs_reset_watches(void)
+ {
+ 	int err, supported = 0;
+@@ -625,6 +643,9 @@ static void xs_reset_watches(void)
+ 	if (!xen_hvm_domain())
+ 		return;
+ 
++	if (xen_strict_xenbus_quirk())
++		return;
++
+ 	err = xenbus_scanf(XBT_NIL, "control",
+ 			"platform-feature-xs_reset_watches", "%d", &supported);
+ 	if (err != 1 || !supported)
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index e7396cf..91b1165 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
+ 		ino->flags |= AUTOFS_INF_PENDING;
+ 		spin_unlock(&sbi->fs_lock);
+ 		status = autofs4_mount_wait(dentry);
+-		if (status)
+-			return ERR_PTR(status);
+ 		spin_lock(&sbi->fs_lock);
+ 		ino->flags &= ~AUTOFS_INF_PENDING;
++		if (status) {
++			spin_unlock(&sbi->fs_lock);
++			return ERR_PTR(status);
++		}
+ 	}
+ done:
+ 	if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 8e1b60e..02ce909 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -99,7 +99,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+  * FIXME: we should try harder by querying the mds for the ino.
+  */
+ static struct dentry *__fh_to_dentry(struct super_block *sb,
+-				     struct ceph_nfs_fh *fh)
++				     struct ceph_nfs_fh *fh, int fh_len)
+ {
+ 	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ 	struct inode *inode;
+@@ -107,6 +107,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ 	struct ceph_vino vino;
+ 	int err;
+ 
++	if (fh_len < sizeof(*fh) / 4)
++		return ERR_PTR(-ESTALE);
++
+ 	dout("__fh_to_dentry %llx\n", fh->ino);
+ 	vino.ino = fh->ino;
+ 	vino.snap = CEPH_NOSNAP;
+@@ -150,7 +153,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+  * convert connectable fh to dentry
+  */
+ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+-				      struct ceph_nfs_confh *cfh)
++				      struct ceph_nfs_confh *cfh, int fh_len)
+ {
+ 	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ 	struct inode *inode;
+@@ -158,6 +161,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+ 	struct ceph_vino vino;
+ 	int err;
+ 
++	if (fh_len < sizeof(*cfh) / 4)
++		return ERR_PTR(-ESTALE);
++
+ 	dout("__cfh_to_dentry %llx (%llx/%x)\n",
+ 	     cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
+ 
+@@ -207,9 +213,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ 					int fh_len, int fh_type)
+ {
+ 	if (fh_type == 1)
+-		return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
++		return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
++								fh_len);
+ 	else
+-		return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
++		return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
++								fh_len);
+ }
+ 
+ /*
+@@ -230,6 +238,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
+ 
+ 	if (fh_type == 1)
+ 		return ERR_PTR(-ESTALE);
++	if (fh_len < sizeof(*cfh) / 4)
++		return ERR_PTR(-ESTALE);
+ 
+ 	pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
+ 		 cfh->parent_name_hash);
+diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
+index e8ed6d4..4767774 100644
+--- a/fs/gfs2/export.c
++++ b/fs/gfs2/export.c
+@@ -161,6 +161,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ 	case GFS2_SMALL_FH_SIZE:
+ 	case GFS2_LARGE_FH_SIZE:
+ 	case GFS2_OLD_FH_SIZE:
++		if (fh_len < GFS2_SMALL_FH_SIZE)
++			return NULL;
+ 		this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
+ 		this.no_formal_ino |= be32_to_cpu(fh[1]);
+ 		this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
+@@ -180,6 +182,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
+ 	switch (fh_type) {
+ 	case GFS2_LARGE_FH_SIZE:
+ 	case GFS2_OLD_FH_SIZE:
++		if (fh_len < GFS2_LARGE_FH_SIZE)
++			return NULL;
+ 		parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
+ 		parent.no_formal_ino |= be32_to_cpu(fh[5]);
+ 		parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index 1d38044..2b4f235 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -175,7 +175,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
+ {
+ 	struct isofs_fid *ifid = (struct isofs_fid *)fid;
+ 
+-	if (fh_type != 2)
++	if (fh_len < 2 || fh_type != 2)
+ 		return NULL;
+ 
+ 	return isofs_export_iget(sb,
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 52c15c7..86b39b1 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -86,7 +86,12 @@ nope:
+ static void release_data_buffer(struct buffer_head *bh)
+ {
+ 	if (buffer_freed(bh)) {
++		WARN_ON_ONCE(buffer_dirty(bh));
+ 		clear_buffer_freed(bh);
++		clear_buffer_mapped(bh);
++		clear_buffer_new(bh);
++		clear_buffer_req(bh);
++		bh->b_bdev = NULL;
+ 		release_buffer_page(bh);
+ 	} else
+ 		put_bh(bh);
+@@ -866,17 +871,35 @@ restart_loop:
+ 		 * there's no point in keeping a checkpoint record for
+ 		 * it. */
+ 
+-		/* A buffer which has been freed while still being
+-		 * journaled by a previous transaction may end up still
+-		 * being dirty here, but we want to avoid writing back
+-		 * that buffer in the future after the "add to orphan"
+-		 * operation been committed,  That's not only a performance
+-		 * gain, it also stops aliasing problems if the buffer is
+-		 * left behind for writeback and gets reallocated for another
+-		 * use in a different page. */
+-		if (buffer_freed(bh) && !jh->b_next_transaction) {
+-			clear_buffer_freed(bh);
+-			clear_buffer_jbddirty(bh);
++		/*
++		 * A buffer which has been freed while still being journaled by
++		 * a previous transaction.
++		 */
++		if (buffer_freed(bh)) {
++			/*
++			 * If the running transaction is the one containing
++			 * "add to orphan" operation (b_next_transaction !=
++			 * NULL), we have to wait for that transaction to
++			 * commit before we can really get rid of the buffer.
++			 * So just clear b_modified to not confuse transaction
++			 * credit accounting and refile the buffer to
++			 * BJ_Forget of the running transaction. If the just
++			 * committed transaction contains "add to orphan"
++			 * operation, we can completely invalidate the buffer
++			 * now. We are rather throughout in that since the
++			 * buffer may be still accessible when blocksize <
++			 * pagesize and it is attached to the last partial
++			 * page.
++			 */
++			jh->b_modified = 0;
++			if (!jh->b_next_transaction) {
++				clear_buffer_freed(bh);
++				clear_buffer_jbddirty(bh);
++				clear_buffer_mapped(bh);
++				clear_buffer_new(bh);
++				clear_buffer_req(bh);
++				bh->b_bdev = NULL;
++			}
+ 		}
+ 
+ 		if (buffer_jbddirty(bh)) {
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index febc10d..78b7f84 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1843,15 +1843,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+  * We're outside-transaction here.  Either or both of j_running_transaction
+  * and j_committing_transaction may be NULL.
+  */
+-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
++				int partial_page)
+ {
+ 	transaction_t *transaction;
+ 	struct journal_head *jh;
+ 	int may_free = 1;
+-	int ret;
+ 
+ 	BUFFER_TRACE(bh, "entry");
+ 
++retry:
+ 	/*
+ 	 * It is safe to proceed here without the j_list_lock because the
+ 	 * buffers cannot be stolen by try_to_free_buffers as long as we are
+@@ -1879,10 +1880,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ 	 * clear the buffer dirty bit at latest at the moment when the
+ 	 * transaction marking the buffer as freed in the filesystem
+ 	 * structures is committed because from that moment on the
+-	 * buffer can be reallocated and used by a different page.
++	 * block can be reallocated and used by a different page.
+ 	 * Since the block hasn't been freed yet but the inode has
+ 	 * already been added to orphan list, it is safe for us to add
+ 	 * the buffer to BJ_Forget list of the newest transaction.
++	 *
++	 * Also we have to clear buffer_mapped flag of a truncated buffer
++	 * because the buffer_head may be attached to the page straddling
++	 * i_size (can happen only when blocksize < pagesize) and thus the
++	 * buffer_head can be reused when the file is extended again. So we end
++	 * up keeping around invalidated buffers attached to transactions'
++	 * BJ_Forget list just to stop checkpointing code from cleaning up
++	 * the transaction this buffer was modified in.
+ 	 */
+ 	transaction = jh->b_transaction;
+ 	if (transaction == NULL) {
+@@ -1909,13 +1918,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ 			 * committed, the buffer won't be needed any
+ 			 * longer. */
+ 			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+-			ret = __dispose_buffer(jh,
++			may_free = __dispose_buffer(jh,
+ 					journal->j_running_transaction);
+-			journal_put_journal_head(jh);
+-			spin_unlock(&journal->j_list_lock);
+-			jbd_unlock_bh_state(bh);
+-			spin_unlock(&journal->j_state_lock);
+-			return ret;
++			goto zap_buffer;
+ 		} else {
+ 			/* There is no currently-running transaction. So the
+ 			 * orphan record which we wrote for this file must have
+@@ -1923,13 +1928,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ 			 * the committing transaction, if it exists. */
+ 			if (journal->j_committing_transaction) {
+ 				JBUFFER_TRACE(jh, "give to committing trans");
+-				ret = __dispose_buffer(jh,
++				may_free = __dispose_buffer(jh,
+ 					journal->j_committing_transaction);
+-				journal_put_journal_head(jh);
+-				spin_unlock(&journal->j_list_lock);
+-				jbd_unlock_bh_state(bh);
+-				spin_unlock(&journal->j_state_lock);
+-				return ret;
++				goto zap_buffer;
+ 			} else {
+ 				/* The orphan record's transaction has
+ 				 * committed.  We can cleanse this buffer */
+@@ -1950,10 +1951,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ 		}
+ 		/*
+ 		 * The buffer is committing, we simply cannot touch
+-		 * it. So we just set j_next_transaction to the
+-		 * running transaction (if there is one) and mark
+-		 * buffer as freed so that commit code knows it should
+-		 * clear dirty bits when it is done with the buffer.
++		 * it. If the page is straddling i_size we have to wait
++		 * for commit and try again.
++		 */
++		if (partial_page) {
++			tid_t tid = journal->j_committing_transaction->t_tid;
++
++			journal_put_journal_head(jh);
++			spin_unlock(&journal->j_list_lock);
++			jbd_unlock_bh_state(bh);
++			spin_unlock(&journal->j_state_lock);
++			log_wait_commit(journal, tid);
++			goto retry;
++		}
++		/*
++		 * OK, buffer won't be reachable after truncate. We just set
++		 * j_next_transaction to the running transaction (if there is
++		 * one) and mark buffer as freed so that commit code knows it
++		 * should clear dirty bits when it is done with the buffer.
+ 		 */
+ 		set_buffer_freed(bh);
+ 		if (journal->j_running_transaction && buffer_jbddirty(bh))
+@@ -1976,6 +1991,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ 	}
+ 
+ zap_buffer:
++	/*
++	 * This is tricky. Although the buffer is truncated, it may be reused
++	 * if blocksize < pagesize and it is attached to the page straddling
++	 * EOF. Since the buffer might have been added to BJ_Forget list of the
++	 * running transaction, journal_get_write_access() won't clear
++	 * b_modified and credit accounting gets confused. So clear b_modified
++	 * here. */
++	jh->b_modified = 0;
+ 	journal_put_journal_head(jh);
+ zap_buffer_no_jh:
+ 	spin_unlock(&journal->j_list_lock);
+@@ -2024,7 +2047,8 @@ void journal_invalidatepage(journal_t *journal,
+ 		if (offset <= curr_off) {
+ 			/* This block is wholly outside the truncation point */
+ 			lock_buffer(bh);
+-			may_free &= journal_unmap_buffer(journal, bh);
++			may_free &= journal_unmap_buffer(journal, bh,
++							 offset > 0);
+ 			unlock_buffer(bh);
+ 		}
+ 		curr_off = next_off;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 7ef14b3..e4fb3ba 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -7,7 +7,6 @@
+  */
+ 
+ #include <linux/types.h>
+-#include <linux/utsname.h>
+ #include <linux/kernel.h>
+ #include <linux/ktime.h>
+ #include <linux/slab.h>
+@@ -19,6 +18,8 @@
+ 
+ #include <asm/unaligned.h>
+ 
++#include "netns.h"
++
+ #define NLMDBG_FACILITY		NLMDBG_MONITOR
+ #define NSM_PROGRAM		100024
+ #define NSM_VERSION		1
+@@ -40,6 +41,7 @@ struct nsm_args {
+ 	u32			proc;
+ 
+ 	char			*mon_name;
++	char			*nodename;
+ };
+ 
+ struct nsm_res {
+@@ -70,7 +72,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ 	};
+ 	struct rpc_create_args args = {
+ 		.net			= net,
+-		.protocol		= XPRT_TRANSPORT_UDP,
++		.protocol		= XPRT_TRANSPORT_TCP,
+ 		.address		= (struct sockaddr *)&sin,
+ 		.addrsize		= sizeof(sin),
+ 		.servername		= "rpc.statd",
+@@ -83,10 +85,54 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ 	return rpc_create(&args);
+ }
+ 
+-static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+-			 struct net *net)
++static struct rpc_clnt *nsm_client_get(struct net *net)
+ {
++	static DEFINE_MUTEX(nsm_create_mutex);
+ 	struct rpc_clnt	*clnt;
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
++
++	spin_lock(&ln->nsm_clnt_lock);
++	if (ln->nsm_users) {
++		ln->nsm_users++;
++		clnt = ln->nsm_clnt;
++		spin_unlock(&ln->nsm_clnt_lock);
++		goto out;
++	}
++	spin_unlock(&ln->nsm_clnt_lock);
++
++	mutex_lock(&nsm_create_mutex);
++	clnt = nsm_create(net);
++	if (!IS_ERR(clnt)) {
++		ln->nsm_clnt = clnt;
++		smp_wmb();
++		ln->nsm_users = 1;
++	}
++	mutex_unlock(&nsm_create_mutex);
++out:
++	return clnt;
++}
++
++static void nsm_client_put(struct net *net)
++{
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
++	struct rpc_clnt	*clnt = ln->nsm_clnt;
++	int shutdown = 0;
++
++	spin_lock(&ln->nsm_clnt_lock);
++	if (ln->nsm_users) {
++		if (--ln->nsm_users)
++			ln->nsm_clnt = NULL;
++		shutdown = !ln->nsm_users;
++	}
++	spin_unlock(&ln->nsm_clnt_lock);
++
++	if (shutdown)
++		rpc_shutdown_client(clnt);
++}
++
++static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
++			 struct rpc_clnt *clnt)
++{
+ 	int		status;
+ 	struct nsm_args args = {
+ 		.priv		= &nsm->sm_priv,
+@@ -94,31 +140,24 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+ 		.vers		= 3,
+ 		.proc		= NLMPROC_NSM_NOTIFY,
+ 		.mon_name	= nsm->sm_mon_name,
++		.nodename	= clnt->cl_nodename,
+ 	};
+ 	struct rpc_message msg = {
+ 		.rpc_argp	= &args,
+ 		.rpc_resp	= res,
+ 	};
+ 
+-	clnt = nsm_create(net);
+-	if (IS_ERR(clnt)) {
+-		status = PTR_ERR(clnt);
+-		dprintk("lockd: failed to create NSM upcall transport, "
+-				"status=%d\n", status);
+-		goto out;
+-	}
++	BUG_ON(clnt == NULL);
+ 
+ 	memset(res, 0, sizeof(*res));
+ 
+ 	msg.rpc_proc = &clnt->cl_procinfo[proc];
+-	status = rpc_call_sync(clnt, &msg, 0);
++	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
+ 	if (status < 0)
+ 		dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ 				status);
+ 	else
+ 		status = 0;
+-	rpc_shutdown_client(clnt);
+- out:
+ 	return status;
+ }
+ 
+@@ -138,6 +177,7 @@ int nsm_monitor(const struct nlm_host *host)
+ 	struct nsm_handle *nsm = host->h_nsmhandle;
+ 	struct nsm_res	res;
+ 	int		status;
++	struct rpc_clnt *clnt;
+ 
+ 	dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
+ 
+@@ -150,7 +190,15 @@ int nsm_monitor(const struct nlm_host *host)
+ 	 */
+ 	nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
+ 
+-	status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net);
++	clnt = nsm_client_get(host->net);
++	if (IS_ERR(clnt)) {
++		status = PTR_ERR(clnt);
++		dprintk("lockd: failed to create NSM upcall transport, "
++				"status=%d, net=%p\n", status, host->net);
++		return status;
++	}
++
++	status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, clnt);
+ 	if (unlikely(res.status != 0))
+ 		status = -EIO;
+ 	if (unlikely(status < 0)) {
+@@ -182,9 +230,11 @@ void nsm_unmonitor(const struct nlm_host *host)
+ 
+ 	if (atomic_read(&nsm->sm_count) == 1
+ 	 && nsm->sm_monitored && !nsm->sm_sticky) {
++		struct lockd_net *ln = net_generic(host->net, lockd_net_id);
++
+ 		dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
+ 
+-		status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net);
++		status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, ln->nsm_clnt);
+ 		if (res.status != 0)
+ 			status = -EIO;
+ 		if (status < 0)
+@@ -192,6 +242,8 @@ void nsm_unmonitor(const struct nlm_host *host)
+ 					nsm->sm_name);
+ 		else
+ 			nsm->sm_monitored = 0;
++
++		nsm_client_put(host->net);
+ 	}
+ }
+ 
+@@ -430,7 +482,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ 	__be32 *p;
+ 
+-	encode_nsm_string(xdr, utsname()->nodename);
++	encode_nsm_string(xdr, argp->nodename);
+ 	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ 	*p++ = cpu_to_be32(argp->prog);
+ 	*p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
+index 4eee248..5010b55 100644
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -12,6 +12,10 @@ struct lockd_net {
+ 	struct delayed_work grace_period_end;
+ 	struct lock_manager lockd_manager;
+ 	struct list_head grace_list;
++
++	spinlock_t nsm_clnt_lock;
++	unsigned int nsm_users;
++	struct rpc_clnt *nsm_clnt;
+ };
+ 
+ extern int lockd_net_id;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 31a63f8..7e35587 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -596,6 +596,7 @@ static int lockd_init_net(struct net *net)
+ 
+ 	INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+ 	INIT_LIST_HEAD(&ln->grace_list);
++	spin_lock_init(&ln->nsm_clnt_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/namei.c b/fs/namei.c
+index dd1ed1b..81bd546 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -692,9 +692,9 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
+ 	if (parent->i_uid == inode->i_uid)
+ 		return 0;
+ 
++	audit_log_link_denied("follow_link", link);
+ 	path_put_conditional(link, nd);
+ 	path_put(&nd->path);
+-	audit_log_link_denied("follow_link", link);
+ 	return -EACCES;
+ }
+ 
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index dd392ed..f3d16ad 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ 	return bio;
+ }
+ 
+-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
+ 				      sector_t isect, struct page *page,
+ 				      struct pnfs_block_extent *be,
+ 				      void (*end_io)(struct bio *, int err),
+-				      struct parallel_io *par)
++				      struct parallel_io *par,
++				      unsigned int offset, int len)
+ {
++	isect = isect + (offset >> SECTOR_SHIFT);
++	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
++		npg, rw, (unsigned long long)isect, offset, len);
+ retry:
+ 	if (!bio) {
+ 		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ 		if (!bio)
+ 			return ERR_PTR(-ENOMEM);
+ 	}
+-	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++	if (bio_add_page(bio, page, len, offset) < len) {
+ 		bio = bl_submit_bio(rw, bio);
+ 		goto retry;
+ 	}
+ 	return bio;
+ }
+ 
++static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++				      sector_t isect, struct page *page,
++				      struct pnfs_block_extent *be,
++				      void (*end_io)(struct bio *, int err),
++				      struct parallel_io *par)
++{
++	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
++				  end_io, par, 0, PAGE_CACHE_SIZE);
++}
++
+ /* This is basically copied from mpage_end_io_read */
+ static void bl_end_io_read(struct bio *bio, int err)
+ {
+@@ -461,6 +475,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+ 	return;
+ }
+ 
++static void
++bl_read_single_end_io(struct bio *bio, int error)
++{
++	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++	struct page *page = bvec->bv_page;
++
++	/* Only one page in bvec */
++	unlock_page(page);
++}
++
++static int
++bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
++		    unsigned int offset, unsigned int len)
++{
++	struct bio *bio;
++	struct page *shadow_page;
++	sector_t isect;
++	char *kaddr, *kshadow_addr;
++	int ret = 0;
++
++	dprintk("%s: offset %u len %u\n", __func__, offset, len);
++
++	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++	if (shadow_page == NULL)
++		return -ENOMEM;
++
++	bio = bio_alloc(GFP_NOIO, 1);
++	if (bio == NULL)
++		return -ENOMEM;
++
++	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
++		(offset / SECTOR_SIZE);
++
++	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++	bio->bi_bdev = be->be_mdev;
++	bio->bi_end_io = bl_read_single_end_io;
++
++	lock_page(shadow_page);
++	if (bio_add_page(bio, shadow_page,
++			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
++		unlock_page(shadow_page);
++		bio_put(bio);
++		return -EIO;
++	}
++
++	submit_bio(READ, bio);
++	wait_on_page_locked(shadow_page);
++	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
++		ret = -EIO;
++	} else {
++		kaddr = kmap_atomic(page);
++		kshadow_addr = kmap_atomic(shadow_page);
++		memcpy(kaddr + offset, kshadow_addr + offset, len);
++		kunmap_atomic(kshadow_addr);
++		kunmap_atomic(kaddr);
++	}
++	__free_page(shadow_page);
++	bio_put(bio);
++
++	return ret;
++}
++
++static int
++bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
++			  unsigned int dirty_offset, unsigned int dirty_len,
++			  bool full_page)
++{
++	int ret = 0;
++	unsigned int start, end;
++
++	if (full_page) {
++		start = 0;
++		end = PAGE_CACHE_SIZE;
++	} else {
++		start = round_down(dirty_offset, SECTOR_SIZE);
++		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
++	}
++
++	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
++	if (!be) {
++		zero_user_segments(page, start, dirty_offset,
++				   dirty_offset + dirty_len, end);
++		if (start == 0 && end == PAGE_CACHE_SIZE &&
++		    trylock_page(page)) {
++			SetPageUptodate(page);
++			unlock_page(page);
++		}
++		return ret;
++	}
++
++	if (start != dirty_offset)
++		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
++
++	if (!ret && (dirty_offset + dirty_len < end))
++		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
++					  end - dirty_offset - dirty_len);
++
++	return ret;
++}
++
+ /* Given an unmapped page, zero it or read in page for COW, page is locked
+  * by caller.
+  */
+@@ -494,7 +608,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+ 	SetPageUptodate(page);
+ 
+ cleanup:
+-	bl_put_extent(cow_read);
+ 	if (bh)
+ 		free_buffer_head(bh);
+ 	if (ret) {
+@@ -566,6 +679,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+ 	struct parallel_io *par = NULL;
+ 	loff_t offset = wdata->args.offset;
+ 	size_t count = wdata->args.count;
++	unsigned int pg_offset, pg_len, saved_len;
+ 	struct page **pages = wdata->args.pages;
+ 	struct page *page;
+ 	pgoff_t index;
+@@ -674,10 +788,11 @@ next_page:
+ 		if (!extent_length) {
+ 			/* We've used up the previous extent */
+ 			bl_put_extent(be);
++			bl_put_extent(cow_read);
+ 			bio = bl_submit_bio(WRITE, bio);
+ 			/* Get the next one */
+ 			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
+-					     isect, NULL);
++					     isect, &cow_read);
+ 			if (!be || !is_writable(be, isect)) {
+ 				header->pnfs_error = -EINVAL;
+ 				goto out;
+@@ -694,7 +809,26 @@ next_page:
+ 			extent_length = be->be_length -
+ 			    (isect - be->be_f_offset);
+ 		}
+-		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
++
++		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
++		pg_offset = offset & ~PAGE_CACHE_MASK;
++		if (pg_offset + count > PAGE_CACHE_SIZE)
++			pg_len = PAGE_CACHE_SIZE - pg_offset;
++		else
++			pg_len = count;
++
++		saved_len = pg_len;
++		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
++		    !bl_is_sector_init(be->be_inval, isect)) {
++			ret = bl_read_partial_page_sync(pages[i], cow_read,
++							pg_offset, pg_len, true);
++			if (ret) {
++				dprintk("%s bl_read_partial_page_sync fail %d\n",
++					__func__, ret);
++				header->pnfs_error = ret;
++				goto out;
++			}
++
+ 			ret = bl_mark_sectors_init(be->be_inval, isect,
+ 						       PAGE_CACHE_SECTORS);
+ 			if (unlikely(ret)) {
+@@ -703,15 +837,35 @@ next_page:
+ 				header->pnfs_error = ret;
+ 				goto out;
+ 			}
++
++			/* Expand to full page write */
++			pg_offset = 0;
++			pg_len = PAGE_CACHE_SIZE;
++		} else if  ((pg_offset & (SECTOR_SIZE - 1)) ||
++			    (pg_len & (SECTOR_SIZE - 1))){
++			/* ahh, nasty case. We have to do sync full sector
++			 * read-modify-write cycles.
++			 */
++			unsigned int saved_offset = pg_offset;
++			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
++							pg_len, false);
++			pg_offset = round_down(pg_offset, SECTOR_SIZE);
++			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
++				 - pg_offset;
+ 		}
+-		bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
++
++
++		bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
+ 					 isect, pages[i], be,
+-					 bl_end_io_write, par);
++					 bl_end_io_write, par,
++					 pg_offset, pg_len);
+ 		if (IS_ERR(bio)) {
+ 			header->pnfs_error = PTR_ERR(bio);
+ 			bio = NULL;
+ 			goto out;
+ 		}
++		offset += saved_len;
++		count -= saved_len;
+ 		isect += PAGE_CACHE_SECTORS;
+ 		last_isect = isect;
+ 		extent_length -= PAGE_CACHE_SECTORS;
+@@ -729,17 +883,16 @@ next_page:
+ 	}
+ 
+ write_done:
+-	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+-	if (count < wdata->res.count) {
+-		wdata->res.count = count;
+-	}
++	wdata->res.count = wdata->args.count;
+ out:
+ 	bl_put_extent(be);
++	bl_put_extent(cow_read);
+ 	bl_submit_bio(WRITE, bio);
+ 	put_parallel(par);
+ 	return PNFS_ATTEMPTED;
+ out_mds:
+ 	bl_put_extent(be);
++	bl_put_extent(cow_read);
+ 	kfree(par);
+ 	return PNFS_NOT_ATTEMPTED;
+ }
+diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
+index 0335069..39bb51a 100644
+--- a/fs/nfs/blocklayout/blocklayout.h
++++ b/fs/nfs/blocklayout/blocklayout.h
+@@ -41,6 +41,7 @@
+ 
+ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+ #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+ 
+ struct block_mount_id {
+ 	spinlock_t			bm_lock;    /* protects list */
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 9969444..0e7cd89 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -855,7 +855,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
+ 	if (server->wsize > NFS_MAX_FILE_IO_SIZE)
+ 		server->wsize = NFS_MAX_FILE_IO_SIZE;
+ 	server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+-	server->pnfs_blksize = fsinfo->blksize;
+ 
+ 	server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 1e50326..d5a0cf1 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1774,7 +1774,11 @@ static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
+ 		 * informs us the stateid is unrecognized. */
+ 		if (status != -NFS4ERR_BAD_STATEID)
+ 			nfs41_free_stateid(server, stateid);
++		nfs_remove_bad_delegation(state->inode);
+ 
++		write_seqlock(&state->seqlock);
++		nfs4_stateid_copy(&state->stateid, &state->open_stateid);
++		write_sequnlock(&state->seqlock);
+ 		clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ 	}
+ }
+@@ -3362,8 +3366,11 @@ static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, s
+ 
+ 	nfs_fattr_init(fsinfo->fattr);
+ 	error = nfs4_do_fsinfo(server, fhandle, fsinfo);
+-	if (error == 0)
++	if (error == 0) {
++		/* block layout checks this! */
++		server->pnfs_blksize = fsinfo->blksize;
+ 		set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
++	}
+ 
+ 	return error;
+ }
+diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
+index fdc91a6..ccfe0d0 100644
+--- a/fs/nfsd/nfs4idmap.c
++++ b/fs/nfsd/nfs4idmap.c
+@@ -598,7 +598,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
+ 	/* Just to make sure it's null-terminated: */
+ 	memcpy(buf, name, namelen);
+ 	buf[namelen] = '\0';
+-	ret = kstrtouint(name, 10, id);
++	ret = kstrtouint(buf, 10, id);
+ 	return ret == 0;
+ }
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index cc894ed..5b3224c 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1223,10 +1223,26 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
+ 	return true;
+ }
+ 
++/*
++ * RFC 3530 language requires clid_inuse be returned when the
++ * "principal" associated with a requests differs from that previously
++ * used.  We use uid, gid's, and gss principal string as our best
++ * approximation.  We also don't want to allow non-gss use of a client
++ * established using gss: in theory cr_principal should catch that
++ * change, but in practice cr_principal can be null even in the gss case
++ * since gssd doesn't always pass down a principal string.
++ */
++static bool is_gss_cred(struct svc_cred *cr)
++{
++	/* Is cr_flavor one of the gss "pseudoflavors"?: */
++	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
++}
++
++
+ static bool
+ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
+ {
+-	if ((cr1->cr_flavor != cr2->cr_flavor)
++	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
+ 		|| (cr1->cr_uid != cr2->cr_uid)
+ 		|| (cr1->cr_gid != cr2->cr_gid)
+ 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
+@@ -3766,6 +3782,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ 
+ 	nfsd4_close_open_stateid(stp);
++	release_last_closed_stateid(oo);
+ 	oo->oo_last_closed_stid = stp;
+ 
+ 	if (list_empty(&oo->oo_owner.so_stateids)) {
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 855da58..63ce6be 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ 			reiserfs_warning(sb, "reiserfs-13077",
+ 				"nfsd/reiserfs, fhtype=%d, len=%d - odd",
+ 				fh_type, fh_len);
+-		fh_type = 5;
++		fh_type = fh_len;
+ 	}
++	if (fh_len < 2)
++		return NULL;
+ 
+ 	return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
+ 		(fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
+@@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ 		int fh_len, int fh_type)
+ {
++	if (fh_type > fh_len)
++		fh_type = fh_len;
+ 	if (fh_type < 4)
+ 		return NULL;
+ 
+diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
+index 4267922..8c6d1d7 100644
+--- a/fs/xfs/xfs_export.c
++++ b/fs/xfs/xfs_export.c
+@@ -189,6 +189,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ 	struct xfs_fid64	*fid64 = (struct xfs_fid64 *)fid;
+ 	struct inode		*inode = NULL;
+ 
++	if (fh_len < xfs_fileid_length(fileid_type))
++		return NULL;
++
+ 	switch (fileid_type) {
+ 	case FILEID_INO32_GEN_PARENT:
+ 		inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 57977c6..e5cf2c8 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -212,9 +212,6 @@ typedef enum {
+ #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
+ 					&& (chip->page_shift > 9))
+ 
+-/* Mask to zero out the chip options, which come from the id table */
+-#define NAND_CHIPOPTIONS_MSK	0x0000ffff
+-
+ /* Non chip related options */
+ /* This option skips the bbt scan during initialization. */
+ #define NAND_SKIP_BBTSCAN	0x00010000
+diff --git a/kernel/audit.c b/kernel/audit.c
+index ea3b7b6..a8c84be 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1466,6 +1466,8 @@ void audit_log_link_denied(const char *operation, struct path *link)
+ 
+ 	ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ 			     AUDIT_ANOM_LINK);
++	if (!ab)
++		return;
+ 	audit_log_format(ab, "op=%s action=denied", operation);
+ 	audit_log_format(ab, " pid=%d comm=", current->pid);
+ 	audit_log_untrustedstring(ab, current->comm);
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 0a69d2a..14ff484 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -552,6 +552,7 @@ int vkdb_printf(const char *fmt, va_list ap)
+ {
+ 	int diag;
+ 	int linecount;
++	int colcount;
+ 	int logging, saved_loglevel = 0;
+ 	int saved_trap_printk;
+ 	int got_printf_lock = 0;
+@@ -584,6 +585,10 @@ int vkdb_printf(const char *fmt, va_list ap)
+ 	if (diag || linecount <= 1)
+ 		linecount = 24;
+ 
++	diag = kdbgetintenv("COLUMNS", &colcount);
++	if (diag || colcount <= 1)
++		colcount = 80;
++
+ 	diag = kdbgetintenv("LOGGING", &logging);
+ 	if (diag)
+ 		logging = 0;
+@@ -690,7 +695,7 @@ kdb_printit:
+ 		gdbstub_msg_write(kdb_buffer, retlen);
+ 	} else {
+ 		if (dbg_io_ops && !dbg_io_ops->is_console) {
+-			len = strlen(kdb_buffer);
++			len = retlen;
+ 			cp = kdb_buffer;
+ 			while (len--) {
+ 				dbg_io_ops->write_char(*cp);
+@@ -709,11 +714,29 @@ kdb_printit:
+ 		printk(KERN_INFO "%s", kdb_buffer);
+ 	}
+ 
+-	if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+-		kdb_nextline++;
++	if (KDB_STATE(PAGER)) {
++		/*
++		 * Check printed string to decide how to bump the
++		 * kdb_nextline to control when the more prompt should
++		 * show up.
++		 */
++		int got = 0;
++		len = retlen;
++		while (len--) {
++			if (kdb_buffer[len] == '\n') {
++				kdb_nextline++;
++				got = 0;
++			} else if (kdb_buffer[len] == '\r') {
++				got = 0;
++			} else {
++				got++;
++			}
++		}
++		kdb_nextline += got / (colcount + 1);
++	}
+ 
+ 	/* check for having reached the LINES number of printed lines */
+-	if (kdb_nextline == linecount) {
++	if (kdb_nextline >= linecount) {
+ 		char buf1[16] = "";
+ 
+ 		/* Watch out for recursion here.  Any routine that calls
+@@ -765,7 +788,7 @@ kdb_printit:
+ 			kdb_grepping_flag = 0;
+ 			kdb_printf("\n");
+ 		} else if (buf1[0] == ' ') {
+-			kdb_printf("\n");
++			kdb_printf("\r");
+ 			suspend_grep = 1; /* for this recursion */
+ 		} else if (buf1[0] == '\n') {
+ 			kdb_nextline = linecount - 1;
+diff --git a/kernel/module.c b/kernel/module.c
+index 4edbd9c..9ad9ee9 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2730,6 +2730,10 @@ static int check_module_license_and_versions(struct module *mod)
+ 	if (strcmp(mod->name, "driverloader") == 0)
+ 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+ 
++	/* lve claims to be GPL but upstream won't provide source */
++	if (strcmp(mod->name, "lve") == 0)
++		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
++
+ #ifdef CONFIG_MODVERSIONS
+ 	if ((mod->num_syms && !mod->crcs)
+ 	    || (mod->num_gpl_syms && !mod->gpl_crcs)
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 3a9e5d5..e430b97 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -835,7 +835,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
+ 		 */
+ 		if (ts->tick_stopped) {
+ 			touch_softlockup_watchdog();
+-			if (idle_cpu(cpu))
++			if (is_idle_task(current))
+ 				ts->idle_jiffies++;
+ 		}
+ 		update_process_times(user_mode(regs));
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d3b91e7..f791637 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1111,7 +1111,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+ 	accumulate_nsecs_to_secs(tk);
+ 
+ 	/* Accumulate raw time */
+-	raw_nsecs = tk->raw_interval << shift;
++	raw_nsecs = (u64)tk->raw_interval << shift;
+ 	raw_nsecs += tk->raw_time.tv_nsec;
+ 	if (raw_nsecs >= NSEC_PER_SEC) {
+ 		u64 raw_secs = raw_nsecs;
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 8c5e7b9..46ef2b1 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
+ #define TVR_SIZE (1 << TVR_BITS)
+ #define TVN_MASK (TVN_SIZE - 1)
+ #define TVR_MASK (TVR_SIZE - 1)
++#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+ 
+ struct tvec {
+ 	struct list_head vec[TVN_SIZE];
+@@ -358,11 +359,12 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+ 		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+ 	} else {
+ 		int i;
+-		/* If the timeout is larger than 0xffffffff on 64-bit
+-		 * architectures then we use the maximum timeout:
++		/* If the timeout is larger than MAX_TVAL (on 64-bit
++		 * architectures or with CONFIG_BASE_SMALL=1) then we
++		 * use the maximum timeout.
+ 		 */
+-		if (idx > 0xffffffffUL) {
+-			idx = 0xffffffffUL;
++		if (idx > MAX_TVAL) {
++			idx = MAX_TVAL;
+ 			expires = idx + base->timer_jiffies;
+ 		}
+ 		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d4e184e..d2eeca1 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2366,12 +2366,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ {
+ 	struct inode *inode;
+ 	struct dentry *dentry = NULL;
+-	u64 inum = fid->raw[2];
+-	inum = (inum << 32) | fid->raw[1];
++	u64 inum;
+ 
+ 	if (fh_len < 3)
+ 		return NULL;
+ 
++	inum = fid->raw[2];
++	inum = (inum << 32) | fid->raw[1];
++
+ 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ 			shmem_match, fid->raw);
+ 	if (inode) {
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 148e73d..e356b8d 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2927,7 +2927,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
+ 		  pkt_dev->pkt_overhead;
+ 
+-	if (datalen < sizeof(struct pktgen_hdr)) {
++	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
+ 		datalen = sizeof(struct pktgen_hdr);
+ 		net_info_ratelimited("increased datalen to %d\n", datalen);
+ 	}
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 8cd7291..118329a 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -34,7 +34,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
+ 		skb_queue_len(&local->skb_queue_unreliable);
+ 	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
+ 	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
+-		dev_kfree_skb_irq(skb);
++		ieee80211_free_txskb(hw, skb);
+ 		tmp--;
+ 		I802_DEBUG_INC(local->tx_status_drop);
+ 	}
+@@ -159,7 +159,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
+ 			   "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+ 			   skb_queue_len(&sta->tx_filtered[ac]),
+ 			   !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
+-	dev_kfree_skb(skb);
++	ieee80211_free_txskb(&local->hw, skb);
+ }
+ 
+ static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index c5e8c9c..362c418 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -354,7 +354,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
+ 			total += skb_queue_len(&sta->ps_tx_buf[ac]);
+ 			if (skb) {
+ 				purged++;
+-				dev_kfree_skb(skb);
++				ieee80211_free_txskb(&local->hw, skb);
+ 				break;
+ 			}
+ 		}
+@@ -466,7 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ 			ps_dbg(tx->sdata,
+ 			       "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ 			       sta->sta.addr, ac);
+-			dev_kfree_skb(old);
++			ieee80211_free_txskb(&local->hw, old);
+ 		} else
+ 			tx->local->total_ps_buffered++;
+ 
+@@ -1103,7 +1103,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+ 		spin_unlock(&tx->sta->lock);
+ 
+ 		if (purge_skb)
+-			dev_kfree_skb(purge_skb);
++			ieee80211_free_txskb(&tx->local->hw, purge_skb);
+ 	}
+ 
+ 	/* reset session timer */
+@@ -1214,7 +1214,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
+ #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ 		if (WARN_ON_ONCE(q >= local->hw.queues)) {
+ 			__skb_unlink(skb, skbs);
+-			dev_kfree_skb(skb);
++			ieee80211_free_txskb(&local->hw, skb);
+ 			continue;
+ 		}
+ #endif
+@@ -1356,7 +1356,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
+ 	if (unlikely(res == TX_DROP)) {
+ 		I802_DEBUG_INC(tx->local->tx_handlers_drop);
+ 		if (tx->skb)
+-			dev_kfree_skb(tx->skb);
++			ieee80211_free_txskb(&tx->local->hw, tx->skb);
+ 		else
+ 			__skb_queue_purge(&tx->skbs);
+ 		return -1;
+@@ -1393,7 +1393,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+ 	res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
+ 
+ 	if (unlikely(res_prepare == TX_DROP)) {
+-		dev_kfree_skb(skb);
++		ieee80211_free_txskb(&local->hw, skb);
+ 		goto out;
+ 	} else if (unlikely(res_prepare == TX_QUEUED)) {
+ 		goto out;
+@@ -1466,7 +1466,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+ 	headroom = max_t(int, 0, headroom);
+ 
+ 	if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
+-		dev_kfree_skb(skb);
++		ieee80211_free_txskb(&local->hw, skb);
+ 		rcu_read_unlock();
+ 		return;
+ 	}
+@@ -2060,8 +2060,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ 		head_need += IEEE80211_ENCRYPT_HEADROOM;
+ 		head_need += local->tx_headroom;
+ 		head_need = max_t(int, 0, head_need);
+-		if (ieee80211_skb_resize(sdata, skb, head_need, true))
+-			goto fail;
++		if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
++			ieee80211_free_txskb(&local->hw, skb);
++			return NETDEV_TX_OK;
++		}
+ 	}
+ 
+ 	if (encaps_data) {
+@@ -2196,7 +2198,7 @@ void ieee80211_tx_pending(unsigned long data)
+ 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 
+ 			if (WARN_ON(!info->control.vif)) {
+-				kfree_skb(skb);
++				ieee80211_free_txskb(&local->hw, skb);
+ 				continue;
+ 			}
+ 
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index a35b8e5..d1988cf 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1025,6 +1025,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
+ 	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
++/*
++ * Helper function to force a TCP close if the server is sending
++ * junk and/or it has put us in CLOSE_WAIT
++ */
++static void xs_tcp_force_close(struct rpc_xprt *xprt)
++{
++	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
++	xprt_force_disconnect(xprt);
++}
++
+ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+ {
+ 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1051,7 +1061,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
+ 	/* Sanity check of the record length */
+ 	if (unlikely(transport->tcp_reclen < 8)) {
+ 		dprintk("RPC:       invalid TCP record fragment length\n");
+-		xprt_force_disconnect(xprt);
++		xs_tcp_force_close(xprt);
+ 		return;
+ 	}
+ 	dprintk("RPC:       reading TCP record fragment of length %d\n",
+@@ -1132,7 +1142,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ 		break;
+ 	default:
+ 		dprintk("RPC:       invalid request message type\n");
+-		xprt_force_disconnect(&transport->xprt);
++		xs_tcp_force_close(&transport->xprt);
+ 	}
+ 	xs_tcp_check_fraghdr(transport);
+ }
+@@ -1455,6 +1465,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ {
+ 	smp_mb__before_clear_bit();
++	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
++	clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ 	clear_bit(XPRT_CLOSING, &xprt->state);
+ 	smp_mb__after_clear_bit();
+@@ -1512,8 +1524,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ 		break;
+ 	case TCP_CLOSE_WAIT:
+ 		/* The server initiated a shutdown of the socket */
+-		xprt_force_disconnect(xprt);
+ 		xprt->connect_cookie++;
++		xs_tcp_force_close(xprt);
+ 	case TCP_CLOSING:
+ 		/*
+ 		 * If the server closed down the connection, make sure that
+@@ -2199,8 +2211,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 		/* We're probably in TIME_WAIT. Get rid of existing socket,
+ 		 * and retry
+ 		 */
+-		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+-		xprt_force_disconnect(xprt);
++		xs_tcp_force_close(xprt);
+ 		break;
+ 	case -ECONNREFUSED:
+ 	case -ECONNRESET:
+diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
+index c3f69ae..4d908d1 100644
+--- a/scripts/Makefile.fwinst
++++ b/scripts/Makefile.fwinst
+@@ -27,7 +27,7 @@ endif
+ installed-mod-fw := $(addprefix $(INSTALL_FW_PATH)/,$(mod-fw))
+ 
+ installed-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-all))
+-installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/.
++installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/./
+ 
+ # Workaround for make < 3.81, where .SECONDEXPANSION doesn't work.
+ PHONY += $(INSTALL_FW_PATH)/$$(%) install-all-dirs
+@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
+ $(installed-fw-dirs):
+ 	$(call cmd,mkdir)
+ 
+-$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
++$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %)
+ 	$(call cmd,install)
+ 
+ PHONY +=  __fw_install __fw_modinst FORCE
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 9473fca..8b0f996 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
+ 		tmp.index = ac97->num;
+ 		kctl = snd_ctl_new1(&tmp, ac97);
+ 	}
++	if (!kctl)
++		return -ENOMEM;
+ 	if (reg >= AC97_PHONE && reg <= AC97_PCM)
+ 		set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
+ 	else
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index 7549240..a78fdf4 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ 	 .ca0108_chip = 1,
+ 	 .spk71 = 1,
+ 	 .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
++	/* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
++	/* This is MAEM8986, 0202 is MAEM8980 */
++	{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
++	 .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
++	 .id = "EMU1010",
++	 .emu10k2_chip = 1,
++	 .ca0108_chip = 1,
++	 .spk71 = 1,
++	 .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
+ 	/* Tested by James@superbug.co.uk 8th July 2005. */
+ 	/* This is MAEM8810, 0202 is MAEM8820 */
+ 	{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 12a9432..a5dc746 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -487,6 +487,7 @@ struct azx {
+ 
+ 	/* VGA-switcheroo setup */
+ 	unsigned int use_vga_switcheroo:1;
++	unsigned int vga_switcheroo_registered:1;
+ 	unsigned int init_failed:1; /* delayed init failed */
+ 	unsigned int disabled:1; /* disabled by VGA-switcher */
+ 
+@@ -2135,9 +2136,12 @@ static unsigned int azx_get_position(struct azx *chip,
+ 		if (delay < 0)
+ 			delay += azx_dev->bufsize;
+ 		if (delay >= azx_dev->period_bytes) {
+-			snd_printdd("delay %d > period_bytes %d\n",
+-				delay, azx_dev->period_bytes);
+-			delay = 0; /* something is wrong */
++			snd_printk(KERN_WARNING SFX
++				   "Unstable LPIB (%d >= %d); "
++				   "disabling LPIB delay counting\n",
++				   delay, azx_dev->period_bytes);
++			delay = 0;
++			chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
+ 		}
+ 		azx_dev->substream->runtime->delay =
+ 			bytes_to_frames(azx_dev->substream->runtime, delay);
+@@ -2556,7 +2560,9 @@ static void azx_vs_set_state(struct pci_dev *pci,
+ 		if (disabled) {
+ 			azx_suspend(&pci->dev);
+ 			chip->disabled = true;
+-			snd_hda_lock_devices(chip->bus);
++			if (snd_hda_lock_devices(chip->bus))
++				snd_printk(KERN_WARNING SFX
++					   "Cannot lock devices!\n");
+ 		} else {
+ 			snd_hda_unlock_devices(chip->bus);
+ 			chip->disabled = false;
+@@ -2599,14 +2605,20 @@ static const struct vga_switcheroo_client_ops azx_vs_ops = {
+ 
+ static int __devinit register_vga_switcheroo(struct azx *chip)
+ {
++	int err;
++
+ 	if (!chip->use_vga_switcheroo)
+ 		return 0;
+ 	/* FIXME: currently only handling DIS controller
+ 	 * is there any machine with two switchable HDMI audio controllers?
+ 	 */
+-	return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
++	err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+ 						    VGA_SWITCHEROO_DIS,
+ 						    chip->bus != NULL);
++	if (err < 0)
++		return err;
++	chip->vga_switcheroo_registered = 1;
++	return 0;
+ }
+ #else
+ #define init_vga_switcheroo(chip)		/* NOP */
+@@ -2626,7 +2638,8 @@ static int azx_free(struct azx *chip)
+ 	if (use_vga_switcheroo(chip)) {
+ 		if (chip->disabled && chip->bus)
+ 			snd_hda_unlock_devices(chip->bus);
+-		vga_switcheroo_unregister_client(chip->pci);
++		if (chip->vga_switcheroo_registered)
++			vga_switcheroo_unregister_client(chip->pci);
+ 	}
+ 
+ 	if (chip->initialized) {
+@@ -2974,14 +2987,6 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
+ 	}
+ 
+  ok:
+-	err = register_vga_switcheroo(chip);
+-	if (err < 0) {
+-		snd_printk(KERN_ERR SFX
+-			   "Error registering VGA-switcheroo client\n");
+-		azx_free(chip);
+-		return err;
+-	}
+-
+ 	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ 	if (err < 0) {
+ 		snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+@@ -3208,6 +3213,13 @@ static int __devinit azx_probe(struct pci_dev *pci,
+ 
+ 	pci_set_drvdata(pci, card);
+ 
++	err = register_vga_switcheroo(chip);
++	if (err < 0) {
++		snd_printk(KERN_ERR SFX
++			   "Error registering VGA-switcheroo client\n");
++		goto out_free;
++	}
++
+ 	dev++;
+ 	return 0;
+ 
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 0c4c1a6..cc31346 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -1417,7 +1417,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ 	return 0;
+ 
+  error:
+-	kfree(codec->spec);
++	cs_free(codec);
+ 	codec->spec = NULL;
+ 	return err;
+ }
+@@ -1974,7 +1974,7 @@ static int patch_cs4210(struct hda_codec *codec)
+ 	return 0;
+ 
+  error:
+-	kfree(codec->spec);
++	cs_free(codec);
+ 	codec->spec = NULL;
+ 	return err;
+ }
+@@ -1999,7 +1999,7 @@ static int patch_cs4213(struct hda_codec *codec)
+ 	return 0;
+ 
+  error:
+-	kfree(codec->spec);
++	cs_free(codec);
+ 	codec->spec = NULL;
+ 	return err;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 56a3eef..155cbd2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -611,6 +611,8 @@ static void alc_line_automute(struct hda_codec *codec)
+ {
+ 	struct alc_spec *spec = codec->spec;
+ 
++	if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
++		return;
+ 	/* check LO jack only when it's different from HP */
+ 	if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+ 		return;
+@@ -2627,8 +2629,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
+ 			return "PCM";
+ 		break;
+ 	}
+-	if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name)))
++	if (ch >= ARRAY_SIZE(channel_name)) {
++		snd_BUG();
+ 		return "PCM";
++	}
+ 
+ 	return channel_name[ch];
+ }
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 4b4072f..4c404a0 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -118,6 +118,8 @@ enum {
+ };
+ 
+ struct via_spec {
++	struct hda_gen_spec gen;
++
+ 	/* codec parameterization */
+ 	const struct snd_kcontrol_new *mixers[6];
+ 	unsigned int num_mixers;
+@@ -246,6 +248,7 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
+ 	/* VT1708BCE & VT1708S are almost same */
+ 	if (spec->codec_type == VT1708BCE)
+ 		spec->codec_type = VT1708S;
++	snd_hda_gen_init(&spec->gen);
+ 	return spec;
+ }
+ 
+@@ -1628,6 +1631,7 @@ static void via_free(struct hda_codec *codec)
+ 	vt1708_stop_hp_work(spec);
+ 	kfree(spec->bind_cap_vol);
+ 	kfree(spec->bind_cap_sw);
++	snd_hda_gen_free(&spec->gen);
+ 	kfree(spec);
+ }
+ 
+diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
+index 32682c1..c8bff6d 100644
+--- a/sound/soc/codecs/wm2200.c
++++ b/sound/soc/codecs/wm2200.c
+@@ -1028,7 +1028,7 @@ SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
+ 		 WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0,
+ 		 digital_tlv),
+ SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
+-	   WM2200_SPK1R_MUTE_SHIFT, 1, 0),
++	   WM2200_SPK1R_MUTE_SHIFT, 1, 1),
+ };
+ 
+ WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
+@@ -2091,6 +2091,7 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
+ 
+ 	switch (wm2200->rev) {
+ 	case 0:
++	case 1:
+ 		ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch,
+ 					    ARRAY_SIZE(wm2200_reva_patch));
+ 		if (ret != 0) {
+diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
+index 9d93793..f8fba57 100644
+--- a/sound/soc/omap/omap-abe-twl6040.c
++++ b/sound/soc/omap/omap-abe-twl6040.c
+@@ -190,7 +190,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
+ 	twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk");
+ 	twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk");
+ 	twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out");
+-	twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vinrator");
++	twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator");
+ 	twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
+ 	twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic");
+ 	twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic");
+diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
+index 0540408..1bb0d58c 100644
+--- a/sound/soc/sh/fsi.c
++++ b/sound/soc/sh/fsi.c
+@@ -20,6 +20,7 @@
+ #include <linux/sh_dma.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/workqueue.h>
+ #include <sound/soc.h>
+ #include <sound/sh_fsi.h>
+ 
+@@ -223,7 +224,7 @@ struct fsi_stream {
+ 	 */
+ 	struct dma_chan		*chan;
+ 	struct sh_dmae_slave	slave; /* see fsi_handler_init() */
+-	struct tasklet_struct	tasklet;
++	struct work_struct	work;
+ 	dma_addr_t		dma;
+ };
+ 
+@@ -1085,9 +1086,9 @@ static void fsi_dma_complete(void *data)
+ 	snd_pcm_period_elapsed(io->substream);
+ }
+ 
+-static void fsi_dma_do_tasklet(unsigned long data)
++static void fsi_dma_do_work(struct work_struct *work)
+ {
+-	struct fsi_stream *io = (struct fsi_stream *)data;
++	struct fsi_stream *io = container_of(work, struct fsi_stream, work);
+ 	struct fsi_priv *fsi = fsi_stream_to_priv(io);
+ 	struct snd_soc_dai *dai;
+ 	struct dma_async_tx_descriptor *desc;
+@@ -1129,7 +1130,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
+ 	 * FIXME
+ 	 *
+ 	 * In DMAEngine case, codec and FSI cannot be started simultaneously
+-	 * since FSI is using tasklet.
++	 * since FSI is using the scheduler work queue.
+ 	 * Therefore, in capture case, probably FSI FIFO will have got
+ 	 * overflow error in this point.
+ 	 * in that case, DMA cannot start transfer until error was cleared.
+@@ -1153,7 +1154,7 @@ static bool fsi_dma_filter(struct dma_chan *chan, void *param)
+ 
+ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
+ {
+-	tasklet_schedule(&io->tasklet);
++	schedule_work(&io->work);
+ 
+ 	return 0;
+ }
+@@ -1195,14 +1196,14 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct dev
+ 		return fsi_stream_probe(fsi, dev);
+ 	}
+ 
+-	tasklet_init(&io->tasklet, fsi_dma_do_tasklet, (unsigned long)io);
++	INIT_WORK(&io->work, fsi_dma_do_work);
+ 
+ 	return 0;
+ }
+ 
+ static int fsi_dma_remove(struct fsi_priv *fsi, struct fsi_stream *io)
+ {
+-	tasklet_kill(&io->tasklet);
++	cancel_work_sync(&io->work);
+ 
+ 	fsi_stream_stop(fsi, io);
+ 

Added: genpatches-2.6/trunk/3.6/1003_linux-3.6.4.patch
===================================================================
--- genpatches-2.6/trunk/3.6/1003_linux-3.6.4.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.6/1003_linux-3.6.4.patch	2012-10-29 14:34:04 UTC (rev 2226)
@@ -0,0 +1,4828 @@
+diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
+index c8e5782..683fde9 100644
+--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
++++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
+@@ -93,7 +93,7 @@ Valid values for pin and group names are:
+ 
+     With some exceptions, these support nvidia,high-speed-mode,
+     nvidia,schmitt, nvidia,low-power-mode, nvidia,pull-down-strength,
+-    nvidia,pull-up-strength, nvidia,slew_rate-rising, nvidia,slew_rate-falling.
++    nvidia,pull-up-strength, nvidia,slew-rate-rising, nvidia,slew-rate-falling.
+ 
+     drive_ao1, drive_ao2, drive_at1, drive_at2, drive_cdev1, drive_cdev2,
+     drive_csus, drive_dap1, drive_dap2, drive_dap3, drive_dap4, drive_dbg,
+diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
+index c275b70..6f426ed 100644
+--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
++++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
+@@ -83,7 +83,7 @@ Valid values for pin and group names are:
+   drive groups:
+ 
+     These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
+-    nvidia,slew_rate-rising, nvidia,slew_rate-falling. Most but not all
++    nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
+     support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode.
+ 
+     ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, cec, crt, csus, dap1,
+diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
+index c86b50c..f17256f 100644
+--- a/Documentation/hwmon/coretemp
++++ b/Documentation/hwmon/coretemp
+@@ -105,6 +105,7 @@ Process		Processor					TjMax(C)
+ 		330/230						125
+ 		E680/660/640/620				90
+ 		E680T/660T/640T/620T				110
++		CE4170/4150/4110				110
+ 
+ 45nm		Core2 Processors
+ 		Solo ULV SU3500/3300				100
+diff --git a/Makefile b/Makefile
+index 6cdadf4..dcf132a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 6
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Terrified Chipmunk
+ 
+diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
+index d80f79d..8e1fb82 100644
+--- a/arch/s390/boot/compressed/vmlinux.lds.S
++++ b/arch/s390/boot/compressed/vmlinux.lds.S
+@@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+ OUTPUT_ARCH(s390:64-bit)
+ #else
+ OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+-OUTPUT_ARCH(s390)
++OUTPUT_ARCH(s390:31-bit)
+ #endif
+ 
+ ENTRY(startup)
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index de8fa9b..79cb51a 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -8,7 +8,7 @@
+ 
+ #ifndef CONFIG_64BIT
+ OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+-OUTPUT_ARCH(s390)
++OUTPUT_ARCH(s390:31-bit)
+ ENTRY(startup)
+ jiffies = jiffies_64 + 4;
+ #else
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 5713957..ac18037 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -557,11 +557,13 @@ static u64 nop_for_index(int idx)
+ 
+ static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
+ {
+-	u64 val, mask = mask_for_index(idx);
++	u64 enc, val, mask = mask_for_index(idx);
++
++	enc = perf_event_get_enc(cpuc->events[idx]);
+ 
+ 	val = cpuc->pcr;
+ 	val &= ~mask;
+-	val |= hwc->config;
++	val |= event_encoding(enc, idx);
+ 	cpuc->pcr = val;
+ 
+ 	pcr_ops->write(cpuc->pcr);
+@@ -1426,8 +1428,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ {
+ 	unsigned long ufp;
+ 
+-	perf_callchain_store(entry, regs->tpc);
+-
+ 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+ 	do {
+ 		struct sparc_stackf *usf, sf;
+@@ -1448,8 +1448,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ {
+ 	unsigned long ufp;
+ 
+-	perf_callchain_store(entry, regs->tpc);
+-
+ 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
+ 	do {
+ 		struct sparc_stackf32 *usf, sf;
+@@ -1468,6 +1466,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ void
+ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ {
++	perf_callchain_store(entry, regs->tpc);
++
++	if (!current->mm)
++		return;
++
+ 	flushw_user();
+ 	if (test_thread_flag(TIF_32BIT))
+ 		perf_callchain_user_32(entry, regs);
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 1d7e274..7f5f65d 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -212,24 +212,20 @@ linux_sparc_syscall:
+ 3:	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ 	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+-	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ 	sra	%o0, 0, %o0
+ 	mov	%ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ 	sllx	%g2, 32, %g2
+ 
+-	/* Check if force_successful_syscall_return()
+-	 * was invoked.
+-	 */
+-	ldub	[%g6 + TI_SYS_NOERROR], %l2
+-	brnz,a,pn %l2, 80f
+-	 stb	%g0, [%g6 + TI_SYS_NOERROR]
+-
+ 	cmp	%o0, -ERESTART_RESTARTBLOCK
+ 	bgeu,pn	%xcc, 1f
+-	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+-80:
++	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
++
++2:
++	stb	%g0, [%g6 + TI_SYS_NOERROR]
+ 	/* System call success, clear Carry condition code. */
+ 	andn	%g3, %g2, %g3
++3:
+ 	stx	%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]	
+ 	bne,pn	%icc, linux_syscall_trace2
+ 	 add	%l1, 0x4, %l2			! npc = npc+4
+@@ -238,20 +234,20 @@ ret_sys_call:
+ 	 stx	%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+ 
+ 1:
++	/* Check if force_successful_syscall_return()
++	 * was invoked.
++	 */
++	ldub	[%g6 + TI_SYS_NOERROR], %l2
++	brnz,pn %l2, 2b
++	 ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ 	/* System call failure, set Carry condition code.
+ 	 * Also, get abs(errno) to return to the process.
+ 	 */
+-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6	
+ 	sub	%g0, %o0, %o0
+-	or	%g3, %g2, %g3
+ 	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+-	stx	%g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+-	bne,pn	%icc, linux_syscall_trace2
+-	 add	%l1, 0x4, %l2			! npc = npc+4
+-	stx	%l1, [%sp + PTREGS_OFF + PT_V9_TPC]
++	ba,pt	%xcc, 3b
++	 or	%g3, %g2, %g3
+ 
+-	b,pt	%xcc, rtrap
+-	 stx	%l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+ linux_syscall_trace2:
+ 	call	syscall_trace_leave
+ 	 add	%sp, PTREGS_OFF, %o0
+diff --git a/arch/tile/Makefile b/arch/tile/Makefile
+index 55640cf..3d15364 100644
+--- a/arch/tile/Makefile
++++ b/arch/tile/Makefile
+@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
+   endif
+ endif
+ 
++# The tile compiler may emit .eh_frame information for backtracing.
++# In kernel modules, this causes load failures due to unsupported relocations.
++KBUILD_CFLAGS   += -fno-asynchronous-unwind-tables
++
+ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
+ KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
+ endif
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index c4e916d..698b6ec 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -576,12 +576,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 	int err = 0;
+ 
+ 	if (shared_bank[bank]) {
+-
+ 		nb = node_to_amd_nb(amd_get_nb_id(cpu));
+-		WARN_ON(!nb);
+ 
+ 		/* threshold descriptor already initialized on this node? */
+-		if (nb->bank4) {
++		if (nb && nb->bank4) {
+ 			/* yes, use it */
+ 			b = nb->bank4;
+ 			err = kobject_add(b->kobj, &dev->kobj, name);
+@@ -615,8 +613,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 		atomic_set(&b->cpus, 1);
+ 
+ 		/* nb is already initialized, see above */
+-		WARN_ON(nb->bank4);
+-		nb->bank4 = b;
++		if (nb) {
++			WARN_ON(nb->bank4);
++			nb->bank4 = b;
++		}
+ 	}
+ 
+ 	err = allocate_threshold_blocks(cpu, bank, 0,
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 623f288..8f8e8ee 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1016,7 +1016,7 @@ ENTRY(xen_sysenter_target)
+ 
+ ENTRY(xen_hypervisor_callback)
+ 	CFI_STARTPROC
+-	pushl_cfi $0
++	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ 	SAVE_ALL
+ 	TRACE_IRQS_OFF
+ 
+@@ -1058,14 +1058,16 @@ ENTRY(xen_failsafe_callback)
+ 2:	mov 8(%esp),%es
+ 3:	mov 12(%esp),%fs
+ 4:	mov 16(%esp),%gs
++	/* EAX == 0 => Category 1 (Bad segment)
++	   EAX != 0 => Category 2 (Bad IRET) */
+ 	testl %eax,%eax
+ 	popl_cfi %eax
+ 	lea 16(%esp),%esp
+ 	CFI_ADJUST_CFA_OFFSET -16
+ 	jz 5f
+ 	addl $16,%esp
+-	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
+-5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
++	jmp iret_exc
++5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ 	SAVE_ALL
+ 	jmp ret_from_exception
+ 	CFI_ENDPROC
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 69babd8..dcdd0ea 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1363,7 +1363,7 @@ ENTRY(xen_failsafe_callback)
+ 	CFI_RESTORE r11
+ 	addq $0x30,%rsp
+ 	CFI_ADJUST_CFA_OFFSET -0x30
+-	pushq_cfi $0
++	pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+ 	SAVE_ALL
+ 	jmp error_exit
+ 	CFI_ENDPROC
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index f4b9b80..198e774 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -919,8 +919,21 @@ void __init setup_arch(char **cmdline_p)
+ 
+ #ifdef CONFIG_X86_64
+ 	if (max_pfn > max_low_pfn) {
+-		max_pfn_mapped = init_memory_mapping(1UL<<32,
+-						     max_pfn<<PAGE_SHIFT);
++		int i;
++		for (i = 0; i < e820.nr_map; i++) {
++			struct e820entry *ei = &e820.map[i];
++
++			if (ei->addr + ei->size <= 1UL << 32)
++				continue;
++
++			if (ei->type == E820_RESERVED)
++				continue;
++
++			max_pfn_mapped = init_memory_mapping(
++				ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
++				ei->addr + ei->size);
++		}
++
+ 		/* can we preseve max_low_pfn ?*/
+ 		max_low_pfn = max_pfn;
+ 	}
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 26b8a85..48768df 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ 	val |= counter_config->extra;
+ 	event &= model->event_mask ? model->event_mask : 0xFF;
+ 	val |= event & 0xFF;
+-	val |= (event & 0x0F00) << 24;
++	val |= (u64)(event & 0x0F00) << 24;
+ 
+ 	return val;
+ }
+diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
+index bc7e005..6b3ff98 100644
+--- a/arch/xtensa/include/asm/unistd.h
++++ b/arch/xtensa/include/asm/unistd.h
+@@ -148,8 +148,8 @@ __SYSCALL( 59, sys_getdents, 3)
+ __SYSCALL( 60, sys_getdents64, 3)
+ #define __NR_fcntl64 				 61
+ __SYSCALL( 61, sys_fcntl64, 3)
+-#define __NR_available62			 62
+-__SYSCALL( 62, sys_ni_syscall, 0)
++#define __NR_fallocate				 62
++__SYSCALL( 62, sys_fallocate, 6)
+ #define __NR_fadvise64_64 			 63
+ __SYSCALL( 63, xtensa_fadvise64_64, 6)
+ #define __NR_utime				 64	/* glibc 2.3.3 ?? */
+@@ -264,8 +264,8 @@ __SYSCALL(112, sys_socketpair, 4)
+ __SYSCALL(113, sys_sendfile, 4)
+ #define __NR_sendfile64 			114
+ __SYSCALL(114, sys_sendfile64, 4)
+-#define __NR_available115			115
+-__SYSCALL(115, sys_ni_syscall, 0)
++#define __NR_sendmmsg				115
++__SYSCALL(115, sys_sendmmsg, 4)
+ 
+ /* Process Operations */
+ 
+@@ -380,11 +380,11 @@ __SYSCALL(168, sys_msgrcv, 4)
+ __SYSCALL(169, sys_msgctl, 4)
+ #define __NR_available170			170
+ __SYSCALL(170, sys_ni_syscall, 0)
+-#define __NR_available171			171
+-__SYSCALL(171, sys_ni_syscall, 0)
+ 
+ /* File System */
+ 
++#define __NR_umount2				171
++__SYSCALL(171, sys_umount, 2)
+ #define __NR_mount 				172
+ __SYSCALL(172, sys_mount, 5)
+ #define __NR_swapon 				173
+@@ -399,8 +399,8 @@ __SYSCALL(176, sys_umount, 2)
+ __SYSCALL(177, sys_swapoff, 1)
+ #define __NR_sync 				178
+ __SYSCALL(178, sys_sync, 0)
+-#define __NR_available179			179
+-__SYSCALL(179, sys_ni_syscall, 0)
++#define __NR_syncfs				179
++__SYSCALL(179, sys_syncfs, 1)
+ #define __NR_setfsuid 				180
+ __SYSCALL(180, sys_setfsuid, 1)
+ #define __NR_setfsgid 				181
+@@ -455,7 +455,7 @@ __SYSCALL(203, sys_reboot, 3)
+ #define __NR_quotactl 				204
+ __SYSCALL(204, sys_quotactl, 4)
+ #define __NR_nfsservctl 			205
+-__SYSCALL(205, sys_ni_syscall, 0)
++__SYSCALL(205, sys_ni_syscall, 0)			/* old nfsservctl */
+ #define __NR__sysctl 				206
+ __SYSCALL(206, sys_sysctl, 1)
+ #define __NR_bdflush 				207
+@@ -562,7 +562,7 @@ __SYSCALL(252, sys_timer_getoverrun, 1)
+ 
+ /* System */
+ 
+-#define __NR_reserved244 			253
++#define __NR_reserved253			253
+ __SYSCALL(253, sys_ni_syscall, 0)
+ #define __NR_lookup_dcookie 			254
+ __SYSCALL(254, sys_lookup_dcookie, 4)
+@@ -609,8 +609,8 @@ __SYSCALL(272, sys_pselect6, 0)
+ __SYSCALL(273, sys_ppoll, 0)
+ #define __NR_epoll_pwait			274
+ __SYSCALL(274, sys_epoll_pwait, 0)
+-#define __NR_available275			275
+-__SYSCALL(275, sys_ni_syscall, 0)
++#define __NR_epoll_create1		275
++__SYSCALL(275, sys_epoll_create1, 1)
+ 
+ #define __NR_inotify_init			276
+ __SYSCALL(276, sys_inotify_init, 0)
+@@ -618,8 +618,8 @@ __SYSCALL(276, sys_inotify_init, 0)
+ __SYSCALL(277, sys_inotify_add_watch, 3)
+ #define __NR_inotify_rm_watch			278
+ __SYSCALL(278, sys_inotify_rm_watch, 2)
+-#define __NR_available279			279
+-__SYSCALL(279, sys_ni_syscall, 0)
++#define __NR_inotify_init1			279
++__SYSCALL(279, sys_inotify_init1, 1)
+ 
+ #define __NR_getcpu				280
+ __SYSCALL(280, sys_getcpu, 0)
+@@ -635,10 +635,10 @@ __SYSCALL(283, sys_ioprio_get, 3)
+ __SYSCALL(284, sys_set_robust_list, 3)
+ #define __NR_get_robust_list			285
+ __SYSCALL(285, sys_get_robust_list, 3)
+-#define __NR_reserved286			286	/* sync_file_rangeX */
+-__SYSCALL(286, sys_ni_syscall, 3)
++#define __NR_available286			286
++__SYSCALL(286, sys_ni_syscall, 0)
+ #define __NR_available287			287
+-__SYSCALL(287, sys_faccessat, 0)
++__SYSCALL(287, sys_ni_syscall, 0)
+ 
+ /* Relative File Operations */
+ 
+@@ -683,10 +683,63 @@ __SYSCALL(305, sys_ni_syscall, 0)
+ __SYSCALL(306, sys_eventfd, 1)
+ #define __NR_recvmmsg				307
+ __SYSCALL(307, sys_recvmmsg, 5)
++
+ #define __NR_setns				308
+ __SYSCALL(308, sys_setns, 2)
+-
+-#define __NR_syscall_count			309
++#define __NR_signalfd4				309
++__SYSCALL(309, sys_signalfd4, 4)
++#define __NR_dup3				310
++__SYSCALL(310, sys_dup3, 3)
++#define __NR_pipe2				311
++__SYSCALL(311, sys_pipe2, 2)
++
++#define __NR_timerfd_create			312
++__SYSCALL(312, sys_timerfd_create, 2)
++#define __NR_timerfd_settime			313
++__SYSCALL(313, sys_timerfd_settime, 4)
++#define __NR_timerfd_gettime			314
++__SYSCALL(314, sys_timerfd_gettime, 2)
++#define __NR_available315			315
++__SYSCALL(315, sys_ni_syscall, 0)
++
++#define __NR_eventfd2				316
++__SYSCALL(316, sys_eventfd2, 2)
++#define __NR_preadv				317
++__SYSCALL(317, sys_preadv, 5)
++#define __NR_pwritev				318
++__SYSCALL(318, sys_pwritev, 5)
++#define __NR_available319			319
++__SYSCALL(319, sys_ni_syscall, 0)
++
++#define __NR_fanotify_init			320
++__SYSCALL(320, sys_fanotify_init, 2)
++#define __NR_fanotify_mark			321
++__SYSCALL(321, sys_fanotify_mark, 6)
++#define __NR_process_vm_readv			322
++__SYSCALL(322, sys_process_vm_readv, 6)
++#define __NR_process_vm_writev			323
++__SYSCALL(323, sys_process_vm_writev, 6)
++
++#define __NR_name_to_handle_at			324
++__SYSCALL(324, sys_name_to_handle_at, 5)
++#define __NR_open_by_handle_at			325
++__SYSCALL(325, sys_open_by_handle_at, 3)
++#define __NR_sync_file_range			326
++__SYSCALL(326, sys_sync_file_range2, 6)
++#define __NR_perf_event_open			327
++__SYSCALL(327, sys_perf_event_open, 5)
++
++#define __NR_rt_tgsigqueueinfo			328
++__SYSCALL(328, sys_rt_tgsigqueueinfo, 4)
++#define __NR_clock_adjtime			329
++__SYSCALL(329, sys_clock_adjtime, 2)
++#define __NR_prlimit64				330
++__SYSCALL(330, sys_prlimit64, 4)
++#define __NR_kcmp				331
++__SYSCALL(331, sys_kcmp, 5)
++
++
++#define __NR_syscall_count			332
+ 
+ /*
+  * sysxtensa syscall handler
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 5a297a2..cc8e7c7 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+ 	 * memory controller and apply to register. Search for the first
+ 	 * bandwidth entry that is greater or equal than the setting requested
+ 	 * and program that. If at last entry, turn off DRAM scrubbing.
++	 *
++	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
++	 * by falling back to the last element in scrubrates[].
+ 	 */
+-	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
++	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
+ 		/*
+ 		 * skip scrub rates which aren't recommended
+ 		 * (see F10 BKDG, F3x58)
+@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+ 
+ 		if (scrubrates[i].bandwidth <= new_bw)
+ 			break;
+-
+-		/*
+-		 * if no suitable bandwidth found, turn off DRAM scrubbing
+-		 * entirely by falling back to the last element in the
+-		 * scrubrates array.
+-		 */
+ 	}
+ 
+ 	scrubval = scrubrates[i].scrubval;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index ff2819e..cdf46b5 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -269,6 +269,7 @@ eb_destroy(struct eb_objects *eb)
+ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+ {
+ 	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
++		!obj->map_and_fenceable ||
+ 		obj->cache_level != I915_CACHE_NONE);
+ }
+ 
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 984a3f1..47b8d84 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -205,8 +205,11 @@ static const struct tjmax __cpuinitconst tjmax_table[] = {
+ 	{ "CPU N455", 100000 },
+ 	{ "CPU N470", 100000 },
+ 	{ "CPU N475", 100000 },
+-	{ "CPU  230", 100000 },
+-	{ "CPU  330", 125000 },
++	{ "CPU  230", 100000 },		/* Model 0x1c, stepping 2	*/
++	{ "CPU  330", 125000 },		/* Model 0x1c, stepping 2	*/
++	{ "CPU CE4110", 110000 },	/* Model 0x1c, stepping 10	*/
++	{ "CPU CE4150", 110000 },	/* Model 0x1c, stepping 10	*/
++	{ "CPU CE4170", 110000 },	/* Model 0x1c, stepping 10	*/
+ };
+ 
+ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 7172559..d1febf0 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3495,7 +3495,8 @@ out:
+ }
+ 
+ static const struct ibnl_client_cbs cma_cb_table[] = {
+-	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
++	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
++				       .module = THIS_MODULE },
+ };
+ 
+ static int __init cma_init(void)
+diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+index 3ae2bfd..53b43a5 100644
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -154,6 +154,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 			{
+ 				struct netlink_dump_control c = {
+ 					.dump = client->cb_table[op].dump,
++					.module = client->cb_table[op].module,
+ 				};
+ 				return netlink_dump_start(nls, skb, nlh, &c);
+ 			}
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 2a4bb36..cef812a 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -146,7 +146,7 @@
+ 
+ #define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
+ #define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
+-#define SMMU_PDN_TO_ADDR(addr)	((pdn) << 22)
++#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)
+ 
+ #define _READABLE	(1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
+ #define _WRITABLE	(1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
+diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
+index ac3dd73..902825a 100644
+--- a/drivers/media/video/au0828/au0828-video.c
++++ b/drivers/media/video/au0828/au0828-video.c
+@@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ 			(AUVI_INPUT(i).audio_setup)(dev, 0);
+ 		}
+ 
+-		videobuf_streamoff(&fh->vb_vidq);
+-		res_free(fh, AU0828_RESOURCE_VIDEO);
++		if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
++			videobuf_streamoff(&fh->vb_vidq);
++			res_free(fh, AU0828_RESOURCE_VIDEO);
++		}
+ 	} else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ 		dev->vbi_timeout_running = 0;
+ 		del_timer_sync(&dev->vbi_timeout);
+ 
+-		videobuf_streamoff(&fh->vb_vbiq);
+-		res_free(fh, AU0828_RESOURCE_VBI);
++		if (res_check(fh, AU0828_RESOURCE_VBI)) {
++			videobuf_streamoff(&fh->vb_vbiq);
++			res_free(fh, AU0828_RESOURCE_VBI);
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
+index 2b4b4f5..16814b3 100644
+--- a/drivers/net/ethernet/calxeda/xgmac.c
++++ b/drivers/net/ethernet/calxeda/xgmac.c
+@@ -375,7 +375,6 @@ struct xgmac_priv {
+ 	unsigned int tx_tail;
+ 
+ 	void __iomem *base;
+-	struct sk_buff_head rx_recycle;
+ 	unsigned int dma_buf_sz;
+ 	dma_addr_t dma_rx_phy;
+ 	dma_addr_t dma_tx_phy;
+@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
+ 		p = priv->dma_rx + entry;
+ 
+ 		if (priv->rx_skbuff[entry] == NULL) {
+-			skb = __skb_dequeue(&priv->rx_recycle);
+-			if (skb == NULL)
+-				skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
++			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ 			if (unlikely(skb == NULL))
+ 				break;
+ 
+@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
+ 				       desc_get_buf_len(p), DMA_TO_DEVICE);
+ 		}
+ 
+-		/*
+-		 * If there's room in the queue (limit it to size)
+-		 * we add this skb back into the pool,
+-		 * if it's the right size.
+-		 */
+-		if ((skb_queue_len(&priv->rx_recycle) <
+-			DMA_RX_RING_SZ) &&
+-			skb_recycle_check(skb, priv->dma_buf_sz))
+-			__skb_queue_head(&priv->rx_recycle, skb);
+-		else
+-			dev_kfree_skb(skb);
++		dev_kfree_skb(skb);
+ 	}
+ 
+ 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
+ 			dev->dev_addr);
+ 	}
+ 
+-	skb_queue_head_init(&priv->rx_recycle);
+ 	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+ 
+ 	/* Initialize the XGMAC and descriptors */
+@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
+ 		napi_disable(&priv->napi);
+ 
+ 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+-	skb_queue_purge(&priv->rx_recycle);
+ 
+ 	/* Disable the MAC core */
+ 	xgmac_mac_disable(priv->base);
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index d3233f5..e0ef1e5 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1757,7 +1757,6 @@ static void free_skb_resources(struct gfar_private *priv)
+ 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ 			  priv->tx_queue[0]->tx_bd_base,
+ 			  priv->tx_queue[0]->tx_bd_dma_base);
+-	skb_queue_purge(&priv->rx_recycle);
+ }
+ 
+ void gfar_start(struct net_device *dev)
+@@ -1935,8 +1934,6 @@ static int gfar_enet_open(struct net_device *dev)
+ 
+ 	enable_napi(priv);
+ 
+-	skb_queue_head_init(&priv->rx_recycle);
+-
+ 	/* Initialize a bunch of registers */
+ 	init_registers(dev);
+ 
+@@ -2525,16 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+ 
+ 		bytes_sent += skb->len;
+ 
+-		/* If there's room in the queue (limit it to rx_buffer_size)
+-		 * we add this skb back into the pool, if it's the right size
+-		 */
+-		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
+-		    skb_recycle_check(skb, priv->rx_buffer_size +
+-				      RXBUF_ALIGNMENT)) {
+-			gfar_align_skb(skb);
+-			skb_queue_head(&priv->rx_recycle, skb);
+-		} else
+-			dev_kfree_skb_any(skb);
++		dev_kfree_skb_any(skb);
+ 
+ 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
+ 
+@@ -2600,7 +2588,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+ {
+ 	struct gfar_private *priv = netdev_priv(dev);
+-	struct sk_buff *skb = NULL;
++	struct sk_buff *skb;
+ 
+ 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+ 	if (!skb)
+@@ -2613,14 +2601,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+ 
+ struct sk_buff *gfar_new_skb(struct net_device *dev)
+ {
+-	struct gfar_private *priv = netdev_priv(dev);
+-	struct sk_buff *skb = NULL;
+-
+-	skb = skb_dequeue(&priv->rx_recycle);
+-	if (!skb)
+-		skb = gfar_alloc_skb(dev);
+-
+-	return skb;
++	return gfar_alloc_skb(dev);
+ }
+ 
+ static inline void count_errors(unsigned short status, struct net_device *dev)
+@@ -2779,7 +2760,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
+ 			if (unlikely(!newskb))
+ 				newskb = skb;
+ 			else if (skb)
+-				skb_queue_head(&priv->rx_recycle, skb);
++				dev_kfree_skb(skb);
+ 		} else {
+ 			/* Increment the number of packets */
+ 			rx_queue->stats.rx_packets++;
+diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
+index 2136c7f..27d8aeb 100644
+--- a/drivers/net/ethernet/freescale/gianfar.h
++++ b/drivers/net/ethernet/freescale/gianfar.h
+@@ -1072,8 +1072,6 @@ struct gfar_private {
+ 
+ 	u32 cur_filer_idx;
+ 
+-	struct sk_buff_head rx_recycle;
+-
+ 	/* RX queue filer rule set*/
+ 	struct ethtool_rx_list rx_list;
+ 	struct mutex rx_queue_access;
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
+index 21c6574..f6259ff 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -210,14 +210,12 @@ static struct list_head *dequeue(struct list_head *lh)
+ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
+ 		u8 __iomem *bd)
+ {
+-	struct sk_buff *skb = NULL;
++	struct sk_buff *skb;
+ 
+-	skb = __skb_dequeue(&ugeth->rx_recycle);
++	skb = netdev_alloc_skb(ugeth->ndev,
++			       ugeth->ug_info->uf_info.max_rx_buf_length +
++			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+ 	if (!skb)
+-		skb = netdev_alloc_skb(ugeth->ndev,
+-				      ugeth->ug_info->uf_info.max_rx_buf_length +
+-				      UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+-	if (skb == NULL)
+ 		return NULL;
+ 
+ 	/* We need the data buffer to be aligned properly.  We will reserve
+@@ -2021,8 +2019,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+ 		iounmap(ugeth->ug_regs);
+ 		ugeth->ug_regs = NULL;
+ 	}
+-
+-	skb_queue_purge(&ugeth->rx_recycle);
+ }
+ 
+ static void ucc_geth_set_multi(struct net_device *dev)
+@@ -2231,8 +2227,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
+ 		return -ENOMEM;
+ 	}
+ 
+-	skb_queue_head_init(&ugeth->rx_recycle);
+-
+ 	return 0;
+ }
+ 
+@@ -3275,12 +3269,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
+ 			if (netif_msg_rx_err(ugeth))
+ 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
+ 					   __func__, __LINE__, (u32) skb);
+-			if (skb) {
+-				skb->data = skb->head + NET_SKB_PAD;
+-				skb->len = 0;
+-				skb_reset_tail_pointer(skb);
+-				__skb_queue_head(&ugeth->rx_recycle, skb);
+-			}
++			dev_kfree_skb(skb);
+ 
+ 			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
+ 			dev->stats.rx_dropped++;
+@@ -3350,13 +3339,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
+ 
+ 		dev->stats.tx_packets++;
+ 
+-		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
+-			     skb_recycle_check(skb,
+-				    ugeth->ug_info->uf_info.max_rx_buf_length +
+-				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
+-			__skb_queue_head(&ugeth->rx_recycle, skb);
+-		else
+-			dev_kfree_skb(skb);
++		dev_kfree_skb(skb);
+ 
+ 		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ 		ugeth->skb_dirtytx[txQ] =
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
+index f71b3e7..75f3371 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.h
++++ b/drivers/net/ethernet/freescale/ucc_geth.h
+@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
+ 	/* index of the first skb which hasn't been transmitted yet. */
+ 	u16 skb_dirtytx[NUM_TX_QUEUES];
+ 
+-	struct sk_buff_head rx_recycle;
+-
+ 	struct ugeth_mii_info *mii_info;
+ 	struct phy_device *phydev;
+ 	phy_interface_t phy_interface;
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 087b9e0..84c1326 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
+ 	u8 work_rx_refill;
+ 
+ 	int skb_size;
+-	struct sk_buff_head rx_recycle;
+ 
+ 	/*
+ 	 * RX state.
+@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
+ 		struct rx_desc *rx_desc;
+ 		int size;
+ 
+-		skb = __skb_dequeue(&mp->rx_recycle);
+-		if (skb == NULL)
+-			skb = netdev_alloc_skb(mp->dev, mp->skb_size);
++		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+ 
+ 		if (skb == NULL) {
+ 			mp->oom = 1;
+@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+ 				       desc->byte_cnt, DMA_TO_DEVICE);
+ 		}
+ 
+-		if (skb != NULL) {
+-			if (skb_queue_len(&mp->rx_recycle) <
+-					mp->rx_ring_size &&
+-			    skb_recycle_check(skb, mp->skb_size))
+-				__skb_queue_head(&mp->rx_recycle, skb);
+-			else
+-				dev_kfree_skb(skb);
+-		}
++		dev_kfree_skb(skb);
+ 	}
+ 
+ 	__netif_tx_unlock(nq);
+@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
+ 
+ 	napi_enable(&mp->napi);
+ 
+-	skb_queue_head_init(&mp->rx_recycle);
+-
+ 	mp->int_mask = INT_EXT;
+ 
+ 	for (i = 0; i < mp->rxq_count; i++) {
+@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
+ 	mib_counters_update(mp);
+ 	del_timer_sync(&mp->mib_counters_timer);
+ 
+-	skb_queue_purge(&mp->rx_recycle);
+-
+ 	for (i = 0; i < mp->rxq_count; i++)
+ 		rxq_deinit(mp->rxq + i);
+ 	for (i = 0; i < mp->txq_count; i++)
+diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
+index 5a30bf8..f4be8f7 100644
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -4153,6 +4153,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ 		},
+ 	},
++	{
++		.ident = "ASUS P5NSLI",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
++		},
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index e872e1d..7d51a65 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -50,7 +50,6 @@ struct stmmac_priv {
+ 	unsigned int dirty_rx;
+ 	struct sk_buff **rx_skbuff;
+ 	dma_addr_t *rx_skbuff_dma;
+-	struct sk_buff_head rx_recycle;
+ 
+ 	struct net_device *dev;
+ 	dma_addr_t dma_rx_phy;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 3be8833..c6cdbc4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
+ 		priv->hw->ring->clean_desc3(p);
+ 
+ 		if (likely(skb != NULL)) {
+-			/*
+-			 * If there's room in the queue (limit it to size)
+-			 * we add this skb back into the pool,
+-			 * if it's the right size.
+-			 */
+-			if ((skb_queue_len(&priv->rx_recycle) <
+-				priv->dma_rx_size) &&
+-				skb_recycle_check(skb, priv->dma_buf_sz))
+-				__skb_queue_head(&priv->rx_recycle, skb);
+-			else
+-				dev_kfree_skb(skb);
+-
++			dev_kfree_skb(skb);
+ 			priv->tx_skbuff[entry] = NULL;
+ 		}
+ 
+@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
+ 	priv->eee_enabled = stmmac_eee_init(priv);
+ 
+ 	napi_enable(&priv->napi);
+-	skb_queue_head_init(&priv->rx_recycle);
+ 	netif_start_queue(dev);
+ 
+ 	return 0;
+@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
+ 		kfree(priv->tm);
+ #endif
+ 	napi_disable(&priv->napi);
+-	skb_queue_purge(&priv->rx_recycle);
+ 
+ 	/* Free the IRQ lines */
+ 	free_irq(dev->irq, dev);
+@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+ 		if (likely(priv->rx_skbuff[entry] == NULL)) {
+ 			struct sk_buff *skb;
+ 
+-			skb = __skb_dequeue(&priv->rx_recycle);
+-			if (skb == NULL)
+-				skb = netdev_alloc_skb_ip_align(priv->dev,
+-								bfsize);
++			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+ 
+ 			if (unlikely(skb == NULL))
+ 				break;
+diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
+index b066273..7dd879c 100644
+--- a/drivers/pcmcia/pxa2xx_sharpsl.c
++++ b/drivers/pcmcia/pxa2xx_sharpsl.c
+@@ -194,7 +194,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+ 	sharpsl_pcmcia_init_reset(skt);
+ }
+ 
+-static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
++static struct pcmcia_low_level sharpsl_pcmcia_ops = {
+ 	.owner                  = THIS_MODULE,
+ 	.hw_init                = sharpsl_pcmcia_hw_init,
+ 	.socket_state           = sharpsl_pcmcia_socket_state,
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index dc5c126..63560c3 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1059,8 +1059,10 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
+ 			seq_printf(s, "group: %s\n", gname);
+ 			for (i = 0; i < num_pins; i++) {
+ 				pname = pin_get_name(pctldev, pins[i]);
+-				if (WARN_ON(!pname))
++				if (WARN_ON(!pname)) {
++					mutex_unlock(&pinctrl_mutex);
+ 					return -EINVAL;
++				}
+ 				seq_printf(s, "pin %d (%s)\n", pins[i], pname);
+ 			}
+ 			seq_puts(s, "\n");
+diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
+index 43f474c..baee2cc 100644
+--- a/drivers/pinctrl/pinconf.c
++++ b/drivers/pinctrl/pinconf.c
+@@ -537,8 +537,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
+ 	seq_puts(s, "Pin config settings per pin group\n");
+ 	seq_puts(s, "Format: group (name): configs\n");
+ 
+-	mutex_lock(&pinctrl_mutex);
+-
+ 	while (selector < ngroups) {
+ 		const char *gname = pctlops->get_group_name(pctldev, selector);
+ 
+@@ -549,8 +547,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
+ 		selector++;
+ 	}
+ 
+-	mutex_unlock(&pinctrl_mutex);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
+index ae52e4e..5a5f261 100644
+--- a/drivers/pinctrl/pinctrl-tegra.c
++++ b/drivers/pinctrl/pinctrl-tegra.c
+@@ -466,7 +466,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
+ 		*bank = g->drv_bank;
+ 		*reg = g->drv_reg;
+ 		*bit = g->lpmd_bit;
+-		*width = 1;
++		*width = 2;
+ 		break;
+ 	case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
+ 		*bank = g->drv_bank;
+diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c
+index 0386fdf..7894f14 100644
+--- a/drivers/pinctrl/pinctrl-tegra30.c
++++ b/drivers/pinctrl/pinctrl-tegra30.c
+@@ -3345,10 +3345,10 @@ static const struct tegra_function tegra30_functions[] = {
+ 	FUNCTION(vi_alt3),
+ };
+ 
+-#define MUXCTL_REG_A	0x3000
+-#define PINGROUP_REG_A	0x868
++#define DRV_PINGROUP_REG_A	0x868	/* bank 0 */
++#define PINGROUP_REG_A		0x3000	/* bank 1 */
+ 
+-#define PINGROUP_REG_Y(r) ((r) - MUXCTL_REG_A)
++#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
+ #define PINGROUP_REG_N(r) -1
+ 
+ #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior)	\
+@@ -3364,25 +3364,25 @@ static const struct tegra_function tegra30_functions[] = {
+ 		},						\
+ 		.func_safe = TEGRA_MUX_ ## f_safe,		\
+ 		.mux_reg = PINGROUP_REG_Y(r),			\
+-		.mux_bank = 0,					\
++		.mux_bank = 1,					\
+ 		.mux_bit = 0,					\
+ 		.pupd_reg = PINGROUP_REG_Y(r),			\
+-		.pupd_bank = 0,					\
++		.pupd_bank = 1,					\
+ 		.pupd_bit = 2,					\
+ 		.tri_reg = PINGROUP_REG_Y(r),			\
+-		.tri_bank = 0,					\
++		.tri_bank = 1,					\
+ 		.tri_bit = 4,					\
+ 		.einput_reg = PINGROUP_REG_Y(r),		\
+-		.einput_bank = 0,				\
++		.einput_bank = 1,				\
+ 		.einput_bit = 5,				\
+ 		.odrain_reg = PINGROUP_REG_##od(r),		\
+-		.odrain_bank = 0,				\
++		.odrain_bank = 1,				\
+ 		.odrain_bit = 6,				\
+ 		.lock_reg = PINGROUP_REG_Y(r),			\
+-		.lock_bank = 0,					\
++		.lock_bank = 1,					\
+ 		.lock_bit = 7,					\
+ 		.ioreset_reg = PINGROUP_REG_##ior(r),		\
+-		.ioreset_bank = 0,				\
++		.ioreset_bank = 1,				\
+ 		.ioreset_bit = 8,				\
+ 		.drv_reg = -1,					\
+ 	}
+@@ -3401,8 +3401,8 @@ static const struct tegra_function tegra30_functions[] = {
+ 		.odrain_reg = -1,				\
+ 		.lock_reg = -1,					\
+ 		.ioreset_reg = -1,				\
+-		.drv_reg = ((r) - PINGROUP_REG_A),		\
+-		.drv_bank = 1,					\
++		.drv_reg = ((r) - DRV_PINGROUP_REG_A),		\
++		.drv_bank = 0,					\
+ 		.hsm_bit = hsm_b,				\
+ 		.schmitt_bit = schmitt_b,			\
+ 		.lpmd_bit = lpmd_b,				\
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e8007b8..84f7dff 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -818,10 +818,6 @@ static const __u32 acm_tty_speed[] = {
+ 	2500000, 3000000, 3500000, 4000000
+ };
+ 
+-static const __u8 acm_tty_size[] = {
+-	5, 6, 7, 8
+-};
+-
+ static void acm_tty_set_termios(struct tty_struct *tty,
+ 						struct ktermios *termios_old)
+ {
+@@ -835,7 +831,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ 	newline.bParityType = termios->c_cflag & PARENB ?
+ 				(termios->c_cflag & PARODD ? 1 : 2) +
+ 				(termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+-	newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
++	switch (termios->c_cflag & CSIZE) {
++	case CS5:
++		newline.bDataBits = 5;
++		break;
++	case CS6:
++		newline.bDataBits = 6;
++		break;
++	case CS7:
++		newline.bDataBits = 7;
++		break;
++	case CS8:
++	default:
++		newline.bDataBits = 8;
++		break;
++	}
+ 	/* FIXME: Needs to clear unsupported bits in the termios */
+ 	acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+ 
+@@ -1234,7 +1244,7 @@ made_compressed_probe:
+ 
+ 		if (usb_endpoint_xfer_int(epwrite))
+ 			usb_fill_int_urb(snd->urb, usb_dev,
+-				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
++				usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
+ 				NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
+ 		else
+ 			usb_fill_bulk_urb(snd->urb, usb_dev,
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index ebb8a9d..7f75343 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1348,6 +1348,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
+ 					ret = -EFAULT;
+ 					goto error;
+ 				}
++				uurb->buffer += u;
+ 			}
+ 			totlen -= u;
+ 		}
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 445455a..fb6e97f 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -372,6 +372,10 @@ static int usb_probe_interface(struct device *dev)
+ 	intf->condition = USB_INTERFACE_UNBOUND;
+ 	usb_cancel_queued_reset(intf);
+ 
++	/* If the LPM disable succeeded, balance the ref counts. */
++	if (!lpm_disable_error)
++		usb_unlocked_enable_lpm(udev);
++
+ 	/* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ 	if (driver->supports_autosuspend)
+ 		pm_runtime_disable(dev);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 128a804..bbff143 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3258,16 +3258,6 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
+ 
+ 	if (enable) {
+ 		/*
+-		 * First, let the device know about the exit latencies
+-		 * associated with the link state we're about to enable.
+-		 */
+-		ret = usb_req_set_sel(udev, state);
+-		if (ret < 0) {
+-			dev_warn(&udev->dev, "Set SEL for device-initiated "
+-					"%s failed.\n", usb3_lpm_names[state]);
+-			return -EBUSY;
+-		}
+-		/*
+ 		 * Now send the control transfer to enable device-initiated LPM
+ 		 * for either U1 or U2.
+ 		 */
+@@ -3352,7 +3342,28 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
+ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
+ 		enum usb3_link_state state)
+ {
+-	int timeout;
++	int timeout, ret;
++	__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
++	__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
++
++	/* If the device says it doesn't have *any* exit latency to come out of
++	 * U1 or U2, it's probably lying.  Assume it doesn't implement that link
++	 * state.
++	 */
++	if ((state == USB3_LPM_U1 && u1_mel == 0) ||
++			(state == USB3_LPM_U2 && u2_mel == 0))
++		return;
++
++	/*
++	 * First, let the device know about the exit latencies
++	 * associated with the link state we're about to enable.
++	 */
++	ret = usb_req_set_sel(udev, state);
++	if (ret < 0) {
++		dev_warn(&udev->dev, "Set SEL for device-initiated %s failed.\n",
++				usb3_lpm_names[state]);
++		return;
++	}
+ 
+ 	/* We allow the host controller to set the U1/U2 timeout internally
+ 	 * first, so that it can change its schedule to account for the
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c2813c2b..eb0fd10 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1899,6 +1899,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+ 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ 	WARN_ON_ONCE(ret);
+ 	dep->resource_index = 0;
++	dep->flags &= ~DWC3_EP_BUSY;
+ }
+ 
+ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index a49868d..0644f65 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -479,7 +479,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+ 
+ 	if (strstr(dmi_product_name, "Z420") ||
+ 			strstr(dmi_product_name, "Z620") ||
+-			strstr(dmi_product_name, "Z820"))
++			strstr(dmi_product_name, "Z820") ||
++			strstr(dmi_product_name, "Z1"))
+ 		return true;
+ 
+ 	return false;
+diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
+index 7a95ab8..ee9ec13 100644
+--- a/drivers/usb/musb/am35x.c
++++ b/drivers/usb/musb/am35x.c
+@@ -312,6 +312,12 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
++	/* Drop spurious RX and TX if device is disconnected */
++	if (musb->int_usb & MUSB_INTR_DISCONNECT) {
++		musb->int_tx = 0;
++		musb->int_rx = 0;
++	}
++
+ 	if (musb->int_tx || musb->int_rx || musb->int_usb)
+ 		ret |= musb_interrupt(musb);
+ 
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index f8ce97d..a6758ff 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -126,9 +126,6 @@ static inline int calc_divisor(int bps)
+ 
+ static int ark3116_attach(struct usb_serial *serial)
+ {
+-	struct usb_serial_port *port = serial->port[0];
+-	struct ark3116_private *priv;
+-
+ 	/* make sure we have our end-points */
+ 	if ((serial->num_bulk_in == 0) ||
+ 	    (serial->num_bulk_out == 0) ||
+@@ -143,8 +140,15 @@ static int ark3116_attach(struct usb_serial *serial)
+ 		return -EINVAL;
+ 	}
+ 
+-	priv = kzalloc(sizeof(struct ark3116_private),
+-		       GFP_KERNEL);
++	return 0;
++}
++
++static int ark3116_port_probe(struct usb_serial_port *port)
++{
++	struct usb_serial *serial = port->serial;
++	struct ark3116_private *priv;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+@@ -199,18 +203,15 @@ static int ark3116_attach(struct usb_serial *serial)
+ 	return 0;
+ }
+ 
+-static void ark3116_release(struct usb_serial *serial)
++static int ark3116_port_remove(struct usb_serial_port *port)
+ {
+-	struct usb_serial_port *port = serial->port[0];
+ 	struct ark3116_private *priv = usb_get_serial_port_data(port);
+ 
+ 	/* device is closed, so URBs and DMA should be down */
+-
+-	usb_set_serial_port_data(port, NULL);
+-
+ 	mutex_destroy(&priv->hw_lock);
+-
+ 	kfree(priv);
++
++	return 0;
+ }
+ 
+ static void ark3116_init_termios(struct tty_struct *tty)
+@@ -725,7 +726,8 @@ static struct usb_serial_driver ark3116_device = {
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
+ 	.attach =		ark3116_attach,
+-	.release =		ark3116_release,
++	.port_probe =		ark3116_port_probe,
++	.port_remove =		ark3116_port_remove,
+ 	.set_termios =		ark3116_set_termios,
+ 	.init_termios =		ark3116_init_termios,
+ 	.ioctl =		ark3116_ioctl,
+diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
+index 6b73656..89a3dd3 100644
+--- a/drivers/usb/serial/belkin_sa.c
++++ b/drivers/usb/serial/belkin_sa.c
+@@ -47,8 +47,8 @@ static bool debug;
+ #define DRIVER_DESC "USB Belkin Serial converter driver"
+ 
+ /* function prototypes for a Belkin USB Serial Adapter F5U103 */
+-static int  belkin_sa_startup(struct usb_serial *serial);
+-static void belkin_sa_release(struct usb_serial *serial);
++static int belkin_sa_port_probe(struct usb_serial_port *port);
++static int belkin_sa_port_remove(struct usb_serial_port *port);
+ static int  belkin_sa_open(struct tty_struct *tty,
+ 			struct usb_serial_port *port);
+ static void belkin_sa_close(struct usb_serial_port *port);
+@@ -90,8 +90,8 @@ static struct usb_serial_driver belkin_device = {
+ 	.break_ctl =		belkin_sa_break_ctl,
+ 	.tiocmget =		belkin_sa_tiocmget,
+ 	.tiocmset =		belkin_sa_tiocmset,
+-	.attach =		belkin_sa_startup,
+-	.release =		belkin_sa_release,
++	.port_probe =		belkin_sa_port_probe,
++	.port_remove =		belkin_sa_port_remove,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+@@ -120,17 +120,15 @@ struct belkin_sa_private {
+ 					    (c), BELKIN_SA_SET_REQUEST_TYPE, \
+ 					    (v), 0, NULL, 0, WDR_TIMEOUT)
+ 
+-/* do some startup allocations not currently performed by usb_serial_probe() */
+-static int belkin_sa_startup(struct usb_serial *serial)
++static int belkin_sa_port_probe(struct usb_serial_port *port)
+ {
+-	struct usb_device *dev = serial->dev;
++	struct usb_device *dev = port->serial->dev;
+ 	struct belkin_sa_private *priv;
+ 
+-	/* allocate the private data structure */
+ 	priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL);
+ 	if (!priv)
+-		return -1; /* error */
+-	/* set initial values for control structures */
++		return -ENOMEM;
++
+ 	spin_lock_init(&priv->lock);
+ 	priv->control_state = 0;
+ 	priv->last_lsr = 0;
+@@ -142,18 +140,19 @@ static int belkin_sa_startup(struct usb_serial *serial)
+ 					le16_to_cpu(dev->descriptor.bcdDevice),
+ 					priv->bad_flow_control);
+ 
+-	init_waitqueue_head(&serial->port[0]->write_wait);
+-	usb_set_serial_port_data(serial->port[0], priv);
++	usb_set_serial_port_data(port, priv);
+ 
+ 	return 0;
+ }
+ 
+-static void belkin_sa_release(struct usb_serial *serial)
++static int belkin_sa_port_remove(struct usb_serial_port *port)
+ {
+-	int i;
++	struct belkin_sa_private *priv;
+ 
+-	for (i = 0; i < serial->num_ports; ++i)
+-		kfree(usb_get_serial_port_data(serial->port[i]));
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static int belkin_sa_open(struct tty_struct *tty,
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 1e71079..4ea9e33 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -164,7 +164,7 @@ static const struct usb_device_id id_table[] = {
+ 
+ MODULE_DEVICE_TABLE(usb, id_table);
+ 
+-struct cp210x_port_private {
++struct cp210x_serial_private {
+ 	__u8			bInterfaceNumber;
+ };
+ 
+@@ -278,7 +278,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ 		unsigned int *data, int size)
+ {
+ 	struct usb_serial *serial = port->serial;
+-	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
++	struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
+ 	__le32 *buf;
+ 	int result, i, length;
+ 
+@@ -294,7 +294,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ 	/* Issue the request, attempting to read 'size' bytes */
+ 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_INTERFACE_TO_HOST, 0x0000,
+-				port_priv->bInterfaceNumber, buf, size,
++				spriv->bInterfaceNumber, buf, size,
+ 				USB_CTRL_GET_TIMEOUT);
+ 
+ 	/* Convert data into an array of integers */
+@@ -326,7 +326,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ 		unsigned int *data, int size)
+ {
+ 	struct usb_serial *serial = port->serial;
+-	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
++	struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
+ 	__le32 *buf;
+ 	int result, i, length;
+ 
+@@ -348,13 +348,13 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_HOST_TO_INTERFACE, 0x0000,
+-				port_priv->bInterfaceNumber, buf, size,
++				spriv->bInterfaceNumber, buf, size,
+ 				USB_CTRL_SET_TIMEOUT);
+ 	} else {
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_HOST_TO_INTERFACE, data[0],
+-				port_priv->bInterfaceNumber, NULL, 0,
++				spriv->bInterfaceNumber, NULL, 0,
+ 				USB_CTRL_SET_TIMEOUT);
+ 	}
+ 
+@@ -854,37 +854,30 @@ static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
+ 
+ static int cp210x_startup(struct usb_serial *serial)
+ {
+-	struct cp210x_port_private *port_priv;
+-	int i;
++	struct usb_host_interface *cur_altsetting;
++	struct cp210x_serial_private *spriv;
+ 
+ 	/* cp210x buffers behave strangely unless device is reset */
+ 	usb_reset_device(serial->dev);
+ 
+-	for (i = 0; i < serial->num_ports; i++) {
+-		port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
+-		if (!port_priv)
+-			return -ENOMEM;
++	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
++	if (!spriv)
++		return -ENOMEM;
+ 
+-		memset(port_priv, 0x00, sizeof(*port_priv));
+-		port_priv->bInterfaceNumber =
+-		    serial->interface->cur_altsetting->desc.bInterfaceNumber;
++	cur_altsetting = serial->interface->cur_altsetting;
++	spriv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
+ 
+-		usb_set_serial_port_data(serial->port[i], port_priv);
+-	}
++	usb_set_serial_data(serial, spriv);
+ 
+ 	return 0;
+ }
+ 
+ static void cp210x_release(struct usb_serial *serial)
+ {
+-	struct cp210x_port_private *port_priv;
+-	int i;
++	struct cp210x_serial_private *spriv;
+ 
+-	for (i = 0; i < serial->num_ports; i++) {
+-		port_priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(port_priv);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
++	spriv = usb_get_serial_data(serial);
++	kfree(spriv);
+ }
+ 
+ module_usb_serial_driver(serial_drivers, id_table);
+diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
+index 3aa0b53..bfec6f4 100644
+--- a/drivers/usb/serial/cyberjack.c
++++ b/drivers/usb/serial/cyberjack.c
+@@ -57,9 +57,9 @@ static bool debug;
+ #define CYBERJACK_PRODUCT_ID	0x0100
+ 
+ /* Function prototypes */
+-static int cyberjack_startup(struct usb_serial *serial);
+ static void cyberjack_disconnect(struct usb_serial *serial);
+-static void cyberjack_release(struct usb_serial *serial);
++static int cyberjack_port_probe(struct usb_serial_port *port);
++static int cyberjack_port_remove(struct usb_serial_port *port);
+ static int  cyberjack_open(struct tty_struct *tty,
+ 	struct usb_serial_port *port);
+ static void cyberjack_close(struct usb_serial_port *port);
+@@ -85,9 +85,9 @@ static struct usb_serial_driver cyberjack_device = {
+ 	.description =		"Reiner SCT Cyberjack USB card reader",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
+-	.attach =		cyberjack_startup,
+ 	.disconnect =		cyberjack_disconnect,
+-	.release =		cyberjack_release,
++	.port_probe =		cyberjack_port_probe,
++	.port_remove =		cyberjack_port_remove,
+ 	.open =			cyberjack_open,
+ 	.close =		cyberjack_close,
+ 	.write =		cyberjack_write,
+@@ -109,55 +109,45 @@ struct cyberjack_private {
+ 	short		wrsent;		/* Data already sent */
+ };
+ 
+-/* do some startup allocations not currently performed by usb_serial_probe() */
+-static int cyberjack_startup(struct usb_serial *serial)
++static int cyberjack_port_probe(struct usb_serial_port *port)
+ {
+ 	struct cyberjack_private *priv;
+-	int i;
++	int result;
+ 
+-	/* allocate the private data structure */
+ 	priv = kmalloc(sizeof(struct cyberjack_private), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	/* set initial values */
+ 	spin_lock_init(&priv->lock);
+ 	priv->rdtodo = 0;
+ 	priv->wrfilled = 0;
+ 	priv->wrsent = 0;
+-	usb_set_serial_port_data(serial->port[0], priv);
+ 
+-	init_waitqueue_head(&serial->port[0]->write_wait);
++	usb_set_serial_port_data(port, priv);
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		int result;
+-		result = usb_submit_urb(serial->port[i]->interrupt_in_urb,
+-					GFP_KERNEL);
+-		if (result)
+-			dev_err(&serial->dev->dev,
+-				"usb_submit_urb(read int) failed\n");
+-		dbg("%s - usb_submit_urb(int urb)", __func__);
+-	}
++	result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
++	if (result)
++		dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
+ 
+ 	return 0;
+ }
+ 
+-static void cyberjack_disconnect(struct usb_serial *serial)
++static int cyberjack_port_remove(struct usb_serial_port *port)
+ {
+-	int i;
++	struct cyberjack_private *priv;
+ 
+-	for (i = 0; i < serial->num_ports; ++i)
+-		usb_kill_urb(serial->port[i]->interrupt_in_urb);
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+-static void cyberjack_release(struct usb_serial *serial)
++static void cyberjack_disconnect(struct usb_serial *serial)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		/* My special items, the standard routines free my urbs */
+-		kfree(usb_get_serial_port_data(serial->port[i]));
+-	}
++	for (i = 0; i < serial->num_ports; ++i)
++		usb_kill_urb(serial->port[i]->interrupt_in_urb);
+ }
+ 
+ static int  cyberjack_open(struct tty_struct *tty,
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index b78c34e..5ad6e7d 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -124,10 +124,10 @@ struct cypress_private {
+ };
+ 
+ /* function prototypes for the Cypress USB to serial device */
+-static int  cypress_earthmate_startup(struct usb_serial *serial);
+-static int  cypress_hidcom_startup(struct usb_serial *serial);
+-static int  cypress_ca42v2_startup(struct usb_serial *serial);
+-static void cypress_release(struct usb_serial *serial);
++static int  cypress_earthmate_port_probe(struct usb_serial_port *port);
++static int  cypress_hidcom_port_probe(struct usb_serial_port *port);
++static int  cypress_ca42v2_port_probe(struct usb_serial_port *port);
++static int  cypress_port_remove(struct usb_serial_port *port);
+ static int  cypress_open(struct tty_struct *tty, struct usb_serial_port *port);
+ static void cypress_close(struct usb_serial_port *port);
+ static void cypress_dtr_rts(struct usb_serial_port *port, int on);
+@@ -157,8 +157,8 @@ static struct usb_serial_driver cypress_earthmate_device = {
+ 	.description =			"DeLorme Earthmate USB",
+ 	.id_table =			id_table_earthmate,
+ 	.num_ports =			1,
+-	.attach =			cypress_earthmate_startup,
+-	.release =			cypress_release,
++	.port_probe =			cypress_earthmate_port_probe,
++	.port_remove =			cypress_port_remove,
+ 	.open =				cypress_open,
+ 	.close =			cypress_close,
+ 	.dtr_rts =			cypress_dtr_rts,
+@@ -183,8 +183,8 @@ static struct usb_serial_driver cypress_hidcom_device = {
+ 	.description =			"HID->COM RS232 Adapter",
+ 	.id_table =			id_table_cyphidcomrs232,
+ 	.num_ports =			1,
+-	.attach =			cypress_hidcom_startup,
+-	.release =			cypress_release,
++	.port_probe =			cypress_hidcom_port_probe,
++	.port_remove =			cypress_port_remove,
+ 	.open =				cypress_open,
+ 	.close =			cypress_close,
+ 	.dtr_rts =			cypress_dtr_rts,
+@@ -209,8 +209,8 @@ static struct usb_serial_driver cypress_ca42v2_device = {
+ 	.description =			"Nokia CA-42 V2 Adapter",
+ 	.id_table =			id_table_nokiaca42v2,
+ 	.num_ports =			1,
+-	.attach =			cypress_ca42v2_startup,
+-	.release =			cypress_release,
++	.port_probe =			cypress_ca42v2_port_probe,
++	.port_remove =			cypress_port_remove,
+ 	.open =				cypress_open,
+ 	.close =			cypress_close,
+ 	.dtr_rts =			cypress_dtr_rts,
+@@ -437,10 +437,10 @@ static void cypress_set_dead(struct usb_serial_port *port)
+  *****************************************************************************/
+ 
+ 
+-static int generic_startup(struct usb_serial *serial)
++static int cypress_generic_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct cypress_private *priv;
+-	struct usb_serial_port *port = serial->port[0];
+ 
+ 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
+ 	if (!priv)
+@@ -489,15 +489,17 @@ static int generic_startup(struct usb_serial *serial)
+ }
+ 
+ 
+-static int cypress_earthmate_startup(struct usb_serial *serial)
++static int cypress_earthmate_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct cypress_private *priv;
+-	struct usb_serial_port *port = serial->port[0];
++	int ret;
+ 
+-	if (generic_startup(serial)) {
++	ret = cypress_generic_port_probe(port);
++	if (ret) {
+ 		dbg("%s - Failed setting up port %d", __func__,
+ 				port->number);
+-		return 1;
++		return ret;
+ 	}
+ 
+ 	priv = usb_get_serial_port_data(port);
+@@ -517,54 +519,52 @@ static int cypress_earthmate_startup(struct usb_serial *serial)
+ 	}
+ 
+ 	return 0;
+-} /* cypress_earthmate_startup */
+-
++}
+ 
+-static int cypress_hidcom_startup(struct usb_serial *serial)
++static int cypress_hidcom_port_probe(struct usb_serial_port *port)
+ {
+ 	struct cypress_private *priv;
++	int ret;
+ 
+-	if (generic_startup(serial)) {
+-		dbg("%s - Failed setting up port %d", __func__,
+-				serial->port[0]->number);
+-		return 1;
++	ret = cypress_generic_port_probe(port);
++	if (ret) {
++ 		dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
++		return ret;
+ 	}
+ 
+-	priv = usb_get_serial_port_data(serial->port[0]);
++	priv = usb_get_serial_port_data(port);
+ 	priv->chiptype = CT_CYPHIDCOM;
+ 
+ 	return 0;
+-} /* cypress_hidcom_startup */
+-
++}
+ 
+-static int cypress_ca42v2_startup(struct usb_serial *serial)
++static int cypress_ca42v2_port_probe(struct usb_serial_port *port)
+ {
+ 	struct cypress_private *priv;
++	int ret;
+ 
+-	if (generic_startup(serial)) {
+-		dbg("%s - Failed setting up port %d", __func__,
+-				serial->port[0]->number);
+-		return 1;
++	ret = cypress_generic_port_probe(port);
++	if (ret) {
++ 		dev_dbg(&port->dev, "%s - Failed setting up port\n", __func__);
++		return ret;
+ 	}
+ 
+-	priv = usb_get_serial_port_data(serial->port[0]);
++	priv = usb_get_serial_port_data(port);
+ 	priv->chiptype = CT_CA42V2;
+ 
+ 	return 0;
+-} /* cypress_ca42v2_startup */
+-
++}
+ 
+-static void cypress_release(struct usb_serial *serial)
++static int cypress_port_remove(struct usb_serial_port *port)
+ {
+ 	struct cypress_private *priv;
+ 
+-	/* all open ports are closed at this point */
+-	priv = usb_get_serial_port_data(serial->port[0]);
++	priv = usb_get_serial_port_data(port);
++
++	kfifo_free(&priv->write_fifo);
++	kfree(priv);
+ 
+-	if (priv) {
+-		kfifo_free(&priv->write_fifo);
+-		kfree(priv);
+-	}
++	return 0;
+ }
+ 
+ 
+diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
+index 499b15f..be94436 100644
+--- a/drivers/usb/serial/f81232.c
++++ b/drivers/usb/serial/f81232.c
+@@ -319,39 +319,30 @@ static int f81232_ioctl(struct tty_struct *tty,
+ 	return -ENOIOCTLCMD;
+ }
+ 
+-static int f81232_startup(struct usb_serial *serial)
++static int f81232_port_probe(struct usb_serial_port *port)
+ {
+ 	struct f81232_private *priv;
+-	int i;
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		priv = kzalloc(sizeof(struct f81232_private), GFP_KERNEL);
+-		if (!priv)
+-			goto cleanup;
+-		spin_lock_init(&priv->lock);
+-		init_waitqueue_head(&priv->delta_msr_wait);
+-		usb_set_serial_port_data(serial->port[i], priv);
+-	}
+-	return 0;
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
+ 
+-cleanup:
+-	for (--i; i >= 0; --i) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
+-	return -ENOMEM;
++	spin_lock_init(&priv->lock);
++	init_waitqueue_head(&priv->delta_msr_wait);
++
++	usb_set_serial_port_data(port, priv);
++
++	return 0;
+ }
+ 
+-static void f81232_release(struct usb_serial *serial)
++static int f81232_port_remove(struct usb_serial_port *port)
+ {
+-	int i;
+ 	struct f81232_private *priv;
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-	}
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static struct usb_serial_driver f81232_device = {
+@@ -374,8 +365,8 @@ static struct usb_serial_driver f81232_device = {
+ 	.tiocmset =		f81232_tiocmset,
+ 	.process_read_urb =	f81232_process_read_urb,
+ 	.read_int_callback =	f81232_read_int_callback,
+-	.attach =		f81232_startup,
+-	.release =		f81232_release,
++	.port_probe =		f81232_port_probe,
++	.port_remove =		f81232_port_remove,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 346c15a..de9cfd4 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -1414,11 +1414,10 @@ static void timeout_handler(unsigned long data)
+ 
+ 
+ 
+-static int garmin_attach(struct usb_serial *serial)
++static int garmin_port_probe(struct usb_serial_port *port)
+ {
+-	int status = 0;
+-	struct usb_serial_port *port = serial->port[0];
+-	struct garmin_data *garmin_data_p = NULL;
++	int status;
++	struct garmin_data *garmin_data_p;
+ 
+ 	garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
+ 	if (garmin_data_p == NULL) {
+@@ -1443,22 +1442,14 @@ static int garmin_attach(struct usb_serial *serial)
+ }
+ 
+ 
+-static void garmin_disconnect(struct usb_serial *serial)
++static int garmin_port_remove(struct usb_serial_port *port)
+ {
+-	struct usb_serial_port *port = serial->port[0];
+ 	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+ 
+ 	usb_kill_urb(port->interrupt_in_urb);
+ 	del_timer_sync(&garmin_data_p->timer);
+-}
+-
+-
+-static void garmin_release(struct usb_serial *serial)
+-{
+-	struct usb_serial_port *port = serial->port[0];
+-	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+-
+ 	kfree(garmin_data_p);
++	return 0;
+ }
+ 
+ 
+@@ -1475,9 +1466,8 @@ static struct usb_serial_driver garmin_device = {
+ 	.close               = garmin_close,
+ 	.throttle            = garmin_throttle,
+ 	.unthrottle          = garmin_unthrottle,
+-	.attach              = garmin_attach,
+-	.disconnect          = garmin_disconnect,
+-	.release             = garmin_release,
++	.port_probe		= garmin_port_probe,
++	.port_remove		= garmin_port_remove,
+ 	.write               = garmin_write,
+ 	.write_room          = garmin_write_room,
+ 	.write_bulk_callback = garmin_write_bulk_callback,
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index e1f5ccd..dca27a5 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -228,6 +228,8 @@ static int  edge_get_icount(struct tty_struct *tty,
+ static int  edge_startup(struct usb_serial *serial);
+ static void edge_disconnect(struct usb_serial *serial);
+ static void edge_release(struct usb_serial *serial);
++static int edge_port_probe(struct usb_serial_port *port);
++static int edge_port_remove(struct usb_serial_port *port);
+ 
+ #include "io_tables.h"	/* all of the devices that this driver supports */
+ 
+@@ -2921,9 +2923,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
+ static int edge_startup(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial;
+-	struct edgeport_port *edge_port;
+ 	struct usb_device *dev;
+-	int i, j;
++	int i;
+ 	int response;
+ 	bool interrupt_in_found;
+ 	bool bulk_in_found;
+@@ -3007,26 +3008,6 @@ static int edge_startup(struct usb_serial *serial)
+ 	/* we set up the pointers to the endpoints in the edge_open function,
+ 	 * as the structures aren't created yet. */
+ 
+-	/* set up our port private structures */
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
+-		if (edge_port == NULL) {
+-			dev_err(&serial->dev->dev, "%s - Out of memory\n",
+-								   __func__);
+-			for (j = 0; j < i; ++j) {
+-				kfree(usb_get_serial_port_data(serial->port[j]));
+-				usb_set_serial_port_data(serial->port[j],
+-									NULL);
+-			}
+-			usb_set_serial_data(serial, NULL);
+-			kfree(edge_serial);
+-			return -ENOMEM;
+-		}
+-		spin_lock_init(&edge_port->ep_lock);
+-		edge_port->port = serial->port[i];
+-		usb_set_serial_port_data(serial->port[i], edge_port);
+-	}
+-
+ 	response = 0;
+ 
+ 	if (edge_serial->is_epic) {
+@@ -3175,12 +3156,35 @@ static void edge_release(struct usb_serial *serial)
+ 
+ 	dbg("%s", __func__);
+ 
+-	for (i = 0; i < serial->num_ports; ++i)
+-		kfree(usb_get_serial_port_data(serial->port[i]));
+-
+ 	kfree(edge_serial);
+ }
+ 
++static int edge_port_probe(struct usb_serial_port *port)
++{
++	struct edgeport_port *edge_port;
++
++	edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
++	if (!edge_port)
++		return -ENOMEM;
++
++	spin_lock_init(&edge_port->ep_lock);
++	edge_port->port = port;
++
++	usb_set_serial_port_data(port, edge_port);
++
++	return 0;
++}
++
++static int edge_port_remove(struct usb_serial_port *port)
++{
++	struct edgeport_port *edge_port;
++
++	edge_port = usb_get_serial_port_data(port);
++	kfree(edge_port);
++
++	return 0;
++}
++
+ module_usb_serial_driver(serial_drivers, id_table_combined);
+ 
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
+index 350afdd..1511dd0 100644
+--- a/drivers/usb/serial/io_tables.h
++++ b/drivers/usb/serial/io_tables.h
+@@ -110,6 +110,8 @@ static struct usb_serial_driver edgeport_2port_device = {
+ 	.attach			= edge_startup,
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
++	.port_probe		= edge_port_probe,
++	.port_remove		= edge_port_remove,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+@@ -139,6 +141,8 @@ static struct usb_serial_driver edgeport_4port_device = {
+ 	.attach			= edge_startup,
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
++	.port_probe		= edge_port_probe,
++	.port_remove		= edge_port_remove,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+@@ -168,6 +172,8 @@ static struct usb_serial_driver edgeport_8port_device = {
+ 	.attach			= edge_startup,
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
++	.port_probe		= edge_port_probe,
++	.port_remove		= edge_port_remove,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+@@ -197,6 +203,8 @@ static struct usb_serial_driver epic_device = {
+ 	.attach			= edge_startup,
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
++	.port_probe		= edge_port_probe,
++	.port_remove		= edge_port_remove,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 3936904..ccf297d 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -2594,12 +2594,7 @@ static void edge_break(struct tty_struct *tty, int break_state)
+ static int edge_startup(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial;
+-	struct edgeport_port *edge_port;
+-	struct usb_device *dev;
+ 	int status;
+-	int i;
+-
+-	dev = serial->dev;
+ 
+ 	/* create our private serial structure */
+ 	edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
+@@ -2617,40 +2612,7 @@ static int edge_startup(struct usb_serial *serial)
+ 		return status;
+ 	}
+ 
+-	/* set up our port private structures */
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
+-		if (edge_port == NULL) {
+-			dev_err(&serial->dev->dev, "%s - Out of memory\n",
+-								__func__);
+-			goto cleanup;
+-		}
+-		spin_lock_init(&edge_port->ep_lock);
+-		if (kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
+-								GFP_KERNEL)) {
+-			dev_err(&serial->dev->dev, "%s - Out of memory\n",
+-								__func__);
+-			kfree(edge_port);
+-			goto cleanup;
+-		}
+-		edge_port->port = serial->port[i];
+-		edge_port->edge_serial = edge_serial;
+-		usb_set_serial_port_data(serial->port[i], edge_port);
+-		edge_port->bUartMode = default_uart_mode;
+-	}
+-
+ 	return 0;
+-
+-cleanup:
+-	for (--i; i >= 0; --i) {
+-		edge_port = usb_get_serial_port_data(serial->port[i]);
+-		kfifo_free(&edge_port->write_fifo);
+-		kfree(edge_port);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
+-	kfree(edge_serial);
+-	usb_set_serial_data(serial, NULL);
+-	return -ENOMEM;
+ }
+ 
+ static void edge_disconnect(struct usb_serial *serial)
+@@ -2660,19 +2622,54 @@ static void edge_disconnect(struct usb_serial *serial)
+ 
+ static void edge_release(struct usb_serial *serial)
+ {
+-	int i;
++	kfree(usb_get_serial_data(serial));
++}
++
++static int edge_port_probe(struct usb_serial_port *port)
++{
+ 	struct edgeport_port *edge_port;
++	int ret;
+ 
+-	dbg("%s", __func__);
++	edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
++	if (!edge_port)
++		return -ENOMEM;
++
++	ret = kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
++								GFP_KERNEL);
++	if (ret) {
++		kfree(edge_port);
++		return -ENOMEM;
++	}
++
++	spin_lock_init(&edge_port->ep_lock);
++	edge_port->port = port;
++	edge_port->edge_serial = usb_get_serial_data(port->serial);
++	edge_port->bUartMode = default_uart_mode;
++
++	usb_set_serial_port_data(port, edge_port);
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		edge_port = usb_get_serial_port_data(serial->port[i]);
++	ret = edge_create_sysfs_attrs(port);
++	if (ret) {
+ 		kfifo_free(&edge_port->write_fifo);
+ 		kfree(edge_port);
++		return ret;
+ 	}
+-	kfree(usb_get_serial_data(serial));
++
++	return 0;
+ }
+ 
++static int edge_port_remove(struct usb_serial_port *port)
++{
++	struct edgeport_port *edge_port;
++
++	edge_port = usb_get_serial_port_data(port);
++
++	edge_remove_sysfs_attrs(port);
++	kfifo_free(&edge_port->write_fifo);
++	kfree(edge_port);
++
++	return 0;
++}
+ 
+ /* Sysfs Attributes */
+ 
+@@ -2732,8 +2729,8 @@ static struct usb_serial_driver edgeport_1port_device = {
+ 	.attach			= edge_startup,
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
+-	.port_probe		= edge_create_sysfs_attrs,
+-	.port_remove		= edge_remove_sysfs_attrs,
++	.port_probe		= edge_port_probe,
++	.port_remove		= edge_port_remove,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+@@ -2763,8 +2760,8 @@ static struct usb_serial_driver edgeport_2port_device = {
+ 	.attach			= edge_startup,
+ 	.disconnect		= edge_disconnect,
+ 	.release		= edge_release,
+-	.port_probe		= edge_create_sysfs_attrs,
+-	.port_remove		= edge_remove_sysfs_attrs,
++	.port_probe		= edge_port_probe,
++	.port_remove		= edge_port_remove,
+ 	.ioctl			= edge_ioctl,
+ 	.set_termios		= edge_set_termios,
+ 	.tiocmget		= edge_tiocmget,
+diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
+index 22b1eb5..cdf0f99 100644
+--- a/drivers/usb/serial/iuu_phoenix.c
++++ b/drivers/usb/serial/iuu_phoenix.c
+@@ -60,6 +60,8 @@ static int iuu_cardout;
+ static bool xmas;
+ static int vcc_default = 5;
+ 
++static int iuu_create_sysfs_attrs(struct usb_serial_port *port);
++static int iuu_remove_sysfs_attrs(struct usb_serial_port *port);
+ static void read_rxcmd_callback(struct urb *urb);
+ 
+ struct iuu_private {
+@@ -80,64 +82,64 @@ struct iuu_private {
+ 	u32 clk;
+ };
+ 
+-
+-static void iuu_free_buf(struct iuu_private *priv)
++static int iuu_port_probe(struct usb_serial_port *port)
+ {
+-	kfree(priv->buf);
+-	kfree(priv->dbgbuf);
+-	kfree(priv->writebuf);
+-}
++	struct iuu_private *priv;
++	int ret;
++
++	priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
+ 
+-static int iuu_alloc_buf(struct iuu_private *priv)
+-{
+ 	priv->buf = kzalloc(256, GFP_KERNEL);
+-	priv->dbgbuf = kzalloc(256, GFP_KERNEL);
+-	priv->writebuf = kzalloc(256, GFP_KERNEL);
+-	if (!priv->buf || !priv->dbgbuf || !priv->writebuf) {
+-		iuu_free_buf(priv);
+-		dbg("%s problem allocation buffer", __func__);
++	if (!priv->buf) {
++		kfree(priv);
+ 		return -ENOMEM;
+ 	}
+-	dbg("%s - Privates buffers allocation success", __func__);
+-	return 0;
+-}
+ 
+-static int iuu_startup(struct usb_serial *serial)
+-{
+-	struct iuu_private *priv;
+-	priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL);
+-	dbg("%s- priv allocation success", __func__);
+-	if (!priv)
++	priv->writebuf = kzalloc(256, GFP_KERNEL);
++	if (!priv->writebuf) {
++		kfree(priv->buf);
++		kfree(priv);
+ 		return -ENOMEM;
+-	if (iuu_alloc_buf(priv)) {
++	}
++
++	priv->dbgbuf = kzalloc(256, GFP_KERNEL);
++	if (!priv->dbgbuf) {
++		kfree(priv->writebuf);
++		kfree(priv->buf);
+ 		kfree(priv);
+ 		return -ENOMEM;
+ 	}
++
+ 	priv->vcc = vcc_default;
+ 	spin_lock_init(&priv->lock);
+ 	init_waitqueue_head(&priv->delta_msr_wait);
+-	usb_set_serial_port_data(serial->port[0], priv);
++
++	usb_set_serial_port_data(port, priv);
++
++	ret = iuu_create_sysfs_attrs(port);
++	if (ret) {
++		kfree(priv->writebuf);
++		kfree(priv->buf);
++		kfree(priv);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+-/* Release function */
+-static void iuu_release(struct usb_serial *serial)
++static int iuu_port_remove(struct usb_serial_port *port)
+ {
+-	struct usb_serial_port *port = serial->port[0];
+ 	struct iuu_private *priv = usb_get_serial_port_data(port);
+-	if (!port)
+-		return;
+-
+-	if (priv) {
+-		iuu_free_buf(priv);
+-		dbg("%s - I will free all", __func__);
+-		usb_set_serial_port_data(port, NULL);
+ 
+-		dbg("%s - priv is not anymore in port structure", __func__);
+-		kfree(priv);
++	iuu_remove_sysfs_attrs(port);
++	kfree(priv->dbgbuf);
++	kfree(priv->writebuf);
++	kfree(priv->buf);
++	kfree(priv);
+ 
+-		dbg("%s priv is now kfree", __func__);
+-	}
++	return 0;
+ }
+ 
+ static int iuu_tiocmset(struct tty_struct *tty,
+@@ -1231,8 +1233,6 @@ static struct usb_serial_driver iuu_device = {
+ 	.num_ports = 1,
+ 	.bulk_in_size = 512,
+ 	.bulk_out_size = 512,
+-	.port_probe = iuu_create_sysfs_attrs,
+-	.port_remove = iuu_remove_sysfs_attrs,
+ 	.open = iuu_open,
+ 	.close = iuu_close,
+ 	.write = iuu_uart_write,
+@@ -1241,8 +1241,8 @@ static struct usb_serial_driver iuu_device = {
+ 	.tiocmset = iuu_tiocmset,
+ 	.set_termios = iuu_set_termios,
+ 	.init_termios = iuu_init_termios,
+-	.attach = iuu_startup,
+-	.release = iuu_release,
++	.port_probe = iuu_port_probe,
++	.port_remove = iuu_port_remove,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
+index a4ac3cf..1e73171 100644
+--- a/drivers/usb/serial/keyspan_pda.c
++++ b/drivers/usb/serial/keyspan_pda.c
+@@ -735,29 +735,33 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+ MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+ #endif
+ 
+-static int keyspan_pda_startup(struct usb_serial *serial)
++static int keyspan_pda_port_probe(struct usb_serial_port *port)
+ {
+ 
+ 	struct keyspan_pda_private *priv;
+ 
+-	/* allocate the private data structures for all ports. Well, for all
+-	   one ports. */
+-
+ 	priv = kmalloc(sizeof(struct keyspan_pda_private), GFP_KERNEL);
+ 	if (!priv)
+-		return 1; /* error */
+-	usb_set_serial_port_data(serial->port[0], priv);
+-	init_waitqueue_head(&serial->port[0]->write_wait);
++		return -ENOMEM;
++
+ 	INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
+ 	INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
+-	priv->serial = serial;
+-	priv->port = serial->port[0];
++	priv->serial = port->serial;
++	priv->port = port;
++
++	usb_set_serial_port_data(port, priv);
++
+ 	return 0;
+ }
+ 
+-static void keyspan_pda_release(struct usb_serial *serial)
++static int keyspan_pda_port_remove(struct usb_serial_port *port)
+ {
+-	kfree(usb_get_serial_port_data(serial->port[0]));
++	struct keyspan_pda_private *priv;
++
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ #ifdef KEYSPAN
+@@ -808,8 +812,8 @@ static struct usb_serial_driver keyspan_pda_device = {
+ 	.break_ctl =		keyspan_pda_break_ctl,
+ 	.tiocmget =		keyspan_pda_tiocmget,
+ 	.tiocmset =		keyspan_pda_tiocmset,
+-	.attach =		keyspan_pda_startup,
+-	.release =		keyspan_pda_release,
++	.port_probe =		keyspan_pda_port_probe,
++	.port_remove =		keyspan_pda_port_remove,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
+index 5bed59c..366ddee 100644
+--- a/drivers/usb/serial/kl5kusb105.c
++++ b/drivers/usb/serial/kl5kusb105.c
+@@ -62,8 +62,8 @@ static bool debug;
+ /*
+  * Function prototypes
+  */
+-static int  klsi_105_startup(struct usb_serial *serial);
+-static void klsi_105_release(struct usb_serial *serial);
++static int klsi_105_port_probe(struct usb_serial_port *port);
++static int klsi_105_port_remove(struct usb_serial_port *port);
+ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
+ static void klsi_105_close(struct usb_serial_port *port);
+ static void klsi_105_set_termios(struct tty_struct *tty,
+@@ -101,8 +101,8 @@ static struct usb_serial_driver kl5kusb105d_device = {
+ 	/*.break_ctl =		klsi_105_break_ctl,*/
+ 	.tiocmget =		klsi_105_tiocmget,
+ 	.tiocmset =		klsi_105_tiocmset,
+-	.attach =		klsi_105_startup,
+-	.release =		klsi_105_release,
++	.port_probe =		klsi_105_port_probe,
++	.port_remove =		klsi_105_port_remove,
+ 	.throttle =		usb_serial_generic_throttle,
+ 	.unthrottle =		usb_serial_generic_unthrottle,
+ 	.process_read_urb =	klsi_105_process_read_urb,
+@@ -225,58 +225,40 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
+  * Driver's tty interface functions
+  */
+ 
+-static int klsi_105_startup(struct usb_serial *serial)
++static int klsi_105_port_probe(struct usb_serial_port *port)
+ {
+ 	struct klsi_105_private *priv;
+-	int i;
+ 
+-	/* check if we support the product id (see keyspan.c)
+-	 * FIXME
+-	 */
++	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
+ 
+-	/* allocate the private data structure */
+-	for (i = 0; i < serial->num_ports; i++) {
+-		priv = kmalloc(sizeof(struct klsi_105_private),
+-						   GFP_KERNEL);
+-		if (!priv) {
+-			dbg("%skmalloc for klsi_105_private failed.", __func__);
+-			i--;
+-			goto err_cleanup;
+-		}
+-		/* set initial values for control structures */
+-		priv->cfg.pktlen    = 5;
+-		priv->cfg.baudrate  = kl5kusb105a_sio_b9600;
+-		priv->cfg.databits  = kl5kusb105a_dtb_8;
+-		priv->cfg.unknown1  = 0;
+-		priv->cfg.unknown2  = 1;
++	/* set initial values for control structures */
++	priv->cfg.pktlen    = 5;
++	priv->cfg.baudrate  = kl5kusb105a_sio_b9600;
++	priv->cfg.databits  = kl5kusb105a_dtb_8;
++	priv->cfg.unknown1  = 0;
++	priv->cfg.unknown2  = 1;
+ 
+-		priv->line_state    = 0;
++	priv->line_state    = 0;
+ 
+-		usb_set_serial_port_data(serial->port[i], priv);
++	spin_lock_init(&priv->lock);
+ 
+-		spin_lock_init(&priv->lock);
++	/* priv->termios is left uninitialized until port opening */
+ 
+-		/* priv->termios is left uninitialized until port opening */
+-		init_waitqueue_head(&serial->port[i]->write_wait);
+-	}
++	usb_set_serial_port_data(port, priv);
+ 
+ 	return 0;
+-
+-err_cleanup:
+-	for (; i >= 0; i--) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
+-	return -ENOMEM;
+ }
+ 
+-static void klsi_105_release(struct usb_serial *serial)
++static int klsi_105_port_remove(struct usb_serial_port *port)
+ {
+-	int i;
++	struct klsi_105_private *priv;
+ 
+-	for (i = 0; i < serial->num_ports; ++i)
+-		kfree(usb_get_serial_port_data(serial->port[i]));
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index fafeabb..a579fdc 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -56,8 +56,8 @@ static bool debug;
+ 
+ 
+ /* Function prototypes */
+-static int  kobil_startup(struct usb_serial *serial);
+-static void kobil_release(struct usb_serial *serial);
++static int kobil_port_probe(struct usb_serial_port *probe);
++static int kobil_port_remove(struct usb_serial_port *probe);
+ static int  kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
+ static void kobil_close(struct usb_serial_port *port);
+ static int  kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+@@ -91,8 +91,8 @@ static struct usb_serial_driver kobil_device = {
+ 	.description =		"KOBIL USB smart card terminal",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
+-	.attach =		kobil_startup,
+-	.release =		kobil_release,
++	.port_probe =		kobil_port_probe,
++	.port_remove =		kobil_port_remove,
+ 	.ioctl =		kobil_ioctl,
+ 	.set_termios =		kobil_set_termios,
+ 	.init_termios =		kobil_init_termios,
+@@ -119,9 +119,10 @@ struct kobil_private {
+ };
+ 
+ 
+-static int kobil_startup(struct usb_serial *serial)
++static int kobil_port_probe(struct usb_serial_port *port)
+ {
+ 	int i;
++	struct usb_serial *serial = port->serial;
+ 	struct kobil_private *priv;
+ 	struct usb_device *pdev;
+ 	struct usb_host_config *actconfig;
+@@ -152,7 +153,7 @@ static int kobil_startup(struct usb_serial *serial)
+ 		printk(KERN_DEBUG "KOBIL KAAN SIM detected\n");
+ 		break;
+ 	}
+-	usb_set_serial_port_data(serial->port[0], priv);
++	usb_set_serial_port_data(port, priv);
+ 
+ 	/* search for the necessary endpoints */
+ 	pdev = serial->dev;
+@@ -180,12 +181,14 @@ static int kobil_startup(struct usb_serial *serial)
+ }
+ 
+ 
+-static void kobil_release(struct usb_serial *serial)
++static int kobil_port_remove(struct usb_serial_port *port)
+ {
+-	int i;
++	struct kobil_private *priv;
+ 
+-	for (i = 0; i < serial->num_ports; ++i)
+-		kfree(usb_get_serial_port_data(serial->port[i]));
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static void kobil_init_termios(struct tty_struct *tty)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5f30800..a0542ca 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
+ 	.reserved = BIT(5),
+ };
+ 
++static const struct option_blacklist_info net_intf6_blacklist = {
++	.reserved = BIT(6),
++};
++
+ static const struct option_blacklist_info zte_mf626_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
+ 	.reserved = BIT(4),
+ };
+ 
++static const struct option_blacklist_info zte_1255_blacklist = {
++	.reserved = BIT(3) | BIT(4),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+@@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+@@ -880,13 +895,22 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
++	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+@@ -1002,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+@@ -1058,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
++		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+@@ -1071,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
+index 5976b65..3aa582e 100644
+--- a/drivers/usb/serial/oti6858.c
++++ b/drivers/usb/serial/oti6858.c
+@@ -139,8 +139,8 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
+ static int oti6858_tiocmget(struct tty_struct *tty);
+ static int oti6858_tiocmset(struct tty_struct *tty,
+ 				unsigned int set, unsigned int clear);
+-static int oti6858_startup(struct usb_serial *serial);
+-static void oti6858_release(struct usb_serial *serial);
++static int oti6858_port_probe(struct usb_serial_port *port);
++static int oti6858_port_remove(struct usb_serial_port *port);
+ 
+ /* device info */
+ static struct usb_serial_driver oti6858_device = {
+@@ -163,8 +163,8 @@ static struct usb_serial_driver oti6858_device = {
+ 	.write_bulk_callback =	oti6858_write_bulk_callback,
+ 	.write_room =		oti6858_write_room,
+ 	.chars_in_buffer =	oti6858_chars_in_buffer,
+-	.attach =		oti6858_startup,
+-	.release =		oti6858_release,
++	.port_probe =		oti6858_port_probe,
++	.port_remove =		oti6858_port_remove,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+@@ -333,36 +333,33 @@ static void send_data(struct work_struct *work)
+ 	usb_serial_port_softint(port);
+ }
+ 
+-static int oti6858_startup(struct usb_serial *serial)
++static int oti6858_port_probe(struct usb_serial_port *port)
+ {
+-	struct usb_serial_port *port = serial->port[0];
+ 	struct oti6858_private *priv;
+-	int i;
+-
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
+-		if (!priv)
+-			break;
+-
+-		spin_lock_init(&priv->lock);
+-		init_waitqueue_head(&priv->intr_wait);
+-/*		INIT_WORK(&priv->setup_work, setup_line, serial->port[i]); */
+-/*		INIT_WORK(&priv->write_work, send_data, serial->port[i]); */
+-		priv->port = port;
+-		INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
+-		INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
+-
+-		usb_set_serial_port_data(serial->port[i], priv);
+-	}
+-	if (i == serial->num_ports)
+-		return 0;
+ 
+-	for (--i; i >= 0; --i) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
+-	return -ENOMEM;
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	spin_lock_init(&priv->lock);
++	init_waitqueue_head(&priv->intr_wait);
++	priv->port = port;
++	INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
++	INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
++
++	usb_set_serial_port_data(port, priv);
++
++	return 0;
++}
++
++static int oti6858_port_remove(struct usb_serial_port *port)
++{
++	struct oti6858_private *priv;
++
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
+@@ -714,15 +711,6 @@ static int oti6858_ioctl(struct tty_struct *tty,
+ 	return -ENOIOCTLCMD;
+ }
+ 
+-
+-static void oti6858_release(struct usb_serial *serial)
+-{
+-	int i;
+-
+-	for (i = 0; i < serial->num_ports; ++i)
+-		kfree(usb_get_serial_port_data(serial->port[i]));
+-}
+-
+ static void oti6858_read_int_callback(struct urb *urb)
+ {
+ 	struct usb_serial_port *port =  urb->context;
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 13b8dd6..4d8d210 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -135,12 +135,15 @@ enum pl2303_type {
+ 	HX,		/* HX version of the pl2303 chip */
+ };
+ 
++struct pl2303_serial_private {
++	enum pl2303_type type;
++};
++
+ struct pl2303_private {
+ 	spinlock_t lock;
+ 	wait_queue_head_t delta_msr_wait;
+ 	u8 line_control;
+ 	u8 line_status;
+-	enum pl2303_type type;
+ };
+ 
+ static int pl2303_vendor_read(__u16 value, __u16 index,
+@@ -169,14 +172,19 @@ static int pl2303_vendor_write(__u16 value, __u16 index,
+ 
+ static int pl2303_startup(struct usb_serial *serial)
+ {
+-	struct pl2303_private *priv;
++	struct pl2303_serial_private *spriv;
+ 	enum pl2303_type type = type_0;
+ 	unsigned char *buf;
+-	int i;
++
++	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
++	if (!spriv)
++		return -ENOMEM;
+ 
+ 	buf = kmalloc(10, GFP_KERNEL);
+-	if (buf == NULL)
++	if (!buf) {
++		kfree(spriv);
+ 		return -ENOMEM;
++	}
+ 
+ 	if (serial->dev->descriptor.bDeviceClass == 0x02)
+ 		type = type_0;
+@@ -188,15 +196,8 @@ static int pl2303_startup(struct usb_serial *serial)
+ 		type = type_1;
+ 	dev_dbg(&serial->interface->dev, "device type: %d\n", type);
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		priv = kzalloc(sizeof(struct pl2303_private), GFP_KERNEL);
+-		if (!priv)
+-			goto cleanup;
+-		spin_lock_init(&priv->lock);
+-		init_waitqueue_head(&priv->delta_msr_wait);
+-		priv->type = type;
+-		usb_set_serial_port_data(serial->port[i], priv);
+-	}
++	spriv->type = type;
++	usb_set_serial_data(serial, spriv);
+ 
+ 	pl2303_vendor_read(0x8484, 0, serial, buf);
+ 	pl2303_vendor_write(0x0404, 0, serial);
+@@ -215,15 +216,40 @@ static int pl2303_startup(struct usb_serial *serial)
+ 
+ 	kfree(buf);
+ 	return 0;
++}
+ 
+-cleanup:
+-	kfree(buf);
+-	for (--i; i >= 0; --i) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
+-	return -ENOMEM;
++static void pl2303_release(struct usb_serial *serial)
++{
++	struct pl2303_serial_private *spriv;
++
++	spriv = usb_get_serial_data(serial);
++	kfree(spriv);
++}
++
++static int pl2303_port_probe(struct usb_serial_port *port)
++{
++	struct pl2303_private *priv;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	spin_lock_init(&priv->lock);
++	init_waitqueue_head(&priv->delta_msr_wait);
++
++	usb_set_serial_port_data(port, priv);
++
++	return 0;
++}
++
++static int pl2303_port_remove(struct usb_serial_port *port)
++{
++	struct pl2303_private *priv;
++
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static int set_control_lines(struct usb_device *dev, u8 value)
+@@ -242,6 +268,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ 		struct usb_serial_port *port, struct ktermios *old_termios)
+ {
+ 	struct usb_serial *serial = port->serial;
++	struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
+ 	struct pl2303_private *priv = usb_get_serial_port_data(port);
+ 	unsigned long flags;
+ 	unsigned int cflag;
+@@ -325,7 +352,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ 		}
+ 		if (baud > 1228800) {
+ 			/* type_0, type_1 only support up to 1228800 baud */
+-			if (priv->type != HX)
++			if (spriv->type != HX)
+ 				baud = 1228800;
+ 			else if (baud > 6000000)
+ 				baud = 6000000;
+@@ -428,7 +455,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ 	     buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+ 
+ 	if (cflag & CRTSCTS) {
+-		if (priv->type == HX)
++		if (spriv->type == HX)
+ 			pl2303_vendor_write(0x0, 0x61, serial);
+ 		else
+ 			pl2303_vendor_write(0x0, 0x41, serial);
+@@ -470,10 +497,10 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+ 	struct ktermios tmp_termios;
+ 	struct usb_serial *serial = port->serial;
+-	struct pl2303_private *priv = usb_get_serial_port_data(port);
++	struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
+ 	int result;
+ 
+-	if (priv->type != HX) {
++	if (spriv->type != HX) {
+ 		usb_clear_halt(serial->dev, port->write_urb->pipe);
+ 		usb_clear_halt(serial->dev, port->read_urb->pipe);
+ 	} else {
+@@ -657,17 +684,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
+ 		dev_err(&port->dev, "error sending break = %d\n", result);
+ }
+ 
+-static void pl2303_release(struct usb_serial *serial)
+-{
+-	int i;
+-	struct pl2303_private *priv;
+-
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-	}
+-}
+-
+ static void pl2303_update_line_status(struct usb_serial_port *port,
+ 				      unsigned char *data,
+ 				      unsigned int actual_length)
+@@ -829,6 +845,8 @@ static struct usb_serial_driver pl2303_device = {
+ 	.read_int_callback =	pl2303_read_int_callback,
+ 	.attach =		pl2303_startup,
+ 	.release =		pl2303_release,
++	.port_probe =		pl2303_port_probe,
++	.port_remove =		pl2303_port_remove,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index cad6089..2451c08 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -159,13 +159,10 @@ struct spcp8x5_private {
+ 	u8 			line_status;
+ };
+ 
+-/* desc : when device plug in,this function would be called.
+- * thanks to usb_serial subsystem,then do almost every things for us. And what
+- * we should do just alloc the buffer */
+-static int spcp8x5_startup(struct usb_serial *serial)
++static int spcp8x5_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct spcp8x5_private *priv;
+-	int i;
+ 	enum spcp8x5_type type = SPCP825_007_TYPE;
+ 	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ 
+@@ -182,34 +179,27 @@ static int spcp8x5_startup(struct usb_serial *serial)
+ 		type = SPCP825_PHILIP_TYPE;
+ 	dev_dbg(&serial->dev->dev, "device type = %d\n", (int)type);
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		priv = kzalloc(sizeof(struct spcp8x5_private), GFP_KERNEL);
+-		if (!priv)
+-			goto cleanup;
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
+ 
+-		spin_lock_init(&priv->lock);
+-		init_waitqueue_head(&priv->delta_msr_wait);
+-		priv->type = type;
+-		usb_set_serial_port_data(serial->port[i] , priv);
+-	}
++	spin_lock_init(&priv->lock);
++	init_waitqueue_head(&priv->delta_msr_wait);
++	priv->type = type;
++
++	usb_set_serial_port_data(port , priv);
+ 
+ 	return 0;
+-cleanup:
+-	for (--i; i >= 0; --i) {
+-		priv = usb_get_serial_port_data(serial->port[i]);
+-		kfree(priv);
+-		usb_set_serial_port_data(serial->port[i] , NULL);
+-	}
+-	return -ENOMEM;
+ }
+ 
+-/* call when the device plug out. free all the memory alloced by probe */
+-static void spcp8x5_release(struct usb_serial *serial)
++static int spcp8x5_port_remove(struct usb_serial_port *port)
+ {
+-	int i;
++	struct spcp8x5_private *priv;
+ 
+-	for (i = 0; i < serial->num_ports; i++)
+-		kfree(usb_get_serial_port_data(serial->port[i]));
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ /* set the modem control line of the device.
+@@ -651,8 +641,8 @@ static struct usb_serial_driver spcp8x5_device = {
+ 	.ioctl 			= spcp8x5_ioctl,
+ 	.tiocmget 		= spcp8x5_tiocmget,
+ 	.tiocmset 		= spcp8x5_tiocmset,
+-	.attach 		= spcp8x5_startup,
+-	.release 		= spcp8x5_release,
++	.port_probe		= spcp8x5_port_probe,
++	.port_remove		= spcp8x5_port_remove,
+ 	.process_read_urb	= spcp8x5_process_read_urb,
+ };
+ 
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index 3fee23b..fe3a8a0 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -69,13 +69,6 @@ struct ssu100_port_private {
+ 	struct async_icount icount;
+ };
+ 
+-static void ssu100_release(struct usb_serial *serial)
+-{
+-	struct ssu100_port_private *priv = usb_get_serial_port_data(*serial->port);
+-
+-	kfree(priv);
+-}
+-
+ static inline int ssu100_control_msg(struct usb_device *dev,
+ 				     u8 request, u16 data, u16 index)
+ {
+@@ -444,21 +437,33 @@ static int ssu100_ioctl(struct tty_struct *tty,
+ 
+ static int ssu100_attach(struct usb_serial *serial)
+ {
++	return ssu100_initdevice(serial->dev);
++}
++
++static int ssu100_port_probe(struct usb_serial_port *port)
++{
+ 	struct ssu100_port_private *priv;
+-	struct usb_serial_port *port = *serial->port;
+ 
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-	if (!priv) {
+-		dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
+-			sizeof(*priv));
++	if (!priv)
+ 		return -ENOMEM;
+-	}
+ 
+ 	spin_lock_init(&priv->status_lock);
+ 	init_waitqueue_head(&priv->delta_msr_wait);
++
+ 	usb_set_serial_port_data(port, priv);
+ 
+-	return ssu100_initdevice(serial->dev);
++	return 0;
++}
++
++static int ssu100_port_remove(struct usb_serial_port *port)
++{
++	struct ssu100_port_private *priv;
++
++	priv = usb_get_serial_port_data(port);
++	kfree(priv);
++
++	return 0;
+ }
+ 
+ static int ssu100_tiocmget(struct tty_struct *tty)
+@@ -649,7 +654,8 @@ static struct usb_serial_driver ssu100_device = {
+ 	.open		     = ssu100_open,
+ 	.close		     = ssu100_close,
+ 	.attach              = ssu100_attach,
+-	.release             = ssu100_release,
++	.port_probe          = ssu100_port_probe,
++	.port_remove         = ssu100_port_remove,
+ 	.dtr_rts             = ssu100_dtr_rts,
+ 	.process_read_urb    = ssu100_process_read_urb,
+ 	.tiocmget            = ssu100_tiocmget,
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index a4404f5..33537bb 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -98,6 +98,8 @@ struct ti_device {
+ 
+ static int ti_startup(struct usb_serial *serial);
+ static void ti_release(struct usb_serial *serial);
++static int ti_port_probe(struct usb_serial_port *port);
++static int ti_port_remove(struct usb_serial_port *port);
+ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port);
+ static void ti_close(struct usb_serial_port *port);
+ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
+@@ -223,6 +225,8 @@ static struct usb_serial_driver ti_1port_device = {
+ 	.num_ports		= 1,
+ 	.attach			= ti_startup,
+ 	.release		= ti_release,
++	.port_probe		= ti_port_probe,
++	.port_remove		= ti_port_remove,
+ 	.open			= ti_open,
+ 	.close			= ti_close,
+ 	.write			= ti_write,
+@@ -251,6 +255,8 @@ static struct usb_serial_driver ti_2port_device = {
+ 	.num_ports		= 2,
+ 	.attach			= ti_startup,
+ 	.release		= ti_release,
++	.port_probe		= ti_port_probe,
++	.port_remove		= ti_port_remove,
+ 	.open			= ti_open,
+ 	.close			= ti_close,
+ 	.write			= ti_write,
+@@ -358,11 +364,8 @@ module_exit(ti_exit);
+ static int ti_startup(struct usb_serial *serial)
+ {
+ 	struct ti_device *tdev;
+-	struct ti_port *tport;
+ 	struct usb_device *dev = serial->dev;
+ 	int status;
+-	int i;
+-
+ 
+ 	dbg("%s - product 0x%4X, num configurations %d, configuration value %d",
+ 	    __func__, le16_to_cpu(dev->descriptor.idProduct),
+@@ -409,42 +412,8 @@ static int ti_startup(struct usb_serial *serial)
+ 		goto free_tdev;
+ 	}
+ 
+-	/* set up port structures */
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		tport = kzalloc(sizeof(struct ti_port), GFP_KERNEL);
+-		if (tport == NULL) {
+-			dev_err(&dev->dev, "%s - out of memory\n", __func__);
+-			status = -ENOMEM;
+-			goto free_tports;
+-		}
+-		spin_lock_init(&tport->tp_lock);
+-		tport->tp_uart_base_addr = (i == 0 ?
+-				TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
+-		tport->tp_closing_wait = closing_wait;
+-		init_waitqueue_head(&tport->tp_msr_wait);
+-		init_waitqueue_head(&tport->tp_write_wait);
+-		if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE,
+-								GFP_KERNEL)) {
+-			dev_err(&dev->dev, "%s - out of memory\n", __func__);
+-			kfree(tport);
+-			status = -ENOMEM;
+-			goto free_tports;
+-		}
+-		tport->tp_port = serial->port[i];
+-		tport->tp_tdev = tdev;
+-		usb_set_serial_port_data(serial->port[i], tport);
+-		tport->tp_uart_mode = 0;	/* default is RS232 */
+-	}
+-
+ 	return 0;
+ 
+-free_tports:
+-	for (--i; i >= 0; --i) {
+-		tport = usb_get_serial_port_data(serial->port[i]);
+-		kfifo_free(&tport->write_fifo);
+-		kfree(tport);
+-		usb_set_serial_port_data(serial->port[i], NULL);
+-	}
+ free_tdev:
+ 	kfree(tdev);
+ 	usb_set_serial_data(serial, NULL);
+@@ -454,21 +423,50 @@ free_tdev:
+ 
+ static void ti_release(struct usb_serial *serial)
+ {
+-	int i;
+ 	struct ti_device *tdev = usb_get_serial_data(serial);
++
++	kfree(tdev);
++}
++
++static int ti_port_probe(struct usb_serial_port *port)
++{
+ 	struct ti_port *tport;
+ 
+-	for (i = 0; i < serial->num_ports; ++i) {
+-		tport = usb_get_serial_port_data(serial->port[i]);
+-		if (tport) {
+-			kfifo_free(&tport->write_fifo);
+-			kfree(tport);
+-		}
++	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
++	if (!tport)
++		return -ENOMEM;
++
++	spin_lock_init(&tport->tp_lock);
++	if (port == port->serial->port[0])
++		tport->tp_uart_base_addr = TI_UART1_BASE_ADDR;
++	else
++		tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
++	tport->tp_closing_wait = closing_wait;
++	init_waitqueue_head(&tport->tp_msr_wait);
++	init_waitqueue_head(&tport->tp_write_wait);
++	if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) {
++		kfree(tport);
++		return -ENOMEM;
+ 	}
++	tport->tp_port = port;
++	tport->tp_tdev = usb_get_serial_data(port->serial);
++	tport->tp_uart_mode = 0;	/* default is RS232 */
+ 
+-	kfree(tdev);
++	usb_set_serial_port_data(port, tport);
++
++	return 0;
+ }
+ 
++static int ti_port_remove(struct usb_serial_port *port)
++{
++	struct ti_port *tport;
++
++	tport = usb_get_serial_port_data(port);
++	kfifo_free(&tport->write_fifo);
++	kfree(tport);
++
++	return 0;
++}
+ 
+ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 1b50890..cf18217 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -174,8 +174,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
+ 		ext4_free_inodes_set(sb, gdp, 0);
+ 		ext4_itable_unused_set(sb, gdp, 0);
+ 		memset(bh->b_data, 0xff, sb->s_blocksize);
+-		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+-					   EXT4_BLOCKS_PER_GROUP(sb) / 8);
++		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
+ 		return;
+ 	}
+ 	memset(bh->b_data, 0, sb->s_blocksize);
+@@ -212,8 +211,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
+ 	 */
+ 	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
+ 			     sb->s_blocksize * 8, bh->b_data);
+-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
+-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
++	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
+ 	ext4_group_desc_csum_set(sb, block_group, gdp);
+ }
+ 
+@@ -350,7 +348,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
+ 		return;
+ 	}
+ 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+-			desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
++			desc, bh))) {
+ 		ext4_unlock_group(sb, block_group);
+ 		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+ 		return;
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index 5c2d181..3285aa5 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -58,11 +58,12 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ 
+ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ 				  struct ext4_group_desc *gdp,
+-				  struct buffer_head *bh, int sz)
++				  struct buffer_head *bh)
+ {
+ 	__u32 hi;
+ 	__u32 provided, calculated;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
+ 
+ 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ 					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+@@ -84,8 +85,9 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ 
+ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ 				struct ext4_group_desc *gdp,
+-				struct buffer_head *bh, int sz)
++				struct buffer_head *bh)
+ {
++	int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
+ 	__u32 csum;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index c3411d4..5c69f2b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1867,10 +1867,10 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ 				  struct buffer_head *bh, int sz);
+ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ 				struct ext4_group_desc *gdp,
+-				struct buffer_head *bh, int sz);
++				struct buffer_head *bh);
+ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ 				  struct ext4_group_desc *gdp,
+-				  struct buffer_head *bh, int sz);
++				  struct buffer_head *bh);
+ 
+ /* balloc.c */
+ extern void ext4_validate_block_bitmap(struct super_block *sb,
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index aabbb3f..741bb94 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -52,6 +52,9 @@
+ #define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
+ #define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
+ 
++#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
++#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
++
+ static __le32 ext4_extent_block_csum(struct inode *inode,
+ 				     struct ext4_extent_header *eh)
+ {
+@@ -2895,6 +2898,9 @@ static int ext4_split_extent_at(handle_t *handle,
+ 	unsigned int ee_len, depth;
+ 	int err = 0;
+ 
++	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
++	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
++
+ 	ext_debug("ext4_split_extents_at: inode %lu, logical"
+ 		"block %llu\n", inode->i_ino, (unsigned long long)split);
+ 
+@@ -2953,7 +2959,14 @@ static int ext4_split_extent_at(handle_t *handle,
+ 
+ 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+-		err = ext4_ext_zeroout(inode, &orig_ex);
++		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
++			if (split_flag & EXT4_EXT_DATA_VALID1)
++				err = ext4_ext_zeroout(inode, ex2);
++			else
++				err = ext4_ext_zeroout(inode, ex);
++		} else
++			err = ext4_ext_zeroout(inode, &orig_ex);
++
+ 		if (err)
+ 			goto fix_extent_len;
+ 		/* update the extent length and mark as initialized */
+@@ -3006,12 +3019,13 @@ static int ext4_split_extent(handle_t *handle,
+ 	uninitialized = ext4_ext_is_uninitialized(ex);
+ 
+ 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
+-		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+-			      EXT4_EXT_MAY_ZEROOUT : 0;
++		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
+ 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
+ 		if (uninitialized)
+ 			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
+ 				       EXT4_EXT_MARK_UNINIT2;
++		if (split_flag & EXT4_EXT_DATA_VALID2)
++			split_flag1 |= EXT4_EXT_DATA_VALID1;
+ 		err = ext4_split_extent_at(handle, inode, path,
+ 				map->m_lblk + map->m_len, split_flag1, flags1);
+ 		if (err)
+@@ -3024,8 +3038,8 @@ static int ext4_split_extent(handle_t *handle,
+ 		return PTR_ERR(path);
+ 
+ 	if (map->m_lblk >= ee_block) {
+-		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+-			      EXT4_EXT_MAY_ZEROOUT : 0;
++		split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
++					    EXT4_EXT_DATA_VALID2);
+ 		if (uninitialized)
+ 			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
+ 		if (split_flag & EXT4_EXT_MARK_UNINIT2)
+@@ -3303,26 +3317,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
+ 
+ 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
+ 	split_flag |= EXT4_EXT_MARK_UNINIT2;
+-
++	if (flags & EXT4_GET_BLOCKS_CONVERT)
++		split_flag |= EXT4_EXT_DATA_VALID2;
+ 	flags |= EXT4_GET_BLOCKS_PRE_IO;
+ 	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
+ }
+ 
+ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+-					      struct inode *inode,
+-					      struct ext4_ext_path *path)
++						struct inode *inode,
++						struct ext4_map_blocks *map,
++						struct ext4_ext_path *path)
+ {
+ 	struct ext4_extent *ex;
++	ext4_lblk_t ee_block;
++	unsigned int ee_len;
+ 	int depth;
+ 	int err = 0;
+ 
+ 	depth = ext_depth(inode);
+ 	ex = path[depth].p_ext;
++	ee_block = le32_to_cpu(ex->ee_block);
++	ee_len = ext4_ext_get_actual_len(ex);
+ 
+ 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
+ 		"block %llu, max_blocks %u\n", inode->i_ino,
+-		(unsigned long long)le32_to_cpu(ex->ee_block),
+-		ext4_ext_get_actual_len(ex));
++		  (unsigned long long)ee_block, ee_len);
++
++	/* If extent is larger than requested then split is required */
++	if (ee_block != map->m_lblk || ee_len > map->m_len) {
++		err = ext4_split_unwritten_extents(handle, inode, map, path,
++						   EXT4_GET_BLOCKS_CONVERT);
++		if (err < 0)
++			goto out;
++		ext4_ext_drop_refs(path);
++		path = ext4_ext_find_extent(inode, map->m_lblk, path);
++		if (IS_ERR(path)) {
++			err = PTR_ERR(path);
++			goto out;
++		}
++		depth = ext_depth(inode);
++		ex = path[depth].p_ext;
++	}
+ 
+ 	err = ext4_ext_get_access(handle, inode, path + depth);
+ 	if (err)
+@@ -3630,7 +3665,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+ 	}
+ 	/* IO end_io complete, convert the filled extent to written */
+ 	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
+-		ret = ext4_convert_unwritten_extents_endio(handle, inode,
++		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
+ 							path);
+ 		if (ret >= 0) {
+ 			ext4_update_inode_fsync_trans(handle, inode, 1);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 26154b8..8ce0076 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -753,9 +753,7 @@ got:
+ 			ext4_free_group_clusters_set(sb, gdp,
+ 				ext4_free_clusters_after_init(sb, group, gdp));
+ 			ext4_block_bitmap_csum_set(sb, group, gdp,
+-						   block_bitmap_bh,
+-						   EXT4_BLOCKS_PER_GROUP(sb) /
+-						   8);
++						   block_bitmap_bh);
+ 			ext4_group_desc_csum_set(sb, group, gdp);
+ 		}
+ 		ext4_unlock_group(sb, group);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 8eae947..b26410c 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2804,8 +2804,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 	}
+ 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
+ 	ext4_free_group_clusters_set(sb, gdp, len);
+-	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
+-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
++	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
+ 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
+ 
+ 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+@@ -4664,8 +4663,7 @@ do_more:
+ 
+ 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
+ 	ext4_free_group_clusters_set(sb, gdp, ret);
+-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
+-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
++	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
+ 	ext4_group_desc_csum_set(sb, block_group, gdp);
+ 	ext4_unlock_group(sb, block_group);
+ 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
+@@ -4809,8 +4807,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ 	mb_free_blocks(NULL, &e4b, bit, count);
+ 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
+ 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
+-	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
+-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
++	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
+ 	ext4_group_desc_csum_set(sb, block_group, desc);
+ 	ext4_unlock_group(sb, block_group);
+ 	percpu_counter_add(&sbi->s_freeclusters_counter,
+@@ -4990,8 +4987,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
+ 	minlen = range->minlen >> sb->s_blocksize_bits;
+ 
+-	if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
+-	    unlikely(start >= max_blks))
++	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
++	    start >= max_blks ||
++	    range->len < sb->s_blocksize)
+ 		return -EINVAL;
+ 	if (end >= max_blks)
+ 		end = max_blks - 1;
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 0be1789..71241bc 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1121,8 +1121,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
+ 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
+ 	if (!bh)
+ 		return -EIO;
+-	ext4_block_bitmap_csum_set(sb, group, gdp, bh,
+-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
++	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
+ 	brelse(bh);
+ 
+ 	return 0;
+diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
+index d269ada..982d267 100644
+--- a/fs/lockd/clntxdr.c
++++ b/fs/lockd/clntxdr.c
+@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
+ {
+ 	__be32 *p;
+ 
+-	BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
++	WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ 	p = xdr_reserve_space(xdr, 4);
+ 	*p = stat;
+ }
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index 3009a36..21171f0 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -68,7 +68,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+ 
+ 	/* Obtain file pointer. Not used by FREE_ALL call. */
+ 	if (filp != NULL) {
+-		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
++		error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
++		if (error != 0)
+ 			goto no_locks;
+ 		*filp = file;
+ 
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index 64c3b31..e296572 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu)
+ 
+ static u64 get_idle_time(int cpu)
+ {
+-	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
++	u64 idle, idle_time = -1ULL;
++
++	if (cpu_online(cpu))
++		idle_time = get_cpu_idle_time_us(cpu, NULL);
+ 
+ 	if (idle_time == -1ULL)
+-		/* !NO_HZ so we can rely on cpustat.idle */
++		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
+ 		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ 	else
+ 		idle = usecs_to_cputime64(idle_time);
+@@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu)
+ 
+ static u64 get_iowait_time(int cpu)
+ {
+-	u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
++	u64 iowait, iowait_time = -1ULL;
++
++	if (cpu_online(cpu))
++		iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+ 
+ 	if (iowait_time == -1ULL)
+-		/* !NO_HZ so we can rely on cpustat.iowait */
++		/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
+ 		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+ 	else
+ 		iowait = usecs_to_cputime64(iowait_time);
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index a810987..561e130 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -82,6 +82,8 @@ static inline int is_vlan_dev(struct net_device *dev)
+ }
+ 
+ #define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
++#define vlan_tx_nonzero_tag_present(__skb) \
++	(vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
+ #define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+ 
+ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+@@ -91,7 +93,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+ 
+-extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
++extern bool vlan_do_receive(struct sk_buff **skb);
+ extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+ 
+ extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+@@ -120,10 +122,8 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+ 	return 0;
+ }
+ 
+-static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
++static inline bool vlan_do_receive(struct sk_buff **skb)
+ {
+-	if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
+-		(*skb)->pkt_type = PACKET_OTHERHOST;
+ 	return false;
+ }
+ 
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index f74dd13..e893736 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -153,6 +153,7 @@ struct nlattr {
+ 
+ #include <linux/capability.h>
+ #include <linux/skbuff.h>
++#include <linux/export.h>
+ 
+ struct net;
+ 
+@@ -232,6 +233,8 @@ struct netlink_callback {
+ 					struct netlink_callback *cb);
+ 	int			(*done)(struct netlink_callback *cb);
+ 	void			*data;
++	/* the module that dump function belong to */
++	struct module		*module;
+ 	u16			family;
+ 	u16			min_dump_alloc;
+ 	unsigned int		prev_seq, seq;
+@@ -249,14 +252,24 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
+ 
+ struct netlink_dump_control {
+ 	int (*dump)(struct sk_buff *skb, struct netlink_callback *);
+-	int (*done)(struct netlink_callback*);
++	int (*done)(struct netlink_callback *);
+ 	void *data;
++	struct module *module;
+ 	u16 min_dump_alloc;
+ };
+ 
+-extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+-			      const struct nlmsghdr *nlh,
+-			      struct netlink_dump_control *control);
++extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++				const struct nlmsghdr *nlh,
++				struct netlink_dump_control *control);
++static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++				     const struct nlmsghdr *nlh,
++				     struct netlink_dump_control *control)
++{
++	if (!control->module)
++		control->module = THIS_MODULE;
++
++	return __netlink_dump_start(ssk, skb, nlh, control);
++}
+ 
+ 
+ #define NL_NONROOT_RECV 0x1
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 7632c87..f3165d2 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -589,9 +589,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
+ 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
+ }
+ 
+-extern void skb_recycle(struct sk_buff *skb);
+-extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
+-
+ extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+ extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+ extern struct sk_buff *skb_clone(struct sk_buff *skb,
+@@ -2642,27 +2639,6 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
+ 
+ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+ 
+-static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+-{
+-	if (irqs_disabled())
+-		return false;
+-
+-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+-		return false;
+-
+-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+-		return false;
+-
+-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+-	if (skb_end_offset(skb) < skb_size)
+-		return false;
+-
+-	if (skb_shared(skb) || skb_cloned(skb))
+-		return false;
+-
+-	return true;
+-}
+-
+ /**
+  * skb_head_is_locked - Determine if the skb->head is locked down
+  * @skb: skb to check
+diff --git a/include/net/flow.h b/include/net/flow.h
+index e1dd508..628e11b 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -21,6 +21,7 @@ struct flowi_common {
+ 	__u8	flowic_flags;
+ #define FLOWI_FLAG_ANYSRC		0x01
+ #define FLOWI_FLAG_CAN_SLEEP		0x02
++#define FLOWI_FLAG_KNOWN_NH		0x04
+ 	__u32	flowic_secid;
+ };
+ 
+diff --git a/include/net/route.h b/include/net/route.h
+index da22243..bc40b63 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -48,7 +48,8 @@ struct rtable {
+ 	int			rt_genid;
+ 	unsigned int		rt_flags;
+ 	__u16			rt_type;
+-	__u16			rt_is_input;
++	__u8			rt_is_input;
++	__u8			rt_uses_gateway;
+ 
+ 	int			rt_iif;
+ 
+diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
+index 3c5363a..bd3d8b2 100644
+--- a/include/rdma/rdma_netlink.h
++++ b/include/rdma/rdma_netlink.h
+@@ -39,6 +39,7 @@ struct rdma_cm_id_stats {
+ 
+ struct ibnl_client_cbs {
+ 	int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
++	struct module *module;
+ };
+ 
+ int ibnl_init(void);
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 7981850..ff2bce5 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1923,9 +1923,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+ 	 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
+ 	 * it here; it will be freed under RCU.
+ 	 */
+-	put_css_set(oldcg);
+-
+ 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
++	put_css_set(oldcg);
+ }
+ 
+ /**
+@@ -4695,31 +4694,20 @@ static const struct file_operations proc_cgroupstats_operations = {
+  *
+  * A pointer to the shared css_set was automatically copied in
+  * fork.c by dup_task_struct().  However, we ignore that copy, since
+- * it was not made under the protection of RCU, cgroup_mutex or
+- * threadgroup_change_begin(), so it might no longer be a valid
+- * cgroup pointer.  cgroup_attach_task() might have already changed
+- * current->cgroups, allowing the previously referenced cgroup
+- * group to be removed and freed.
+- *
+- * Outside the pointer validity we also need to process the css_set
+- * inheritance between threadgoup_change_begin() and
+- * threadgoup_change_end(), this way there is no leak in any process
+- * wide migration performed by cgroup_attach_proc() that could otherwise
+- * miss a thread because it is too early or too late in the fork stage.
++ * it was not made under the protection of RCU or cgroup_mutex, so
++ * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
++ * have already changed current->cgroups, allowing the previously
++ * referenced cgroup group to be removed and freed.
+  *
+  * At the point that cgroup_fork() is called, 'current' is the parent
+  * task, and the passed argument 'child' points to the child task.
+  */
+ void cgroup_fork(struct task_struct *child)
+ {
+-	/*
+-	 * We don't need to task_lock() current because current->cgroups
+-	 * can't be changed concurrently here. The parent obviously hasn't
+-	 * exited and called cgroup_exit(), and we are synchronized against
+-	 * cgroup migration through threadgroup_change_begin().
+-	 */
++	task_lock(current);
+ 	child->cgroups = current->cgroups;
+ 	get_css_set(child->cgroups);
++	task_unlock(current);
+ 	INIT_LIST_HEAD(&child->cg_list);
+ }
+ 
+@@ -4772,19 +4760,10 @@ void cgroup_post_fork(struct task_struct *child)
+ 	 */
+ 	if (use_task_css_set_links) {
+ 		write_lock(&css_set_lock);
+-		if (list_empty(&child->cg_list)) {
+-			/*
+-			 * It's safe to use child->cgroups without task_lock()
+-			 * here because we are protected through
+-			 * threadgroup_change_begin() against concurrent
+-			 * css_set change in cgroup_task_migrate(). Also
+-			 * the task can't exit at that point until
+-			 * wake_up_new_task() is called, so we are protected
+-			 * against cgroup_exit() setting child->cgroup to
+-			 * init_css_set.
+-			 */
++		task_lock(child);
++		if (list_empty(&child->cg_list))
+ 			list_add(&child->cg_list, &child->cgroups->tasks);
+-		}
++		task_unlock(child);
+ 		write_unlock(&css_set_lock);
+ 	}
+ }
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 6fab59a..909148a 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
+  * Work around broken programs that cannot handle "Linux 3.0".
+  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+  */
+-static int override_release(char __user *release, int len)
++static int override_release(char __user *release, size_t len)
+ {
+ 	int ret = 0;
+-	char buf[65];
+ 
+ 	if (current->personality & UNAME26) {
+-		char *rest = UTS_RELEASE;
++		const char *rest = UTS_RELEASE;
++		char buf[65] = { 0 };
+ 		int ndots = 0;
+ 		unsigned v;
++		size_t copy;
+ 
+ 		while (*rest) {
+ 			if (*rest == '.' && ++ndots >= 3)
+@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
+ 			rest++;
+ 		}
+ 		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+-		snprintf(buf, len, "2.6.%u%s", v, rest);
+-		ret = copy_to_user(release, buf, len);
++		copy = clamp_t(size_t, len, 1, sizeof(buf));
++		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
++		ret = copy_to_user(release, buf, copy + 1);
+ 	}
+ 	return ret;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 49491fa..ebd284f 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1567,6 +1567,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 
+ 		put_online_cpus();
+ 	} else {
++		/* Make sure this CPU has been intitialized */
++		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
++			goto out;
++
+ 		cpu_buffer = buffer->buffers[cpu_id];
+ 
+ 		if (nr_pages == cpu_buffer->nr_pages)
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 830059d..fe29a64 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -5,7 +5,7 @@
+ #include <linux/export.h>
+ #include "vlan.h"
+ 
+-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
++bool vlan_do_receive(struct sk_buff **skbp)
+ {
+ 	struct sk_buff *skb = *skbp;
+ 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
+ 	struct vlan_pcpu_stats *rx_stats;
+ 
+ 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+-	if (!vlan_dev) {
+-		/* Only the last call to vlan_do_receive() should change
+-		 * pkt_type to PACKET_OTHERHOST
+-		 */
+-		if (vlan_id && last_handler)
+-			skb->pkt_type = PACKET_OTHERHOST;
++	if (!vlan_dev)
+ 		return false;
+-	}
+ 
+ 	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+ 	if (unlikely(!skb))
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 89e33a5..2fb9f59 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3275,18 +3275,18 @@ ncls:
+ 				&& !skb_pfmemalloc_protocol(skb))
+ 		goto drop;
+ 
+-	rx_handler = rcu_dereference(skb->dev->rx_handler);
+ 	if (vlan_tx_tag_present(skb)) {
+ 		if (pt_prev) {
+ 			ret = deliver_skb(skb, pt_prev, orig_dev);
+ 			pt_prev = NULL;
+ 		}
+-		if (vlan_do_receive(&skb, !rx_handler))
++		if (vlan_do_receive(&skb))
+ 			goto another_round;
+ 		else if (unlikely(!skb))
+ 			goto unlock;
+ 	}
+ 
++	rx_handler = rcu_dereference(skb->dev->rx_handler);
+ 	if (rx_handler) {
+ 		if (pt_prev) {
+ 			ret = deliver_skb(skb, pt_prev, orig_dev);
+@@ -3306,6 +3306,9 @@ ncls:
+ 		}
+ 	}
+ 
++	if (vlan_tx_nonzero_tag_present(skb))
++		skb->pkt_type = PACKET_OTHERHOST;
++
+ 	/* deliver only exact match when indicated */
+ 	null_or_dev = deliver_exact ? skb->dev : NULL;
+ 
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 117afaf..058bb1e 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1301,8 +1301,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+ 	if (!dst)
+ 		goto discard;
+ 
+-	__skb_pull(skb, skb_network_offset(skb));
+-
+ 	if (!neigh_event_send(neigh, skb)) {
+ 		int err;
+ 		struct net_device *dev = neigh->dev;
+@@ -1312,6 +1310,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+ 			neigh_hh_init(neigh, dst);
+ 
+ 		do {
++			__skb_pull(skb, skb_network_offset(skb));
+ 			seq = read_seqbegin(&neigh->ha_lock);
+ 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ 					      neigh->ha, NULL, skb->len);
+@@ -1342,9 +1341,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
+ 	unsigned int seq;
+ 	int err;
+ 
+-	__skb_pull(skb, skb_network_offset(skb));
+-
+ 	do {
++		__skb_pull(skb, skb_network_offset(skb));
+ 		seq = read_seqbegin(&neigh->ha_lock);
+ 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ 				      neigh->ha, NULL, skb->len);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e33ebae..ef172af 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -641,53 +641,6 @@ void consume_skb(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(consume_skb);
+ 
+-/**
+- * 	skb_recycle - clean up an skb for reuse
+- * 	@skb: buffer
+- *
+- * 	Recycles the skb to be reused as a receive buffer. This
+- * 	function does any necessary reference count dropping, and
+- * 	cleans up the skbuff as if it just came from __alloc_skb().
+- */
+-void skb_recycle(struct sk_buff *skb)
+-{
+-	struct skb_shared_info *shinfo;
+-
+-	skb_release_head_state(skb);
+-
+-	shinfo = skb_shinfo(skb);
+-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+-	atomic_set(&shinfo->dataref, 1);
+-
+-	memset(skb, 0, offsetof(struct sk_buff, tail));
+-	skb->data = skb->head + NET_SKB_PAD;
+-	skb_reset_tail_pointer(skb);
+-}
+-EXPORT_SYMBOL(skb_recycle);
+-
+-/**
+- *	skb_recycle_check - check if skb can be reused for receive
+- *	@skb: buffer
+- *	@skb_size: minimum receive buffer size
+- *
+- *	Checks that the skb passed in is not shared or cloned, and
+- *	that it is linear and its head portion at least as large as
+- *	skb_size so that it can be recycled as a receive buffer.
+- *	If these conditions are met, this function does any necessary
+- *	reference count dropping and cleans up the skbuff as if it
+- *	just came from __alloc_skb().
+- */
+-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+-{
+-	if (!skb_is_recycleable(skb, skb_size))
+-		return false;
+-
+-	skb_recycle(skb);
+-
+-	return true;
+-}
+-EXPORT_SYMBOL(skb_recycle_check);
+-
+ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ {
+ 	new->tstamp		= old->tstamp;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 8e2b475..f444ac7 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -322,7 +322,8 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ {
+ 	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
+ 
+-	if (!r && !fib_num_tclassid_users(dev_net(dev))) {
++	if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
++	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
+ 		*itag = 0;
+ 		return 0;
+ 	}
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index a747100..9633661 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -840,6 +840,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+ 	change_nexthops(fi) {
+ 		nexthop_nh->nh_parent = fi;
+ 		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
++		if (!nexthop_nh->nh_pcpu_rth_output)
++			goto failure;
+ 	} endfor_nexthops(fi)
+ 
+ 	if (cfg->fc_mx) {
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 7f75f21..0405cc8 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -386,7 +386,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
+ 	rt = ip_route_output_flow(net, fl4, sk);
+ 	if (IS_ERR(rt))
+ 		goto no_route;
+-	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
++	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ 		goto route_err;
+ 	return &rt->dst;
+ 
+@@ -422,7 +422,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
+ 	rt = ip_route_output_flow(net, fl4, sk);
+ 	if (IS_ERR(rt))
+ 		goto no_route;
+-	if (opt && opt->opt.is_strictroute && rt->rt_gateway)
++	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ 		goto route_err;
+ 	rcu_read_unlock();
+ 	return &rt->dst;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index ab09b12..694de3b 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -85,7 +85,7 @@ int ip_forward(struct sk_buff *skb)
+ 
+ 	rt = skb_rtable(skb);
+ 
+-	if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
++	if (opt->is_strictroute && rt->rt_uses_gateway)
+ 		goto sr_failed;
+ 
+ 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c196d74..94ad7ea 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -193,7 +193,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
+ 	}
+ 
+ 	rcu_read_lock_bh();
+-	nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr;
++	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
+ 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+ 	if (unlikely(!neigh))
+ 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+@@ -371,7 +371,7 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
+ 	skb_dst_set_noref(skb, &rt->dst);
+ 
+ packet_routed:
+-	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway)
++	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
+ 		goto no_route;
+ 
+ 	/* OK, we know where to send it, allocate and build IP header. */
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index fd9af60..2a1383c 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -802,7 +802,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 	net = dev_net(rt->dst.dev);
+ 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+ 	if (!peer) {
+-		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
++		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
++			  rt_nexthop(rt, ip_hdr(skb)->daddr));
+ 		return;
+ 	}
+ 
+@@ -827,7 +828,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 	    time_after(jiffies,
+ 		       (peer->rate_last +
+ 			(ip_rt_redirect_load << peer->rate_tokens)))) {
+-		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
++		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
++
++		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ 		peer->rate_last = jiffies;
+ 		++peer->rate_tokens;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+@@ -835,7 +838,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 		    peer->rate_tokens == ip_rt_redirect_number)
+ 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ 					     &ip_hdr(skb)->saddr, inet_iif(skb),
+-					     &ip_hdr(skb)->daddr, &rt->rt_gateway);
++					     &ip_hdr(skb)->daddr, &gw);
+ #endif
+ 	}
+ out_put_peer:
+@@ -904,22 +907,32 @@ out:	kfree_skb(skb);
+ 	return 0;
+ }
+ 
+-static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
++static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ {
++	struct dst_entry *dst = &rt->dst;
+ 	struct fib_result res;
+ 
++	if (dst->dev->mtu < mtu)
++		return;
++
+ 	if (mtu < ip_rt_min_pmtu)
+ 		mtu = ip_rt_min_pmtu;
+ 
++	if (!rt->rt_pmtu) {
++		dst->obsolete = DST_OBSOLETE_KILL;
++	} else {
++		rt->rt_pmtu = mtu;
++		dst->expires = max(1UL, jiffies + ip_rt_mtu_expires);
++	}
++
+ 	rcu_read_lock();
+-	if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
++	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
+ 		struct fib_nh *nh = &FIB_RES_NH(res);
+ 
+ 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+ 				      jiffies + ip_rt_mtu_expires);
+ 	}
+ 	rcu_read_unlock();
+-	return mtu;
+ }
+ 
+ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+@@ -929,14 +942,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ 	struct flowi4 fl4;
+ 
+ 	ip_rt_build_flow_key(&fl4, sk, skb);
+-	mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);
+-
+-	if (!rt->rt_pmtu) {
+-		dst->obsolete = DST_OBSOLETE_KILL;
+-	} else {
+-		rt->rt_pmtu = mtu;
+-		rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
+-	}
++	__ip_rt_update_pmtu(rt, &fl4, mtu);
+ }
+ 
+ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
+@@ -1123,7 +1129,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
+ 	mtu = dst->dev->mtu;
+ 
+ 	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+-		if (rt->rt_gateway && mtu > 576)
++		if (rt->rt_uses_gateway && mtu > 576)
+ 			mtu = 576;
+ 	}
+ 
+@@ -1174,7 +1180,9 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+ 		if (fnhe->fnhe_gw) {
+ 			rt->rt_flags |= RTCF_REDIRECTED;
+ 			rt->rt_gateway = fnhe->fnhe_gw;
+-		}
++			rt->rt_uses_gateway = 1;
++		} else if (!rt->rt_gateway)
++			rt->rt_gateway = daddr;
+ 
+ 		orig = rcu_dereference(fnhe->fnhe_rth);
+ 		rcu_assign_pointer(fnhe->fnhe_rth, rt);
+@@ -1183,13 +1191,6 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+ 
+ 		fnhe->fnhe_stamp = jiffies;
+ 		ret = true;
+-	} else {
+-		/* Routes we intend to cache in nexthop exception have
+-		 * the DST_NOCACHE bit clear.  However, if we are
+-		 * unsuccessful at storing this route into the cache
+-		 * we really need to set it.
+-		 */
+-		rt->dst.flags |= DST_NOCACHE;
+ 	}
+ 	spin_unlock_bh(&fnhe_lock);
+ 
+@@ -1204,8 +1205,6 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
+ 	if (rt_is_input_route(rt)) {
+ 		p = (struct rtable **)&nh->nh_rth_input;
+ 	} else {
+-		if (!nh->nh_pcpu_rth_output)
+-			goto nocache;
+ 		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
+ 	}
+ 	orig = *p;
+@@ -1214,16 +1213,8 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
+ 	if (prev == orig) {
+ 		if (orig)
+ 			rt_free(orig);
+-	} else {
+-		/* Routes we intend to cache in the FIB nexthop have
+-		 * the DST_NOCACHE bit clear.  However, if we are
+-		 * unsuccessful at storing this route into the cache
+-		 * we really need to set it.
+-		 */
+-nocache:
+-		rt->dst.flags |= DST_NOCACHE;
++	} else
+ 		ret = false;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1284,8 +1275,10 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
+ 	if (fi) {
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+-		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
++		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
+ 			rt->rt_gateway = nh->nh_gw;
++			rt->rt_uses_gateway = 1;
++		}
+ 		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ 		rt->dst.tclassid = nh->nh_tclassid;
+@@ -1294,8 +1287,18 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
+ 			cached = rt_bind_exception(rt, fnhe, daddr);
+ 		else if (!(rt->dst.flags & DST_NOCACHE))
+ 			cached = rt_cache_route(nh, rt);
+-	}
+-	if (unlikely(!cached))
++		if (unlikely(!cached)) {
++			/* Routes we intend to cache in nexthop exception or
++			 * FIB nexthop have the DST_NOCACHE bit clear.
++			 * However, if we are unsuccessful at storing this
++			 * route into the cache we really need to set it.
++			 */
++			rt->dst.flags |= DST_NOCACHE;
++			if (!rt->rt_gateway)
++				rt->rt_gateway = daddr;
++			rt_add_uncached_list(rt);
++		}
++	} else
+ 		rt_add_uncached_list(rt);
+ 
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+@@ -1363,6 +1366,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ 	rth->rt_iif	= 0;
+ 	rth->rt_pmtu	= 0;
+ 	rth->rt_gateway	= 0;
++	rth->rt_uses_gateway = 0;
+ 	INIT_LIST_HEAD(&rth->rt_uncached);
+ 	if (our) {
+ 		rth->dst.input= ip_local_deliver;
+@@ -1432,7 +1436,6 @@ static int __mkroute_input(struct sk_buff *skb,
+ 		return -EINVAL;
+ 	}
+ 
+-
+ 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
+ 				  in_dev->dev, in_dev, &itag);
+ 	if (err < 0) {
+@@ -1442,10 +1445,13 @@ static int __mkroute_input(struct sk_buff *skb,
+ 		goto cleanup;
+ 	}
+ 
+-	if (out_dev == in_dev && err &&
++	do_cache = res->fi && !itag;
++	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+ 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
+-	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
++	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
+ 		flags |= RTCF_DOREDIRECT;
++		do_cache = false;
++	}
+ 
+ 	if (skb->protocol != htons(ETH_P_IP)) {
+ 		/* Not IP (i.e. ARP). Do not create route, if it is
+@@ -1462,15 +1468,11 @@ static int __mkroute_input(struct sk_buff *skb,
+ 		}
+ 	}
+ 
+-	do_cache = false;
+-	if (res->fi) {
+-		if (!itag) {
+-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+-			if (rt_cache_valid(rth)) {
+-				skb_dst_set_noref(skb, &rth->dst);
+-				goto out;
+-			}
+-			do_cache = true;
++	if (do_cache) {
++		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
++		if (rt_cache_valid(rth)) {
++			skb_dst_set_noref(skb, &rth->dst);
++			goto out;
+ 		}
+ 	}
+ 
+@@ -1489,6 +1491,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ 	rth->rt_iif 	= 0;
+ 	rth->rt_pmtu	= 0;
+ 	rth->rt_gateway	= 0;
++	rth->rt_uses_gateway = 0;
+ 	INIT_LIST_HEAD(&rth->rt_uncached);
+ 
+ 	rth->dst.input = ip_forward;
+@@ -1656,6 +1659,7 @@ local_input:
+ 	rth->rt_iif	= 0;
+ 	rth->rt_pmtu	= 0;
+ 	rth->rt_gateway	= 0;
++	rth->rt_uses_gateway = 0;
+ 	INIT_LIST_HEAD(&rth->rt_uncached);
+ 	if (res.type == RTN_UNREACHABLE) {
+ 		rth->dst.input= ip_error;
+@@ -1758,6 +1762,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 	struct in_device *in_dev;
+ 	u16 type = res->type;
+ 	struct rtable *rth;
++	bool do_cache;
+ 
+ 	in_dev = __in_dev_get_rcu(dev_out);
+ 	if (!in_dev)
+@@ -1794,24 +1799,36 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 	}
+ 
+ 	fnhe = NULL;
++	do_cache = fi != NULL;
+ 	if (fi) {
+ 		struct rtable __rcu **prth;
++		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+-		fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
++		fnhe = find_exception(nh, fl4->daddr);
+ 		if (fnhe)
+ 			prth = &fnhe->fnhe_rth;
+-		else
+-			prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
++		else {
++			if (unlikely(fl4->flowi4_flags &
++				     FLOWI_FLAG_KNOWN_NH &&
++				     !(nh->nh_gw &&
++				       nh->nh_scope == RT_SCOPE_LINK))) {
++				do_cache = false;
++				goto add;
++			}
++			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
++		}
+ 		rth = rcu_dereference(*prth);
+ 		if (rt_cache_valid(rth)) {
+ 			dst_hold(&rth->dst);
+ 			return rth;
+ 		}
+ 	}
++
++add:
+ 	rth = rt_dst_alloc(dev_out,
+ 			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
+ 			   IN_DEV_CONF_GET(in_dev, NOXFRM),
+-			   fi);
++			   do_cache);
+ 	if (!rth)
+ 		return ERR_PTR(-ENOBUFS);
+ 
+@@ -1824,6 +1841,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 	rth->rt_iif	= orig_oif ? : 0;
+ 	rth->rt_pmtu	= 0;
+ 	rth->rt_gateway = 0;
++	rth->rt_uses_gateway = 0;
+ 	INIT_LIST_HEAD(&rth->rt_uncached);
+ 
+ 	RT_CACHE_STAT_INC(out_slow_tot);
+@@ -2102,6 +2120,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
+ 		rt->rt_flags = ort->rt_flags;
+ 		rt->rt_type = ort->rt_type;
+ 		rt->rt_gateway = ort->rt_gateway;
++		rt->rt_uses_gateway = ort->rt_uses_gateway;
+ 
+ 		INIT_LIST_HEAD(&rt->rt_uncached);
+ 
+@@ -2180,12 +2199,22 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
+ 		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
+ 			goto nla_put_failure;
+ 	}
+-	if (rt->rt_gateway &&
++	if (rt->rt_uses_gateway &&
+ 	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+ 		goto nla_put_failure;
+ 
++	expires = rt->dst.expires;
++	if (expires) {
++		unsigned long now = jiffies;
++
++		if (time_before(now, expires))
++			expires -= now;
++		else
++			expires = 0;
++	}
++
+ 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
+-	if (rt->rt_pmtu)
++	if (rt->rt_pmtu && expires)
+ 		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+ 	if (rtnetlink_put_metrics(skb, metrics) < 0)
+ 		goto nla_put_failure;
+@@ -2195,13 +2224,6 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
+ 		goto nla_put_failure;
+ 
+ 	error = rt->dst.error;
+-	expires = rt->dst.expires;
+-	if (expires) {
+-		if (time_before(jiffies, expires))
+-			expires -= jiffies;
+-		else
+-			expires = 0;
+-	}
+ 
+ 	if (rt_is_input_route(rt)) {
+ 		if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 00a748d..db7bfad 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -693,10 +693,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+ 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+ 	/* When socket is gone, all binding information is lost.
+-	 * routing might fail in this case. using iif for oif to
+-	 * make sure we can deliver it
++	 * routing might fail in this case. No choice here, if we choose to force
++	 * input interface, we will misroute in case of asymmetric route.
+ 	 */
+-	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
++	if (sk)
++		arg.bound_dev_if = sk->sk_bound_dev_if;
+ 
+ 	net = dev_net(skb_dst(skb)->dev);
+ 	arg.tos = ip_hdr(skb)->tos;
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index 681ea2f..05c5ab8 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -91,6 +91,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ 					      RTCF_LOCAL);
+ 	xdst->u.rt.rt_type = rt->rt_type;
+ 	xdst->u.rt.rt_gateway = rt->rt_gateway;
++	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
+ 	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+ 	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index ea3e9af..b10374d 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3088,14 +3088,15 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
+ 		struct hlist_node *n;
+ 		hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+ 					 addr_lst) {
++			if (!net_eq(dev_net(ifa->idev->dev), net))
++				continue;
+ 			/* sync with offset */
+ 			if (p < state->offset) {
+ 				p++;
+ 				continue;
+ 			}
+ 			state->offset++;
+-			if (net_eq(dev_net(ifa->idev->dev), net))
+-				return ifa;
++			return ifa;
+ 		}
+ 
+ 		/* prepare for next bucket */
+@@ -3113,18 +3114,20 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
+ 	struct hlist_node *n = &ifa->addr_lst;
+ 
+ 	hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
++		if (!net_eq(dev_net(ifa->idev->dev), net))
++			continue;
+ 		state->offset++;
+-		if (net_eq(dev_net(ifa->idev->dev), net))
+-			return ifa;
++		return ifa;
+ 	}
+ 
+ 	while (++state->bucket < IN6_ADDR_HSIZE) {
+ 		state->offset = 0;
+ 		hlist_for_each_entry_rcu_bh(ifa, n,
+ 				     &inet6_addr_lst[state->bucket], addr_lst) {
++			if (!net_eq(dev_net(ifa->idev->dev), net))
++				continue;
+ 			state->offset++;
+-			if (net_eq(dev_net(ifa->idev->dev), net))
+-				return ifa;
++			return ifa;
+ 		}
+ 	}
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index e22e6d8..f757e3b 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -880,22 +880,25 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+ 	nlen = skb_network_header_len(skb);
+ 
+ 	for (p = *head; p; p = p->next) {
+-		struct ipv6hdr *iph2;
++		const struct ipv6hdr *iph2;
++		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
+ 
+ 		if (!NAPI_GRO_CB(p)->same_flow)
+ 			continue;
+ 
+ 		iph2 = ipv6_hdr(p);
++		first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ;
+ 
+-		/* All fields must match except length. */
++		/* All fields must match except length and Traffic Class. */
+ 		if (nlen != skb_network_header_len(p) ||
+-		    memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
++		    (first_word & htonl(0xF00FFFFF)) ||
+ 		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+ 			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+ 			NAPI_GRO_CB(p)->same_flow = 0;
+ 			continue;
+ 		}
+-
++		/* flush if Traffic Class fields are different */
++		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
+ 		NAPI_GRO_CB(p)->flush |= flush;
+ 	}
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index acd32e3..7e32d42 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -863,7 +863,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+ 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
+ 
+ 	fl6.flowi6_proto = IPPROTO_TCP;
+-	fl6.flowi6_oif = inet6_iif(skb);
++	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
++		fl6.flowi6_oif = inet6_iif(skb);
+ 	fl6.fl6_dport = t1->dest;
+ 	fl6.fl6_sport = t1->source;
+ 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
+index accfa00..a16b7b4 100644
+--- a/net/mac80211/mesh_sync.c
++++ b/net/mac80211/mesh_sync.c
+@@ -56,7 +56,6 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+ 	u64 tsfdelta;
+ 
+ 	spin_lock_bh(&ifmsh->sync_offset_lock);
+-
+ 	if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
+ 		msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
+ 			  (long long) ifmsh->sync_offset_clockdrift_max);
+@@ -69,11 +68,11 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+ 		tsfdelta = -beacon_int_fraction;
+ 		ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
+ 	}
++	spin_unlock_bh(&ifmsh->sync_offset_lock);
+ 
+ 	tsf = drv_get_tsf(local, sdata);
+ 	if (tsf != -1ULL)
+ 		drv_set_tsf(local, sdata, tsf + tsfdelta);
+-	spin_unlock_bh(&ifmsh->sync_offset_lock);
+ }
+ 
+ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 65b616a..c3c6291 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -49,6 +49,7 @@ enum {
+ 	IP_VS_RT_MODE_RDR	= 4, /* Allow redirect from remote daddr to
+ 				      * local
+ 				      */
++	IP_VS_RT_MODE_KNOWN_NH	= 16,/* Route via remote addr */
+ };
+ 
+ /*
+@@ -103,6 +104,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
+ 			memset(&fl4, 0, sizeof(fl4));
+ 			fl4.daddr = dest->addr.ip;
+ 			fl4.flowi4_tos = rtos;
++			fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
++					   FLOWI_FLAG_KNOWN_NH : 0;
+ 			rt = ip_route_output_key(net, &fl4);
+ 			if (IS_ERR(rt)) {
+ 				spin_unlock(&dest->dst_lock);
+@@ -127,6 +130,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
+ 		memset(&fl4, 0, sizeof(fl4));
+ 		fl4.daddr = daddr;
+ 		fl4.flowi4_tos = rtos;
++		fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
++				   FLOWI_FLAG_KNOWN_NH : 0;
+ 		rt = ip_route_output_key(net, &fl4);
+ 		if (IS_ERR(rt)) {
+ 			IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
+@@ -1014,7 +1019,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
+ 				      RT_TOS(iph->tos),
+ 				      IP_VS_RT_MODE_LOCAL |
+-					IP_VS_RT_MODE_NON_LOCAL, NULL)))
++				      IP_VS_RT_MODE_NON_LOCAL |
++				      IP_VS_RT_MODE_KNOWN_NH, NULL)))
+ 		goto tx_error_icmp;
+ 	if (rt->rt_flags & RTCF_LOCAL) {
+ 		ip_rt_put(rt);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 5270238..9172179 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -169,6 +169,8 @@ static void netlink_sock_destruct(struct sock *sk)
+ 	if (nlk->cb) {
+ 		if (nlk->cb->done)
+ 			nlk->cb->done(nlk->cb);
++
++		module_put(nlk->cb->module);
+ 		netlink_destroy_callback(nlk->cb);
+ 	}
+ 
+@@ -1760,6 +1762,7 @@ static int netlink_dump(struct sock *sk)
+ 	nlk->cb = NULL;
+ 	mutex_unlock(nlk->cb_mutex);
+ 
++	module_put(cb->module);
+ 	netlink_consume_callback(cb);
+ 	return 0;
+ 
+@@ -1769,9 +1772,9 @@ errout_skb:
+ 	return err;
+ }
+ 
+-int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+-		       const struct nlmsghdr *nlh,
+-		       struct netlink_dump_control *control)
++int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++			 const struct nlmsghdr *nlh,
++			 struct netlink_dump_control *control)
+ {
+ 	struct netlink_callback *cb;
+ 	struct sock *sk;
+@@ -1786,6 +1789,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	cb->done = control->done;
+ 	cb->nlh = nlh;
+ 	cb->data = control->data;
++	cb->module = control->module;
+ 	cb->min_dump_alloc = control->min_dump_alloc;
+ 	atomic_inc(&skb->users);
+ 	cb->skb = skb;
+@@ -1796,19 +1800,28 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 		return -ECONNREFUSED;
+ 	}
+ 	nlk = nlk_sk(sk);
+-	/* A dump is in progress... */
++
+ 	mutex_lock(nlk->cb_mutex);
++	/* A dump is in progress... */
+ 	if (nlk->cb) {
+ 		mutex_unlock(nlk->cb_mutex);
+ 		netlink_destroy_callback(cb);
+-		sock_put(sk);
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto out;
+ 	}
++	/* add reference of module which cb->dump belongs to */
++	if (!try_module_get(cb->module)) {
++		mutex_unlock(nlk->cb_mutex);
++		netlink_destroy_callback(cb);
++		ret = -EPROTONOSUPPORT;
++		goto out;
++	}
++
+ 	nlk->cb = cb;
+ 	mutex_unlock(nlk->cb_mutex);
+ 
+ 	ret = netlink_dump(sk);
+-
++out:
+ 	sock_put(sk);
+ 
+ 	if (ret)
+@@ -1819,7 +1832,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	 */
+ 	return -EINTR;
+ }
+-EXPORT_SYMBOL(netlink_dump_start);
++EXPORT_SYMBOL(__netlink_dump_start);
+ 
+ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
+ {
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 96531d4..88eace5 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
+ 	rds_stats_inc(s_send_pong);
+ 
+ 	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+-		rds_send_xmit(conn);
++		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ 
+ 	rds_message_put(rm);
+ 	return 0;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 2afd2a8..f86d95e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1409,11 +1409,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ 			  size_t count, loff_t *ppos,
+ 			  struct cache_detail *cd)
+ {
+-	char tbuf[20];
++	char tbuf[22];
+ 	unsigned long p = *ppos;
+ 	size_t len;
+ 
+-	sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
++	snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
+ 	len = strlen(tbuf);
+ 	if (p >= len)
+ 		return 0;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index d1988cf..97f8918 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2539,6 +2539,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
+ static struct rpc_xprt_ops bc_tcp_ops = {
+ 	.reserve_xprt		= xprt_reserve_xprt,
+ 	.release_xprt		= xprt_release_xprt,
++	.alloc_slot		= xprt_alloc_slot,
+ 	.rpcbind		= xs_local_rpcbind,
+ 	.buf_alloc		= bc_malloc,
+ 	.buf_free		= bc_free,



