Subject: [gentoo-commits] linux-patches r2291 - in genpatches-2.6/trunk: 3.0 3.2 3.4
From: Tom Wijsman (tomwij) @ 2013-02-28 19:14 UTC
  To: gentoo-commits

Author: tomwij
Date: 2013-02-28 19:13:50 +0000 (Thu, 28 Feb 2013)
New Revision: 2291

Added:
   genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch
   genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch
   genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch
   genpatches-2.6/trunk/3.4/1032_linux-3.4.33.patch
   genpatches-2.6/trunk/3.4/1033_linux-3.4.34.patch
Modified:
   genpatches-2.6/trunk/3.0/0000_README
   genpatches-2.6/trunk/3.2/0000_README
   genpatches-2.6/trunk/3.4/0000_README
Log:
Linux patches 3.0.66 to 3.0.67, 3.2.39 and 3.4.33 to 3.4.34.

Modified: genpatches-2.6/trunk/3.0/0000_README
===================================================================
--- genpatches-2.6/trunk/3.0/0000_README	2013-02-23 19:07:21 UTC (rev 2290)
+++ genpatches-2.6/trunk/3.0/0000_README	2013-02-28 19:13:50 UTC (rev 2291)
@@ -295,6 +295,14 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.0.65
 
+Patch:  1065_linux-3.0.66.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.0.66
+
+Patch:  1066_linux-3.0.67.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.0.67
+
 Patch:  1800_fix-zcache-build.patch
 From:   http://bugs.gentoo.org/show_bug.cgi?id=376325
 Desc:   Fix zcache build error

Added: genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch	2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,54 @@
+diff --git a/Makefile b/Makefile
+index cdba5c1..da3ff21 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 65
++SUBLEVEL = 66
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+ 
+diff --git a/include/linux/syslog.h b/include/linux/syslog.h
+index 3891139..ce4c665 100644
+--- a/include/linux/syslog.h
++++ b/include/linux/syslog.h
+@@ -47,6 +47,12 @@
+ #define SYSLOG_FROM_CALL 0
+ #define SYSLOG_FROM_FILE 1
+ 
++/*
++ * Syslog priority (PRI) maximum length in char : '<[0-9]{1,3}>'
++ * See RFC5424 for details
++*/
++#define SYSLOG_PRI_MAX_LENGTH 5
++
+ int do_syslog(int type, char __user *buf, int count, bool from_file);
+ 
+ #endif /* _LINUX_SYSLOG_H */
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 3fc4708..6edc4e89 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -633,8 +633,19 @@ static void call_console_drivers(unsigned start, unsigned end)
+ 	start_print = start;
+ 	while (cur_index != end) {
+ 		if (msg_level < 0 && ((end - cur_index) > 2)) {
++			/*
++			 * prepare buf_prefix, as a contiguous array,
++			 * to be processed by log_prefix function
++			 */
++			char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1];
++			unsigned i;
++			for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) {
++				buf_prefix[i] = LOG_BUF(cur_index + i);
++			}
++			buf_prefix[i] = '\0'; /* force '\0' as last string character */
++
+ 			/* strip log prefix */
+-			cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
++			cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL);
+ 			start_print = cur_index;
+ 		}
+ 		while (cur_index != end) {

Added: genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch	2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,2195 @@
+diff --git a/Makefile b/Makefile
+index da3ff21..7d4347a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 66
++SUBLEVEL = 67
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+ 
+diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h
+index 654adc9..301bf0e 100644
+--- a/arch/arm/mach-pxa/include/mach/smemc.h
++++ b/arch/arm/mach-pxa/include/mach/smemc.h
+@@ -37,6 +37,7 @@
+ #define CSADRCFG1	(SMEMC_VIRT + 0x84)  /* Address Configuration Register for CS1 */
+ #define CSADRCFG2	(SMEMC_VIRT + 0x88)  /* Address Configuration Register for CS2 */
+ #define CSADRCFG3	(SMEMC_VIRT + 0x8C)  /* Address Configuration Register for CS3 */
++#define CSMSADRCFG	(SMEMC_VIRT + 0xA0)  /* Chip Select Configuration Register */
+ 
+ /*
+  * More handy macros for PCMCIA
+diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
+index 7992305..f38aa89 100644
+--- a/arch/arm/mach-pxa/smemc.c
++++ b/arch/arm/mach-pxa/smemc.c
+@@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void)
+ 	__raw_writel(csadrcfg[1], CSADRCFG1);
+ 	__raw_writel(csadrcfg[2], CSADRCFG2);
+ 	__raw_writel(csadrcfg[3], CSADRCFG3);
++	/* CSMSADRCFG wakes up in its default state (0), so we need to set it */
++	__raw_writel(0x2, CSMSADRCFG);
+ }
+ 
+ static struct syscore_ops smemc_syscore_ops = {
+@@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = {
+ 
+ static int __init smemc_init(void)
+ {
+-	if (cpu_is_pxa3xx())
++	if (cpu_is_pxa3xx()) {
++		/*
++		 * The only documentation we have on the
++		 * Chip Select Configuration Register (CSMSADRCFG) is that
++		 * it must be programmed to 0x2.
++		 * Moreover, in the bit definitions, the second bit
++		 * (CSMSADRCFG[1]) is called "SETALWAYS".
++		 * Other bits are reserved in this register.
++		 */
++		__raw_writel(0x2, CSMSADRCFG);
++
+ 		register_syscore_ops(&smemc_syscore_ops);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 22dadeb..9d35a3e 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -12,11 +12,10 @@
+ 
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
++#include <linux/mm_types.h>
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+ 
+-struct vm_area_struct;
+-
+ /*
+  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+  * memory.  For the return value to be meaningful, ADDR must be >=
+@@ -40,7 +39,14 @@ struct vm_area_struct;
+         do{                                                     \
+                 *(pteptr) = (pteval);                           \
+         } while(0)
+-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
++
++extern void purge_tlb_entries(struct mm_struct *, unsigned long);
++
++#define set_pte_at(mm, addr, ptep, pteval)                      \
++	do {                                                    \
++		set_pte(ptep, pteval);                          \
++		purge_tlb_entries(mm, addr);                    \
++	} while (0)
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
+@@ -464,6 +470,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ 		old = pte_val(*ptep);
+ 		new = pte_val(pte_wrprotect(__pte (old)));
+ 	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
++	purge_tlb_entries(mm, addr);
+ #else
+ 	pte_t old_pte = *ptep;
+ 	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 83335f3..5241698 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -421,6 +421,24 @@ void kunmap_parisc(void *addr)
+ EXPORT_SYMBOL(kunmap_parisc);
+ #endif
+ 
++void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++	unsigned long flags;
++
++	/* Note: purge_tlb_entries can be called at startup with
++	   no context.  */
++
++	/* Disable preemption while we play with %sr1.  */
++	preempt_disable();
++	mtsp(mm->context, 1);
++	purge_tlb_start(flags);
++	pdtlb(addr);
++	pitlb(addr);
++	purge_tlb_end(flags);
++	preempt_enable();
++}
++EXPORT_SYMBOL(purge_tlb_entries);
++
+ void __flush_tlb_range(unsigned long sid, unsigned long start,
+ 		       unsigned long end)
+ {
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
+index 583af70..cac9d2c 100644
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -163,6 +163,8 @@ static int kexec_all_irq_disabled = 0;
+ static void kexec_smp_down(void *arg)
+ {
+ 	local_irq_disable();
++	hard_irq_disable();
++
+ 	mb(); /* make sure our irqs are disabled before we say they are */
+ 	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ 	while(kexec_all_irq_disabled == 0)
+@@ -245,6 +247,8 @@ static void kexec_prepare_cpus(void)
+ 	wake_offline_cpus();
+ 	smp_call_function(kexec_smp_down, NULL, /* wait */0);
+ 	local_irq_disable();
++	hard_irq_disable();
++
+ 	mb(); /* make sure IRQs are disabled before we say they are */
+ 	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ 
+@@ -282,6 +286,7 @@ static void kexec_prepare_cpus(void)
+ 	if (ppc_md.kexec_cpu_down)
+ 		ppc_md.kexec_cpu_down(0, 0);
+ 	local_irq_disable();
++	hard_irq_disable();
+ }
+ 
+ #endif /* SMP */
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 2ada634..25ab200 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -584,6 +584,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ 	} else
+ 		prefix = 0;
+ 
++	/*
++	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
++	 * copying in vcpu load/put. Lets update our copies before we save
++	 * it into the save area
++	 */
++	save_fp_regs(&vcpu->arch.guest_fpregs);
++	save_access_regs(vcpu->run->s.regs.acrs);
++
+ 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
+ 			vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ 		return -EFAULT;
+diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
+index ffa037f..a6a6414 100644
+--- a/arch/x86/include/asm/mmzone_32.h
++++ b/arch/x86/include/asm/mmzone_32.h
+@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
+ 
+ #include <asm/numaq.h>
+ 
+-extern void resume_map_numa_kva(pgd_t *pgd);
+-
+-#else /* !CONFIG_NUMA */
+-
+-static inline void resume_map_numa_kva(pgd_t *pgd) {}
+-
+ #endif /* CONFIG_NUMA */
+ 
+ #ifdef CONFIG_DISCONTIGMEM
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
+index 3769079..a09ecb9 100644
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -130,8 +130,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
+ 		}
+ 	}
+ 
+-	resume_map_numa_kva(pgd_base);
+-
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index cc9b1e1..d99537f 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -313,7 +313,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+ 		if (per_cpu(lock_spinners, cpu) == xl) {
+ 			ADD_STATS(released_slow_kicked, 1);
+ 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+-			break;
+ 		}
+ 	}
+ }
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 000e7b2..8b8e8c0 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -289,7 +289,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
+ 	struct device *dev;
+ 	int error = 0;
+ 
+-	if (!bus)
++	if (!bus || !bus->p)
+ 		return -EINVAL;
+ 
+ 	klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -323,7 +323,7 @@ struct device *bus_find_device(struct bus_type *bus,
+ 	struct klist_iter i;
+ 	struct device *dev;
+ 
+-	if (!bus)
++	if (!bus || !bus->p)
+ 		return NULL;
+ 
+ 	klist_iter_init_node(&bus->p->klist_devices, &i,
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 48e8fee..94f6ae2 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+ 	int op_len, err;
+ 	void *req_buf;
+ 
+-	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
++	if (!(((u64)1 << (u64)op) & port->operations))
+ 		return -EOPNOTSUPP;
+ 
+ 	switch (op) {
+diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
+index 4abd089..7065851 100644
+--- a/drivers/dca/dca-core.c
++++ b/drivers/dca/dca-core.c
+@@ -409,6 +409,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+ 
+ 	spin_lock_irqsave(&dca_lock, flags);
+ 
++	if (list_empty(&dca_domains)) {
++		raw_spin_unlock_irqrestore(&dca_lock, flags);
++		return;
++	}
++
+ 	list_del(&dca->node);
+ 
+ 	pci_rc = dca_pci_rc_from_dev(dev);
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+index 206d230..0c853f5 100644
+--- a/drivers/gpu/drm/drm_usb.c
++++ b/drivers/gpu/drm/drm_usb.c
+@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
+ 
+ 	usbdev = interface_to_usbdev(interface);
+ 	dev->usbdev = usbdev;
+-	dev->dev = &usbdev->dev;
++	dev->dev = &interface->dev;
+ 
+ 	mutex_lock(&drm_global_mutex);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index b4f4d12..11ecb0c 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2898,6 +2898,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+ 	int plane = intel_crtc->plane;
++	u32 pctl;
+ 
+ 	if (!intel_crtc->active)
+ 		return;
+@@ -2914,6 +2915,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ 
+ 	intel_disable_plane(dev_priv, plane, pipe);
+ 	intel_disable_pipe(dev_priv, pipe);
++
++	/* Disable pannel fitter if it is on this pipe. */
++	pctl = I915_READ(PFIT_CONTROL);
++	if ((pctl & PFIT_ENABLE) &&
++	    ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
++		I915_WRITE(PFIT_CONTROL, 0);
++
+ 	intel_disable_pll(dev_priv, pipe);
+ 
+ 	intel_crtc->active = false;
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index f1fa763..99e7e7f 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -83,8 +83,8 @@ static struct usb_device_id p54u_table[] = {
+ 	{USB_DEVICE(0x06b9, 0x0121)},	/* Thomson SpeedTouch 121g */
+ 	{USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+ 	{USB_DEVICE(0x0803, 0x4310)},	/* Zoom 4410a */
+-	{USB_DEVICE(0x083a, 0x4503)},	/* T-Com Sinus 154 data II */
+ 	{USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
++	{USB_DEVICE(0x083a, 0x4531)},	/* T-Com Sinus 154 data II */
+ 	{USB_DEVICE(0x083a, 0xc501)},	/* Zoom Wireless-G 4410 */
+ 	{USB_DEVICE(0x083a, 0xf503)},	/* Accton FD7050E ver 1010ec  */
+ 	{USB_DEVICE(0x0846, 0x4240)},	/* Netgear WG111 (v2) */
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 5925e0b..8eaf0e2 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
+ static void xenvif_down(struct xenvif *vif)
+ {
+ 	disable_irq(vif->irq);
++	del_timer_sync(&vif->credit_timeout);
+ 	xen_netbk_deschedule_xenvif(vif);
+ 	xen_netbk_remove_xenvif(vif);
+ }
+@@ -362,8 +363,6 @@ void xenvif_disconnect(struct xenvif *vif)
+ 	atomic_dec(&vif->refcnt);
+ 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+ 
+-	del_timer_sync(&vif->credit_timeout);
+-
+ 	if (vif->irq)
+ 		unbind_from_irqhandler(vif->irq, vif);
+ 
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 9068d32..1260bf0 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -870,13 +870,13 @@ static int netbk_count_requests(struct xenvif *vif,
+ 		if (frags >= work_to_do) {
+ 			netdev_err(vif->dev, "Need more frags\n");
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -ENODATA;
+ 		}
+ 
+ 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
+ 			netdev_err(vif->dev, "Too many frags\n");
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -E2BIG;
+ 		}
+ 
+ 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+@@ -884,7 +884,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ 		if (txp->size > first->size) {
+ 			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -EIO;
+ 		}
+ 
+ 		first->size -= txp->size;
+@@ -894,7 +894,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ 			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ 				 txp->offset, txp->size);
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -EINVAL;
+ 		}
+ 	} while ((txp++)->flags & XEN_NETTXF_more_data);
+ 	return frags;
+@@ -990,7 +990,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ 		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ 
+ 	/* Skip first skb fragment if it is on same page as header fragment. */
+-	start = ((unsigned long)shinfo->frags[i].page == pending_idx);
++	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ 
+ 	for (i = start; i < nr_frags; i++) {
+ 		int j, newerr;
+diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
+index 86e4a1a..6bb02ab 100644
+--- a/drivers/pcmcia/vrc4171_card.c
++++ b/drivers/pcmcia/vrc4171_card.c
+@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
+ 	socket = &vrc4171_sockets[slot];
+ 	socket->csc_irq = search_nonuse_irq();
+ 	socket->io_irq = search_nonuse_irq();
++	spin_lock_init(&socket->lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
+index aec60d5..481037d 100644
+--- a/drivers/s390/kvm/kvm_virtio.c
++++ b/drivers/s390/kvm/kvm_virtio.c
+@@ -413,6 +413,26 @@ static void kvm_extint_handler(unsigned int ext_int_code,
+ }
+ 
+ /*
++ * For s390-virtio, we expect a page above main storage containing
++ * the virtio configuration. Try to actually load from this area
++ * in order to figure out if the host provides this page.
++ */
++static int __init test_devices_support(unsigned long addr)
++{
++	int ret = -EIO;
++
++	asm volatile(
++		"0:	lura	0,%1\n"
++		"1:	xgr	%0,%0\n"
++		"2:\n"
++		EX_TABLE(0b,2b)
++		EX_TABLE(1b,2b)
++		: "+d" (ret)
++		: "a" (addr)
++		: "0", "cc");
++	return ret;
++}
++/*
+  * Init function for virtio
+  * devices are in a single page above top of "normal" mem
+  */
+@@ -423,21 +443,23 @@ static int __init kvm_devices_init(void)
+ 	if (!MACHINE_IS_KVM)
+ 		return -ENODEV;
+ 
++	if (test_devices_support(real_memory_size) < 0)
++		return -ENODEV;
++
++	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
++	if (rc)
++		return rc;
++
++	kvm_devices = (void *) real_memory_size;
++
+ 	kvm_root = root_device_register("kvm_s390");
+ 	if (IS_ERR(kvm_root)) {
+ 		rc = PTR_ERR(kvm_root);
+ 		printk(KERN_ERR "Could not register kvm_s390 root device");
++		vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+ 		return rc;
+ 	}
+ 
+-	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+-	if (rc) {
+-		root_device_unregister(kvm_root);
+-		return rc;
+-	}
+-
+-	kvm_devices = (void *) real_memory_size;
+-
+ 	INIT_WORK(&hotplug_work, hotplug_devices);
+ 
+ 	service_subclass_irq_register();
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 39be673..4b9d8f0 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -136,6 +136,11 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ 	/* Device config is special, because it must work on
+ 	 * an unconfigured device. */
+ 	if (cmd == COMEDI_DEVCONFIG) {
++		if (minor >= COMEDI_NUM_BOARD_MINORS) {
++			/* Device config not appropriate on non-board minors. */
++			rc = -ENOTTY;
++			goto done;
++		}
+ 		rc = do_devconfig_ioctl(dev,
+ 					(struct comedi_devconfig __user *)arg);
+ 		if (rc == 0)
+diff --git a/drivers/staging/hv/hv_kvp.c b/drivers/staging/hv/hv_kvp.c
+index 13b0ecf..9f8efd4 100644
+--- a/drivers/staging/hv/hv_kvp.c
++++ b/drivers/staging/hv/hv_kvp.c
+@@ -201,11 +201,13 @@ kvp_respond_to_host(char *key, char *value, int error)
+ 	 * The windows host expects the key/value pair to be encoded
+ 	 * in utf16.
+ 	 */
+-	keylen = utf8s_to_utf16s(key_name, strlen(key_name),
+-				(wchar_t *)kvp_data->data.key);
++	keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
++				(wchar_t *) kvp_data->data.key,
++				HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
+ 	kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
+-	valuelen = utf8s_to_utf16s(value, strlen(value),
+-				(wchar_t *)kvp_data->data.value);
++	valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
++				(wchar_t *) kvp_data->data.value,
++				HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
+ 	kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
+ 
+ 	kvp_data->data.value_type = REG_SZ; /* all our values are strings */
+diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
+index c612ab5..f759352 100644
+--- a/drivers/staging/vt6656/usbpipe.c
++++ b/drivers/staging/vt6656/usbpipe.c
+@@ -168,6 +168,11 @@ int PIPEnsControlOut(
+     if (pDevice->Flags & fMP_CONTROL_WRITES)
+         return STATUS_FAILURE;
+ 
++	if (pDevice->Flags & fMP_CONTROL_READS)
++		return STATUS_FAILURE;
++
++	MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
++
+ 	pDevice->sUsbCtlRequest.bRequestType = 0x40;
+ 	pDevice->sUsbCtlRequest.bRequest = byRequest;
+ 	pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
+@@ -182,12 +187,13 @@ int PIPEnsControlOut(
+ 
+ 	ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ 	if (ntStatus != 0) {
+-		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
++		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++			"control send request submission failed: %d\n",
++				ntStatus);
++		MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
+ 		return STATUS_FAILURE;
+ 	}
+-	else {
+-	    MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
+-	}
++
+ 	spin_unlock_irq(&pDevice->lock);
+     for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
+ 
+@@ -227,6 +233,11 @@ int PIPEnsControlIn(
+     if (pDevice->Flags & fMP_CONTROL_READS)
+ 	return STATUS_FAILURE;
+ 
++	if (pDevice->Flags & fMP_CONTROL_WRITES)
++		return STATUS_FAILURE;
++
++	MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
++
+ 	pDevice->sUsbCtlRequest.bRequestType = 0xC0;
+ 	pDevice->sUsbCtlRequest.bRequest = byRequest;
+ 	pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
+@@ -240,10 +251,11 @@ int PIPEnsControlIn(
+ 
+ 	ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ 	if (ntStatus != 0) {
+-		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
+-	}else {
+-		MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
+-    }
++		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++			"control request submission failed: %d\n", ntStatus);
++		MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
++		return STATUS_FAILURE;
++	}
+ 
+ 	spin_unlock_irq(&pDevice->lock);
+     for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 53f2442..3047873 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ 	if (opt & TERMIOS_WAIT) {
+ 		tty_wait_until_sent(tty, 0);
+ 		if (signal_pending(current))
+-			return -EINTR;
++			return -ERESTARTSYS;
+ 	}
+ 
+ 	tty_set_termios(tty, &tmp_termios);
+@@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
+ 	if (opt & TERMIOS_WAIT) {
+ 		tty_wait_until_sent(tty, 0);
+ 		if (signal_pending(current))
+-			return -EINTR;
++			return -ERESTARTSYS;
+ 	}
+ 
+ 	mutex_lock(&tty->termios_mutex);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index b3915b7..e41288a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3016,7 +3016,7 @@ int __init vty_init(const struct file_operations *console_fops)
+ 
+ static struct class *vtconsole_class;
+ 
+-static int bind_con_driver(const struct consw *csw, int first, int last,
++static int do_bind_con_driver(const struct consw *csw, int first, int last,
+ 			   int deflt)
+ {
+ 	struct module *owner = csw->owner;
+@@ -3027,7 +3027,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ 	if (!try_module_get(owner))
+ 		return -ENODEV;
+ 
+-	console_lock();
++	WARN_CONSOLE_UNLOCKED();
+ 
+ 	/* check if driver is registered */
+ 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3112,11 +3112,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ 
+ 	retval = 0;
+ err:
+-	console_unlock();
+ 	module_put(owner);
+ 	return retval;
+ };
+ 
++
++static int bind_con_driver(const struct consw *csw, int first, int last,
++			   int deflt)
++{
++	int ret;
++
++	console_lock();
++	ret = do_bind_con_driver(csw, first, last, deflt);
++	console_unlock();
++	return ret;
++}
++
+ #ifdef CONFIG_VT_HW_CONSOLE_BINDING
+ static int con_is_graphics(const struct consw *csw, int first, int last)
+ {
+@@ -3153,6 +3164,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
+  */
+ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ {
++	int retval;
++
++	console_lock();
++	retval = do_unbind_con_driver(csw, first, last, deflt);
++	console_unlock();
++	return retval;
++}
++EXPORT_SYMBOL(unbind_con_driver);
++
++/* unlocked version of unbind_con_driver() */
++int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
++{
+ 	struct module *owner = csw->owner;
+ 	const struct consw *defcsw = NULL;
+ 	struct con_driver *con_driver = NULL, *con_back = NULL;
+@@ -3161,7 +3184,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 	if (!try_module_get(owner))
+ 		return -ENODEV;
+ 
+-	console_lock();
++	WARN_CONSOLE_UNLOCKED();
+ 
+ 	/* check if driver is registered and if it is unbindable */
+ 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3174,10 +3197,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 		}
+ 	}
+ 
+-	if (retval) {
+-		console_unlock();
++	if (retval)
+ 		goto err;
+-	}
+ 
+ 	retval = -ENODEV;
+ 
+@@ -3193,15 +3214,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 		}
+ 	}
+ 
+-	if (retval) {
+-		console_unlock();
++	if (retval)
+ 		goto err;
+-	}
+ 
+-	if (!con_is_bound(csw)) {
+-		console_unlock();
++	if (!con_is_bound(csw))
+ 		goto err;
+-	}
+ 
+ 	first = max(first, con_driver->first);
+ 	last = min(last, con_driver->last);
+@@ -3228,15 +3245,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 	if (!con_is_bound(csw))
+ 		con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+ 
+-	console_unlock();
+ 	/* ignore return value, binding should not fail */
+-	bind_con_driver(defcsw, first, last, deflt);
++	do_bind_con_driver(defcsw, first, last, deflt);
+ err:
+ 	module_put(owner);
+ 	return retval;
+ 
+ }
+-EXPORT_SYMBOL(unbind_con_driver);
++EXPORT_SYMBOL_GPL(do_unbind_con_driver);
+ 
+ static int vt_bind(struct con_driver *con)
+ {
+@@ -3508,28 +3524,18 @@ int con_debug_leave(void)
+ }
+ EXPORT_SYMBOL_GPL(con_debug_leave);
+ 
+-/**
+- * register_con_driver - register console driver to console layer
+- * @csw: console driver
+- * @first: the first console to take over, minimum value is 0
+- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+- *
+- * DESCRIPTION: This function registers a console driver which can later
+- * bind to a range of consoles specified by @first and @last. It will
+- * also initialize the console driver by calling con_startup().
+- */
+-int register_con_driver(const struct consw *csw, int first, int last)
++static int do_register_con_driver(const struct consw *csw, int first, int last)
+ {
+ 	struct module *owner = csw->owner;
+ 	struct con_driver *con_driver;
+ 	const char *desc;
+ 	int i, retval = 0;
+ 
++	WARN_CONSOLE_UNLOCKED();
++
+ 	if (!try_module_get(owner))
+ 		return -ENODEV;
+ 
+-	console_lock();
+-
+ 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ 		con_driver = &registered_con_driver[i];
+ 
+@@ -3582,10 +3588,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
+ 	}
+ 
+ err:
+-	console_unlock();
+ 	module_put(owner);
+ 	return retval;
+ }
++
++/**
++ * register_con_driver - register console driver to console layer
++ * @csw: console driver
++ * @first: the first console to take over, minimum value is 0
++ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
++ *
++ * DESCRIPTION: This function registers a console driver which can later
++ * bind to a range of consoles specified by @first and @last. It will
++ * also initialize the console driver by calling con_startup().
++ */
++int register_con_driver(const struct consw *csw, int first, int last)
++{
++	int retval;
++
++	console_lock();
++	retval = do_register_con_driver(csw, first, last);
++	console_unlock();
++	return retval;
++}
+ EXPORT_SYMBOL(register_con_driver);
+ 
+ /**
+@@ -3601,9 +3626,18 @@ EXPORT_SYMBOL(register_con_driver);
+  */
+ int unregister_con_driver(const struct consw *csw)
+ {
+-	int i, retval = -ENODEV;
++	int retval;
+ 
+ 	console_lock();
++	retval = do_unregister_con_driver(csw);
++	console_unlock();
++	return retval;
++}
++EXPORT_SYMBOL(unregister_con_driver);
++
++int do_unregister_con_driver(const struct consw *csw)
++{
++	int i, retval = -ENODEV;
+ 
+ 	/* cannot unregister a bound driver */
+ 	if (con_is_bound(csw))
+@@ -3629,27 +3663,53 @@ int unregister_con_driver(const struct consw *csw)
+ 		}
+ 	}
+ err:
+-	console_unlock();
+ 	return retval;
+ }
+-EXPORT_SYMBOL(unregister_con_driver);
++EXPORT_SYMBOL_GPL(do_unregister_con_driver);
+ 
+ /*
+  *	If we support more console drivers, this function is used
+  *	when a driver wants to take over some existing consoles
+  *	and become default driver for newly opened ones.
+  *
+- *      take_over_console is basically a register followed by unbind
++ *	take_over_console is basically a register followed by unbind
++ */
++int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
++{
++	int err;
++
++	err = do_register_con_driver(csw, first, last);
++	/*
++	 * If we get an busy error we still want to bind the console driver
++	 * and return success, as we may have unbound the console driver
++	 * but not unregistered it.
++	 */
++	if (err == -EBUSY)
++		err = 0;
++	if (!err)
++		do_bind_con_driver(csw, first, last, deflt);
++
++	return err;
++}
++EXPORT_SYMBOL_GPL(do_take_over_console);
++
++/*
++ *	If we support more console drivers, this function is used
++ *	when a driver wants to take over some existing consoles
++ *	and become default driver for newly opened ones.
++ *
++ *	take_over_console is basically a register followed by unbind
+  */
+ int take_over_console(const struct consw *csw, int first, int last, int deflt)
+ {
+ 	int err;
+ 
+ 	err = register_con_driver(csw, first, last);
+-	/* if we get an busy error we still want to bind the console driver
++	/*
++	 * If we get an busy error we still want to bind the console driver
+ 	 * and return success, as we may have unbound the console driver
+-	 * but not unregistered it.
+-	*/
++	 * but not unregistered it.
++	 */
+ 	if (err == -EBUSY)
+ 		err = 0;
+ 	if (!err)
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
+index 55a57c2..028c572 100644
+--- a/drivers/usb/host/ehci-omap.c
++++ b/drivers/usb/host/ehci-omap.c
+@@ -321,7 +321,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
+ 	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+ 
+-MODULE_ALIAS("platform:omap-ehci");
++MODULE_ALIAS("platform:ehci-omap");
+ MODULE_AUTHOR("Texas Instruments, Inc.");
+ MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 52cd814..24a3ea6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -479,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+ 
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
++	.reserved = BIT(4),
+ };
+ 
+ static const struct option_blacklist_info zte_0037_blacklist = {
+@@ -575,8 +576,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++	{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -1215,7 +1222,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ 	},
+-	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
++	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
++	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
++	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 16b0bf0..7ab9046 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -147,7 +147,7 @@ static int usb_stor_huawei_dongles_pid(struct us_data *us)
+ 	int idProduct;
+ 
+ 	idesc = &us->pusb_intf->cur_altsetting->desc;
+-	idProduct = us->pusb_dev->descriptor.idProduct;
++	idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ 	/* The first port is CDROM,
+ 	 * means the dongle in the single port mode,
+ 	 * and a switch command is required to be sent. */
+@@ -169,7 +169,7 @@ int usb_stor_huawei_init(struct us_data *us)
+ 	int result = 0;
+ 
+ 	if (usb_stor_huawei_dongles_pid(us)) {
+-		if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++		if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+ 			result = usb_stor_huawei_scsi_init(us);
+ 		else
+ 			result = usb_stor_huawei_feature_init(us);
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 2c85530..65a6a75 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV(  0x04b4, 0x6831, 0x0000, 0x9999,
+ 		"Cypress ISD-300LP",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+ 
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+ 		"Super Top",
+ 		"USB 2.0  SATA BRIDGE",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
+index d2a96a4..ee0f001 100644
+--- a/drivers/video/backlight/adp8860_bl.c
++++ b/drivers/video/backlight/adp8860_bl.c
+@@ -793,7 +793,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
+ 
+ static int adp8860_i2c_resume(struct i2c_client *client)
+ {
+-	adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
++	adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
+index 05a8832..bc0503a 100644
+--- a/drivers/video/backlight/adp8870_bl.c
++++ b/drivers/video/backlight/adp8870_bl.c
+@@ -968,7 +968,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+ 
+ static int adp8870_i2c_resume(struct i2c_client *client)
+ {
+-	adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
++	adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index bf9a9b7..9b8bcab 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -530,6 +530,33 @@ static int search_for_mapped_con(void)
+ 	return retval;
+ }
+ 
++static int do_fbcon_takeover(int show_logo)
++{
++	int err, i;
++
++	if (!num_registered_fb)
++		return -ENODEV;
++
++	if (!show_logo)
++		logo_shown = FBCON_LOGO_DONTSHOW;
++
++	for (i = first_fb_vc; i <= last_fb_vc; i++)
++		con2fb_map[i] = info_idx;
++
++	err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
++				fbcon_is_default);
++
++	if (err) {
++		for (i = first_fb_vc; i <= last_fb_vc; i++)
++			con2fb_map[i] = -1;
++		info_idx = -1;
++	} else {
++		fbcon_has_console_bind = 1;
++	}
++
++	return err;
++}
++
+ static int fbcon_takeover(int show_logo)
+ {
+ 	int err, i;
+@@ -991,7 +1018,7 @@ static const char *fbcon_startup(void)
+ 	}
+ 
+ 	/* Setup default font */
+-	if (!p->fontdata) {
++	if (!p->fontdata && !vc->vc_font.data) {
+ 		if (!fontname[0] || !(font = find_font(fontname)))
+ 			font = get_default_font(info->var.xres,
+ 						info->var.yres,
+@@ -1001,6 +1028,8 @@ static const char *fbcon_startup(void)
+ 		vc->vc_font.height = font->height;
+ 		vc->vc_font.data = (void *)(p->fontdata = font->data);
+ 		vc->vc_font.charcount = 256; /* FIXME  Need to support more fonts */
++	} else {
++		p->fontdata = vc->vc_font.data;
+ 	}
+ 
+ 	cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1160,9 +1189,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	ops->p = &fb_display[fg_console];
+ }
+ 
+-static void fbcon_free_font(struct display *p)
++static void fbcon_free_font(struct display *p, bool freefont)
+ {
+-	if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++	if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ 		kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ 	p->fontdata = NULL;
+ 	p->userfont = 0;
+@@ -1174,8 +1203,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	struct fb_info *info;
+ 	struct fbcon_ops *ops;
+ 	int idx;
++	bool free_font = true;
+ 
+-	fbcon_free_font(p);
+ 	idx = con2fb_map[vc->vc_num];
+ 
+ 	if (idx == -1)
+@@ -1186,6 +1215,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	if (!info)
+ 		goto finished;
+ 
++	if (info->flags & FBINFO_MISC_FIRMWARE)
++		free_font = false;
+ 	ops = info->fbcon_par;
+ 
+ 	if (!ops)
+@@ -1197,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	ops->flags &= ~FBCON_FLAGS_INIT;
+ finished:
+ 
++	fbcon_free_font(p, free_font);
++
+ 	if (!con_is_bound(&fb_con))
+ 		fbcon_exit();
+ 
+@@ -2978,7 +3011,7 @@ static int fbcon_unbind(void)
+ {
+ 	int ret;
+ 
+-	ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
++	ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ 				fbcon_is_default);
+ 
+ 	if (!ret)
+@@ -3051,7 +3084,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ 		primary_device = -1;
+ 
+ 	if (!num_registered_fb)
+-		unregister_con_driver(&fb_con);
++		do_unregister_con_driver(&fb_con);
+ 
+ 	return 0;
+ }
+@@ -3116,7 +3149,7 @@ static int fbcon_fb_registered(struct fb_info *info)
+ 		}
+ 
+ 		if (info_idx != -1)
+-			ret = fbcon_takeover(1);
++			ret = do_fbcon_takeover(1);
+ 	} else {
+ 		for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ 			if (con2fb_map_boot[i] == idx)
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 7a41220..c133dde 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1628,7 +1628,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ 	event.info = fb_info;
+ 	if (!lock_fb_info(fb_info))
+ 		return -ENODEV;
++	console_lock();
+ 	fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++	console_unlock();
+ 	unlock_fb_info(fb_info);
+ 	return 0;
+ }
+@@ -1644,8 +1646,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ 
+ 	if (!lock_fb_info(fb_info))
+ 		return -ENODEV;
++	console_lock();
+ 	event.info = fb_info;
+ 	ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++	console_unlock();
+ 	unlock_fb_info(fb_info);
+ 
+ 	if (ret)
+@@ -1660,7 +1664,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ 	num_registered_fb--;
+ 	fb_cleanup_device(fb_info);
+ 	event.info = fb_info;
++	console_lock();
+ 	fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
++	console_unlock();
+ 
+ 	/* this may free fb info */
+ 	put_fb_info(fb_info);
+@@ -1831,11 +1837,8 @@ int fb_new_modelist(struct fb_info *info)
+ 	err = 1;
+ 
+ 	if (!list_empty(&info->modelist)) {
+-		if (!lock_fb_info(info))
+-			return -ENODEV;
+ 		event.info = info;
+ 		err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+-		unlock_fb_info(info);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
+index 67afa9c..303fb9f 100644
+--- a/drivers/video/fbsysfs.c
++++ b/drivers/video/fbsysfs.c
+@@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device,
+ 	if (i * sizeof(struct fb_videomode) != count)
+ 		return -EINVAL;
+ 
++	if (!lock_fb_info(fb_info))
++		return -ENODEV;
+ 	console_lock();
+ 	list_splice(&fb_info->modelist, &old_list);
+ 	fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
+@@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device,
+ 		fb_destroy_modelist(&old_list);
+ 
+ 	console_unlock();
++	unlock_fb_info(fb_info);
+ 
+ 	return 0;
+ }
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 80bbc9c..244100f 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -499,6 +499,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+ 	return err;
+ 
+ exit_inode:
++	kfree(n_group_desc);
+ 	/* ext4_handle_release_buffer(handle, iloc.bh); */
+ 	brelse(iloc.bh);
+ exit_dindj:
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f1aa1a2..c6a3363 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3681,22 +3681,19 @@ no_journal:
+ 	if (err) {
+ 		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+ 			 err);
+-		goto failed_mount4;
++		goto failed_mount5;
+ 	}
+ 
+ 	err = ext4_register_li_request(sb, first_not_zeroed);
+ 	if (err)
+-		goto failed_mount4;
++		goto failed_mount6;
+ 
+ 	sbi->s_kobj.kset = ext4_kset;
+ 	init_completion(&sbi->s_kobj_unregister);
+ 	err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
+ 				   "%s", sb->s_id);
+-	if (err) {
+-		ext4_mb_release(sb);
+-		ext4_ext_release(sb);
+-		goto failed_mount4;
+-	};
++	if (err)
++		goto failed_mount7;
+ 
+ 	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
+ 	ext4_orphan_cleanup(sb, es);
+@@ -3730,13 +3727,19 @@ cantfind_ext4:
+ 		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
+ 	goto failed_mount;
+ 
++failed_mount7:
++	ext4_unregister_li_request(sb);
++failed_mount6:
++	ext4_ext_release(sb);
++failed_mount5:
++	ext4_mb_release(sb);
++	ext4_release_system_zone(sb);
+ failed_mount4:
+ 	iput(root);
+ 	sb->s_root = NULL;
+ 	ext4_msg(sb, KERN_ERR, "mount failed");
+ 	destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
+ failed_mount_wq:
+-	ext4_release_system_zone(sb);
+ 	if (sbi->s_journal) {
+ 		jbd2_journal_destroy(sbi->s_journal);
+ 		sbi->s_journal = NULL;
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index 20b4ea5..6ee3c36 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -514,7 +514,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
+ 	int charlen;
+ 
+ 	if (utf8) {
+-		*outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
++		*outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
++				(wchar_t *) outname, FAT_LFN_LEN + 2);
+ 		if (*outlen < 0)
+ 			return *outlen;
+ 		else if (*outlen > FAT_LFN_LEN)
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index e374050..5ee055e 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -550,6 +550,9 @@ again:
+ 		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ 		if (status < 0)
+ 			break;
++		/* Resend the blocking lock request after a server reboot */
++		if (resp->status ==  nlm_lck_denied_grace_period)
++			continue;
+ 		if (resp->status != nlm_lck_blocked)
+ 			break;
+ 	}
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index 44a88a9..0eb059e 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -114,34 +114,57 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
+ }
+ EXPORT_SYMBOL(utf32_to_utf8);
+ 
+-int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
++static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian)
++{
++	switch (endian) {
++	default:
++		*s = (wchar_t) c;
++		break;
++	case UTF16_LITTLE_ENDIAN:
++		*s = __cpu_to_le16(c);
++		break;
++	case UTF16_BIG_ENDIAN:
++		*s = __cpu_to_be16(c);
++		break;
++	}
++}
++
++int utf8s_to_utf16s(const u8 *s, int len, enum utf16_endian endian,
++		wchar_t *pwcs, int maxlen)
+ {
+ 	u16 *op;
+ 	int size;
+ 	unicode_t u;
+ 
+ 	op = pwcs;
+-	while (*s && len > 0) {
++	while (len > 0 && maxlen > 0 && *s) {
+ 		if (*s & 0x80) {
+ 			size = utf8_to_utf32(s, len, &u);
+ 			if (size < 0)
+ 				return -EINVAL;
++			s += size;
++			len -= size;
+ 
+ 			if (u >= PLANE_SIZE) {
++				if (maxlen < 2)
++					break;
+ 				u -= PLANE_SIZE;
+-				*op++ = (wchar_t) (SURROGATE_PAIR |
+-						((u >> 10) & SURROGATE_BITS));
+-				*op++ = (wchar_t) (SURROGATE_PAIR |
++				put_utf16(op++, SURROGATE_PAIR |
++						((u >> 10) & SURROGATE_BITS),
++						endian);
++				put_utf16(op++, SURROGATE_PAIR |
+ 						SURROGATE_LOW |
+-						(u & SURROGATE_BITS));
++						(u & SURROGATE_BITS),
++						endian);
++				maxlen -= 2;
+ 			} else {
+-				*op++ = (wchar_t) u;
++				put_utf16(op++, u, endian);
++				maxlen--;
+ 			}
+-			s += size;
+-			len -= size;
+ 		} else {
+-			*op++ = *s++;
++			put_utf16(op++, *s++, endian);
+ 			len--;
++			maxlen--;
+ 		}
+ 	}
+ 	return op - pwcs;
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 8445fbc..6f292dd 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ 
+ 	/* don't allow invalid bits: we don't want flags set */
+ 	mask = inotify_arg_to_mask(arg);
+-	if (unlikely(!(mask & IN_ALL_EVENTS)))
+-		return -EINVAL;
+ 
+ 	fsn_mark = fsnotify_find_inode_mark(group, inode);
+ 	if (!fsn_mark)
+@@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+ 
+ 	/* don't allow invalid bits: we don't want flags set */
+ 	mask = inotify_arg_to_mask(arg);
+-	if (unlikely(!(mask & IN_ALL_EVENTS)))
+-		return -EINVAL;
+ 
+ 	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ 	if (unlikely(!tmp_i_mark))
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 7642d7c..ab4046f 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2539,6 +2539,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ 	 * everything is up to the caller :) */
+ 	status = ocfs2_should_refresh_lock_res(lockres);
+ 	if (status < 0) {
++		ocfs2_cluster_unlock(osb, lockres, level);
+ 		mlog_errno(status);
+ 		goto bail;
+ 	}
+@@ -2547,8 +2548,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ 
+ 		ocfs2_complete_lock_res_refresh(lockres, status);
+ 
+-		if (status < 0)
++		if (status < 0) {
++			ocfs2_cluster_unlock(osb, lockres, level);
+ 			mlog_errno(status);
++		}
+ 		ocfs2_track_lock_refresh(lockres);
+ 	}
+ bail:
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 7453cfd..6ae6a15 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -77,7 +77,9 @@ extern const struct consw prom_con;	/* SPARC PROM console */
+ int con_is_bound(const struct consw *csw);
+ int register_con_driver(const struct consw *csw, int first, int last);
+ int unregister_con_driver(const struct consw *csw);
++int do_unregister_con_driver(const struct consw *csw);
+ int take_over_console(const struct consw *sw, int first, int last, int deflt);
++int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
+ void give_up_console(const struct consw *sw);
+ #ifdef CONFIG_HW_CONSOLE
+ int con_debug_enter(struct vc_data *vc);
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index 1d1b1e1..ee2baf0 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+ 
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index d47beef..5dc635f 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -43,7 +43,7 @@ enum utf16_endian {
+ 	UTF16_BIG_ENDIAN
+ };
+ 
+-/* nls.c */
++/* nls_base.c */
+ extern int register_nls(struct nls_table *);
+ extern int unregister_nls(struct nls_table *);
+ extern struct nls_table *load_nls(char *);
+@@ -52,7 +52,8 @@ extern struct nls_table *load_nls_default(void);
+ 
+ extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+ extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+-extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
++extern int utf8s_to_utf16s(const u8 *s, int len,
++		enum utf16_endian endian, wchar_t *pwcs, int maxlen);
+ extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+ 		enum utf16_endian endian, u8 *s, int maxlen);
+ 
+diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
+index a54b825..6f8b026 100644
+--- a/include/linux/usb/audio.h
++++ b/include/linux/usb/audio.h
+@@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de
+ 						   int protocol)
+ {
+ 	__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+-	return desc->baSourceID[desc->bNrInPins + control_size];
++	return *(uac_processing_unit_bmControls(desc, protocol)
++			+ control_size);
+ }
+ 
+ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
+ 						 int protocol)
+ {
+ 	__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+-	return &desc->baSourceID[desc->bNrInPins + control_size + 1];
++	return uac_processing_unit_bmControls(desc, protocol)
++			+ control_size + 1;
+ }
+ 
+ /* 4.5.2 Class-Specific AS Interface Descriptor */
+diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
+index 4d05e14..90538b4 100644
+--- a/include/linux/vt_kern.h
++++ b/include/linux/vt_kern.h
+@@ -131,6 +131,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
+ int vt_waitactive(int n);
+ void change_console(struct vc_data *new_vc);
+ void reset_vc(struct vc_data *vc);
++extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
++			     int deflt);
+ extern int unbind_con_driver(const struct consw *csw, int first, int last,
+ 			     int deflt);
+ int vty_init(const struct file_operations *console_fops);
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index e46674d..f9ce2fa 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -28,16 +28,16 @@
+ 
+ struct inet_hashinfo;
+ 
+-/* I have no idea if this is a good hash for v6 or not. -DaveM */
+ static inline unsigned int inet6_ehashfn(struct net *net,
+ 				const struct in6_addr *laddr, const u16 lport,
+ 				const struct in6_addr *faddr, const __be16 fport)
+ {
+-	u32 ports = (lport ^ (__force u16)fport);
++	u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+ 
+ 	return jhash_3words((__force u32)laddr->s6_addr32[3],
+-			    (__force u32)faddr->s6_addr32[3],
+-			    ports, inet_ehash_secret + net_hash_mix(net));
++			    ipv6_addr_jhash(faddr),
++			    ports,
++			    inet_ehash_secret + net_hash_mix(net));
+ }
+ 
+ static inline int inet6_sk_ehashfn(const struct sock *sk)
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 14dd9c7..26490b3 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -199,6 +199,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
+ extern int inet_sk_rebuild_header(struct sock *sk);
+ 
+ extern u32 inet_ehash_secret;
++extern u32 ipv6_hash_secret;
+ extern void build_ehash_secret(void);
+ 
+ static inline unsigned int inet_ehashfn(struct net *net,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index c39121f..879aadf 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -15,6 +15,7 @@
+ 
+ #include <linux/ipv6.h>
+ #include <linux/hardirq.h>
++#include <linux/jhash.h>
+ #include <net/if_inet6.h>
+ #include <net/ndisc.h>
+ #include <net/flow.h>
+@@ -386,6 +387,17 @@ struct ip6_create_arg {
+ void ip6_frag_init(struct inet_frag_queue *q, void *a);
+ int ip6_frag_match(struct inet_frag_queue *q, void *a);
+ 
++/* more secured version of ipv6_addr_hash() */
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++{
++	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
++
++	return jhash_3words(v,
++			    (__force u32)a->s6_addr32[2],
++			    (__force u32)a->s6_addr32[3],
++			    ipv6_hash_secret);
++}
++
+ static inline int ipv6_addr_any(const struct in6_addr *a)
+ {
+ 	return (a->s6_addr32[0] | a->s6_addr32[1] |
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 957869f..e079c3e 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+  * and expiry check is done in the hrtimer_interrupt or in the softirq.
+  */
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+-					    struct hrtimer_clock_base *base,
+-					    int wakeup)
++					    struct hrtimer_clock_base *base)
+ {
+-	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+-		if (wakeup) {
+-			raw_spin_unlock(&base->cpu_base->lock);
+-			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-			raw_spin_lock(&base->cpu_base->lock);
+-		} else
+-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-
+-		return 1;
+-	}
+-
+-	return 0;
++	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
+ }
+ 
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+-					    struct hrtimer_clock_base *base,
+-					    int wakeup)
++					    struct hrtimer_clock_base *base)
+ {
+ 	return 0;
+ }
+@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	 *
+ 	 * XXX send_remote_softirq() ?
+ 	 */
+-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+-		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
++		&& hrtimer_enqueue_reprogram(timer, new_base)) {
++		if (wakeup) {
++			/*
++			 * We need to drop cpu_base->lock to avoid a
++			 * lock ordering issue vs. rq->lock.
++			 */
++			raw_spin_unlock(&new_base->cpu_base->lock);
++			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++			local_irq_restore(flags);
++			return ret;
++		} else {
++			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++		}
++	}
+ 
+ 	unlock_hrtimer_base(timer, &flags);
+ 
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index dc813a9..63633a3 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ 
+ 	/*
+ 	 * All handlers must agree on IRQF_SHARED, so we test just the
+-	 * first. Check for action->next as well.
++	 * first.
+ 	 */
+ 	action = desc->action;
+ 	if (!action || !(action->flags & IRQF_SHARED) ||
+-	    (action->flags & __IRQF_TIMER) ||
+-	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+-	    !action->next)
++	    (action->flags & __IRQF_TIMER))
+ 		goto out;
+ 
+ 	/* Already running on another processor */
+@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ 	do {
+ 		if (handle_irq_event(desc) == IRQ_HANDLED)
+ 			ret = IRQ_HANDLED;
++		/* Make sure that there is still a valid action */
+ 		action = desc->action;
+ 	} while ((desc->istate & IRQS_PENDING) && action);
+ 	desc->istate &= ~IRQS_POLL_INPROGRESS;
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 640ded8..93d5e4a 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1450,8 +1450,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ 		while (!signal_pending(current)) {
+ 			if (timer.it.cpu.expires.sched == 0) {
+ 				/*
+-				 * Our timer fired and was reset.
++				 * Our timer fired and was reset, below
++				 * deletion can not fail.
+ 				 */
++				posix_cpu_timer_del(&timer);
+ 				spin_unlock_irq(&timer.it_lock);
+ 				return 0;
+ 			}
+@@ -1469,9 +1471,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ 		 * We were interrupted by a signal.
+ 		 */
+ 		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+-		posix_cpu_timer_set(&timer, 0, &zero_it, it);
++		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
++		if (!error) {
++			/*
++			 * Timer is now unarmed, deletion can not fail.
++			 */
++			posix_cpu_timer_del(&timer);
++		}
+ 		spin_unlock_irq(&timer.it_lock);
+ 
++		while (error == TIMER_RETRY) {
++			/*
++			 * We need to handle case when timer was or is in the
++			 * middle of firing. In other cases we already freed
++			 * resources.
++			 */
++			spin_lock_irq(&timer.it_lock);
++			error = posix_cpu_timer_del(&timer);
++			spin_unlock_irq(&timer.it_lock);
++		}
++
+ 		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+ 			/*
+ 			 * It actually did fire already.
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
+index eb51d76..3f42652 100644
+--- a/kernel/timeconst.pl
++++ b/kernel/timeconst.pl
+@@ -369,10 +369,8 @@ if ($hz eq '--can') {
+ 		die "Usage: $0 HZ\n";
+ 	}
+ 
+-	@val = @{$canned_values{$hz}};
+-	if (!defined(@val)) {
+-		@val = compute_values($hz);
+-	}
++	$cv = $canned_values{$hz};
++	@val = defined($cv) ? @$cv : compute_values($hz);
+ 	output($hz, @val);
+ }
+ exit 0;
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 8d723c9..35b2bb0 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -17,6 +17,7 @@
+ #include <linux/fadvise.h>
+ #include <linux/writeback.h>
+ #include <linux/syscalls.h>
++#include <linux/swap.h>
+ 
+ #include <asm/unistd.h>
+ 
+@@ -123,9 +124,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
+ 		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
+ 		end_index = (endbyte >> PAGE_CACHE_SHIFT);
+ 
+-		if (end_index >= start_index)
+-			invalidate_mapping_pages(mapping, start_index,
++		if (end_index >= start_index) {
++			unsigned long count = invalidate_mapping_pages(mapping,
++						start_index, end_index);
++
++			/*
++			 * If fewer pages were invalidated than expected then
++			 * it is possible that some of the pages were on
++			 * a per-cpu pagevec for a remote CPU. Drain all
++			 * pagevecs and try again.
++			 */
++			if (count < (end_index - start_index + 1)) {
++				lru_add_drain_all();
++				invalidate_mapping_pages(mapping, start_index,
+ 						end_index);
++			}
++		}
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 71c7811..88fa54d 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/module.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
++/* global SRCU for all MMs */
++static struct srcu_struct srcu;
++
+ /*
+  * This function can't run concurrently against mmu_notifier_register
+  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,58 +29,61 @@
+  * in parallel despite there being no task using this mm any more,
+  * through the vmas outside of the exit_mmap context, such as with
+  * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+  * can't go away from under us as exit_mmap holds an mm_count pin
+  * itself.
+  */
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ 	struct mmu_notifier *mn;
+-	struct hlist_node *n;
++	int id;
+ 
+ 	/*
+-	 * RCU here will block mmu_notifier_unregister until
+-	 * ->release returns.
++	 * srcu_read_lock() here will block synchronize_srcu() in
++	 * mmu_notifier_unregister() until all registered
++	 * ->release() callouts this function makes have
++	 * returned.
+ 	 */
+-	rcu_read_lock();
+-	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+-		/*
+-		 * if ->release runs before mmu_notifier_unregister it
+-		 * must be handled as it's the only way for the driver
+-		 * to flush all existing sptes and stop the driver
+-		 * from establishing any more sptes before all the
+-		 * pages in the mm are freed.
+-		 */
+-		if (mn->ops->release)
+-			mn->ops->release(mn, mm);
+-	rcu_read_unlock();
+-
++	id = srcu_read_lock(&srcu);
+ 	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ 		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ 				 struct mmu_notifier,
+ 				 hlist);
++
+ 		/*
+-		 * We arrived before mmu_notifier_unregister so
+-		 * mmu_notifier_unregister will do nothing other than
+-		 * to wait ->release to finish and
+-		 * mmu_notifier_unregister to return.
++		 * Unlink.  This will prevent mmu_notifier_unregister()
++		 * from also making the ->release() callout.
+ 		 */
+ 		hlist_del_init_rcu(&mn->hlist);
++		spin_unlock(&mm->mmu_notifier_mm->lock);
++
++		/*
++		 * Clear sptes. (see 'release' description in mmu_notifier.h)
++		 */
++		if (mn->ops->release)
++			mn->ops->release(mn, mm);
++
++		spin_lock(&mm->mmu_notifier_mm->lock);
+ 	}
+ 	spin_unlock(&mm->mmu_notifier_mm->lock);
+ 
+ 	/*
+-	 * synchronize_rcu here prevents mmu_notifier_release to
+-	 * return to exit_mmap (which would proceed freeing all pages
+-	 * in the mm) until the ->release method returns, if it was
+-	 * invoked by mmu_notifier_unregister.
+-	 *
+-	 * The mmu_notifier_mm can't go away from under us because one
+-	 * mm_count is hold by exit_mmap.
++	 * All callouts to ->release() which we have done are complete.
++	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
++	 */
++	srcu_read_unlock(&srcu, id);
++
++	/*
++	 * mmu_notifier_unregister() may have unlinked a notifier and may
++	 * still be calling out to it.	Additionally, other notifiers
++	 * may have been active via vmtruncate() et. al. Block here
++	 * to ensure that all notifier callouts for this mm have been
++	 * completed and the sptes are really cleaned up before returning
++	 * to exit_mmap().
+ 	 */
+-	synchronize_rcu();
++	synchronize_srcu(&srcu);
+ }
+ 
+ /*
+@@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
+-	int young = 0;
++	int young = 0, id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->clear_flush_young)
+ 			young |= mn->ops->clear_flush_young(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	return young;
+ }
+@@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
+-	int young = 0;
++	int young = 0, id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->test_young) {
+ 			young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ 				break;
+ 		}
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	return young;
+ }
+@@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->change_pte)
+ 			mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ 		else if (mn->ops->invalidate_page)
+ 			mn->ops->invalidate_page(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_page)
+ 			mn->ops->invalidate_page(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_range_start)
+ 			mn->ops->invalidate_range_start(mn, mm, start, end);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_range_end)
+ 			mn->ops->invalidate_range_end(mn, mm, start, end);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+ 
+ 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+ 
++	/*
++	* Verify that mmu_notifier_init() already run and the global srcu is
++	* initialized.
++	*/
++	BUG_ON(!srcu.per_cpu_ref);
++
+ 	ret = -ENOMEM;
+ 	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ 	if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+ /*
+  * This releases the mm_count pin automatically and frees the mm
+  * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+  * calling mmu_notifier_unregister. ->release or any other notifier
+  * method may be invoked concurrently with mmu_notifier_unregister,
+  * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ 
++	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	if (!hlist_unhashed(&mn->hlist)) {
+-		/*
+-		 * RCU here will force exit_mmap to wait ->release to finish
+-		 * before freeing the pages.
+-		 */
+-		rcu_read_lock();
++		int id;
+ 
+ 		/*
+-		 * exit_mmap will block in mmu_notifier_release to
+-		 * guarantee ->release is called before freeing the
+-		 * pages.
++		 * Ensure we synchronize up with __mmu_notifier_release().
+ 		 */
++		id = srcu_read_lock(&srcu);
++
++		hlist_del_rcu(&mn->hlist);
++		spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ 		if (mn->ops->release)
+ 			mn->ops->release(mn, mm);
+-		rcu_read_unlock();
+ 
+-		spin_lock(&mm->mmu_notifier_mm->lock);
+-		hlist_del_rcu(&mn->hlist);
++		/*
++		 * Allow __mmu_notifier_release() to complete.
++		 */
++		srcu_read_unlock(&srcu, id);
++	} else
+ 		spin_unlock(&mm->mmu_notifier_mm->lock);
+-	}
+ 
+ 	/*
+-	 * Wait any running method to finish, of course including
+-	 * ->release if it was run by mmu_notifier_relase instead of us.
++	 * Wait for any running method to finish, including ->release() if it
++	 * was run by __mmu_notifier_release() instead of us.
+ 	 */
+-	synchronize_rcu();
++	synchronize_srcu(&srcu);
+ 
+ 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ 
+ 	mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++	return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 0ec869e..1b94f08 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4264,10 +4264,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
+  * round what is now in bits to nearest long in bits, then return it in
+  * bytes.
+  */
+-static unsigned long __init usemap_size(unsigned long zonesize)
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
+ {
+ 	unsigned long usemapsize;
+ 
++	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
+ 	usemapsize = roundup(zonesize, pageblock_nr_pages);
+ 	usemapsize = usemapsize >> pageblock_order;
+ 	usemapsize *= NR_PAGEBLOCK_BITS;
+@@ -4277,17 +4278,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
+ }
+ 
+ static void __init setup_usemap(struct pglist_data *pgdat,
+-				struct zone *zone, unsigned long zonesize)
++				struct zone *zone,
++				unsigned long zone_start_pfn,
++				unsigned long zonesize)
+ {
+-	unsigned long usemapsize = usemap_size(zonesize);
++	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
+ 	zone->pageblock_flags = NULL;
+ 	if (usemapsize)
+ 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+ 								   usemapsize);
+ }
+ #else
+-static inline void setup_usemap(struct pglist_data *pgdat,
+-				struct zone *zone, unsigned long zonesize) {}
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
++				unsigned long zone_start_pfn, unsigned long zonesize) {}
+ #endif /* CONFIG_SPARSEMEM */
+ 
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+@@ -4415,7 +4418,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ 			continue;
+ 
+ 		set_pageblock_order(pageblock_default_order());
+-		setup_usemap(pgdat, zone, size);
++		setup_usemap(pgdat, zone, zone_start_pfn, size);
+ 		ret = init_currently_empty_zone(zone, zone_start_pfn,
+ 						size, MEMMAP_EARLY);
+ 		BUG_ON(ret);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b952332..8b38477 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2505,6 +2505,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ 	unsigned long inodes;
+ 	int error = -EINVAL;
+ 
++	config.mpol = NULL;
+ 	if (shmem_parse_options(data, &config, true))
+ 		return error;
+ 
+@@ -2530,8 +2531,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ 	sbinfo->max_inodes  = config.max_inodes;
+ 	sbinfo->free_inodes = config.max_inodes - inodes;
+ 
+-	mpol_put(sbinfo->mpol);
+-	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
++	/*
++	 * Preserve previous mempolicy unless mpol remount option was specified.
++	 */
++	if (config.mpol) {
++		mpol_put(sbinfo->mpol);
++		sbinfo->mpol = config.mpol;	/* transfers initial ref */
++	}
+ out:
+ 	spin_unlock(&sbinfo->stat_lock);
+ 	return error;
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index 289646e..f26516a 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -16,6 +16,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/llc.h>
+ #include <linux/slab.h>
++#include <linux/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/llc.h>
+ #include <net/llc_pdu.h>
+@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
+ 
+ 	skb->dev = p->dev;
+ 	skb->protocol = htons(ETH_P_802_2);
++	skb->priority = TC_PRIO_CONTROL;
+ 
+ 	skb_reserve(skb, LLC_RESERVE);
+ 	memcpy(__skb_put(skb, length), data, length);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index ef1528a..f5dde14 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -226,8 +226,12 @@ EXPORT_SYMBOL(inet_listen);
+ u32 inet_ehash_secret __read_mostly;
+ EXPORT_SYMBOL(inet_ehash_secret);
+ 
++u32 ipv6_hash_secret __read_mostly;
++EXPORT_SYMBOL(ipv6_hash_secret);
++
+ /*
+- * inet_ehash_secret must be set exactly once
++ * inet_ehash_secret must be set exactly once, and to a non nul value
++ * ipv6_hash_secret must be set exactly once.
+  */
+ void build_ehash_secret(void)
+ {
+@@ -237,7 +241,8 @@ void build_ehash_secret(void)
+ 		get_random_bytes(&rnd, sizeof(rnd));
+ 	} while (rnd == 0);
+ 
+-	cmpxchg(&inet_ehash_secret, 0, rnd);
++	if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
++		get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ }
+ EXPORT_SYMBOL(build_ehash_secret);
+ 
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 39b403f..0ae2cf1 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -320,8 +320,8 @@ void ping_err(struct sk_buff *skb, u32 info)
+ 	struct iphdr *iph = (struct iphdr *)skb->data;
+ 	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ 	struct inet_sock *inet_sock;
+-	int type = icmph->type;
+-	int code = icmph->code;
++	int type = icmp_hdr(skb)->type;
++	int code = icmp_hdr(skb)->code;
+ 	struct net *net = dev_net(skb->dev);
+ 	struct sock *sk;
+ 	int harderr;
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index 5c6e322..f71a0ff 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+ 
+ 	spin_lock(&codec->reg_lock);
+ 	if (!pvoice->running) {
+-		spin_unlock_irq(&codec->reg_lock);
++		spin_unlock(&codec->reg_lock);
+ 		return 0;
+ 	}
+ 	outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
+index 3c04524..1026820 100644
+--- a/sound/pci/rme32.c
++++ b/sound/pci/rme32.c
+@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
+ 	spin_lock_irq(&rme32->lock);
+ 	rme32->capture_substream = NULL;
+ 	rme32->capture_periodsize = 0;
+-	spin_unlock(&rme32->lock);
++	spin_unlock_irq(&rme32->lock);
+ 	return 0;
+ }
+ 
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 7ccffb2..11a9f86 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1613,7 +1613,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ 		/* .vendor_name = "Roland", */
+ 		/* .product_name = "A-PRO", */
+-		.ifnum = 1,
++		.ifnum = 0,
+ 		.type = QUIRK_MIDI_FIXED_ENDPOINT,
+ 		.data = & (const struct snd_usb_midi_endpoint_info) {
+ 			.out_cables = 0x0003,

Modified: genpatches-2.6/trunk/3.2/0000_README
===================================================================
--- genpatches-2.6/trunk/3.2/0000_README	2013-02-23 19:07:21 UTC (rev 2290)
+++ genpatches-2.6/trunk/3.2/0000_README	2013-02-28 19:13:50 UTC (rev 2291)
@@ -192,6 +192,10 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.2.38
 
+Patch:  1038_linux-3.2.39.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.2.39
+
 Patch:  2300_per-pci-device-msi-irq-listing.patch
 From:   http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=da8d1c8ba4dcb16d60be54b233deca9a7cac98dc
 Desc:   Add a per-pci-device subdirectory in sysfs

Added: genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch
===================================================================
--- genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch	2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,2660 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 82d7fa6..83f156e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2584,7 +2584,7 @@ S:	Maintained
+ F:	drivers/net/ethernet/i825xx/eexpress.*
+ 
+ ETHERNET BRIDGE
+-M:	Stephen Hemminger <shemminger@vyatta.com>
++M:	Stephen Hemminger <stephen@networkplumber.org>
+ L:	bridge@lists.linux-foundation.org
+ L:	netdev@vger.kernel.org
+ W:	http://www.linuxfoundation.org/en/Net:Bridge
+@@ -4475,7 +4475,7 @@ S:	Supported
+ F:	drivers/infiniband/hw/nes/
+ 
+ NETEM NETWORK EMULATOR
+-M:	Stephen Hemminger <shemminger@vyatta.com>
++M:	Stephen Hemminger <stephen@networkplumber.org>
+ L:	netem@lists.linux-foundation.org
+ S:	Maintained
+ F:	net/sched/sch_netem.c
+@@ -5993,7 +5993,7 @@ S:	Maintained
+ F:	drivers/usb/misc/sisusbvga/
+ 
+ SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
+-M:	Stephen Hemminger <shemminger@vyatta.com>
++M:	Stephen Hemminger <stephen@networkplumber.org>
+ L:	netdev@vger.kernel.org
+ S:	Maintained
+ F:	drivers/net/ethernet/marvell/sk*
+diff --git a/Makefile b/Makefile
+index c8c9d02..0fceb8b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index a6253ec..95b4eb3 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -208,7 +208,7 @@ sysexit_from_sys_call:
+ 	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+ 	jnz ia32_ret_from_sys_call
+ 	TRACE_IRQS_ON
+-	sti
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	movl %eax,%esi		/* second arg, syscall return value */
+ 	cmpl $0,%eax		/* is it < 0? */
+ 	setl %al		/* 1 if so, 0 if not */
+@@ -218,7 +218,7 @@ sysexit_from_sys_call:
+ 	GET_THREAD_INFO(%r10)
+ 	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall return value */
+ 	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+-	cli
++	DISABLE_INTERRUPTS(CLBR_NONE)
+ 	TRACE_IRQS_OFF
+ 	testl %edi,TI_flags(%r10)
+ 	jz \exit
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index c346d11..d4f278e 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -157,6 +157,34 @@ static int enable_single_step(struct task_struct *child)
+ 	return 1;
+ }
+ 
++static void set_task_blockstep(struct task_struct *task, bool on)
++{
++	unsigned long debugctl;
++
++	/*
++	 * Ensure irq/preemption can't change debugctl in between.
++	 * Note also that both TIF_BLOCKSTEP and debugctl should
++	 * be changed atomically wrt preemption.
++	 *
++	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
++	 * task is current or it can't be running, otherwise we can race
++	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
++	 * PTRACE_KILL is not safe.
++	 */
++	local_irq_disable();
++	debugctl = get_debugctlmsr();
++	if (on) {
++		debugctl |= DEBUGCTLMSR_BTF;
++		set_tsk_thread_flag(task, TIF_BLOCKSTEP);
++	} else {
++		debugctl &= ~DEBUGCTLMSR_BTF;
++		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
++	}
++	if (task == current)
++		update_debugctlmsr(debugctl);
++	local_irq_enable();
++}
++
+ /*
+  * Enable single or block step.
+  */
+@@ -169,19 +197,10 @@ static void enable_step(struct task_struct *child, bool block)
+ 	 * So no one should try to use debugger block stepping in a program
+ 	 * that uses user-mode single stepping itself.
+ 	 */
+-	if (enable_single_step(child) && block) {
+-		unsigned long debugctl = get_debugctlmsr();
+-
+-		debugctl |= DEBUGCTLMSR_BTF;
+-		update_debugctlmsr(debugctl);
+-		set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+-	} else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+-		unsigned long debugctl = get_debugctlmsr();
+-
+-		debugctl &= ~DEBUGCTLMSR_BTF;
+-		update_debugctlmsr(debugctl);
+-		clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+-	}
++	if (enable_single_step(child) && block)
++		set_task_blockstep(child, true);
++	else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
++		set_task_blockstep(child, false);
+ }
+ 
+ void user_enable_single_step(struct task_struct *child)
+@@ -199,13 +218,8 @@ void user_disable_single_step(struct task_struct *child)
+ 	/*
+ 	 * Make sure block stepping (BTF) is disabled.
+ 	 */
+-	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+-		unsigned long debugctl = get_debugctlmsr();
+-
+-		debugctl &= ~DEBUGCTLMSR_BTF;
+-		update_debugctlmsr(debugctl);
+-		clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+-	}
++	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
++		set_task_blockstep(child, false);
+ 
+ 	/* Always clear TIF_SINGLESTEP... */
+ 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index b040b0e..7328f71 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -88,11 +88,11 @@ ENTRY(xen_iret)
+ 	 */
+ #ifdef CONFIG_SMP
+ 	GET_THREAD_INFO(%eax)
+-	movl TI_cpu(%eax), %eax
+-	movl __per_cpu_offset(,%eax,4), %eax
+-	mov xen_vcpu(%eax), %eax
++	movl %ss:TI_cpu(%eax), %eax
++	movl %ss:__per_cpu_offset(,%eax,4), %eax
++	mov %ss:xen_vcpu(%eax), %eax
+ #else
+-	movl xen_vcpu, %eax
++	movl %ss:xen_vcpu, %eax
+ #endif
+ 
+ 	/* check IF state we're restoring */
+@@ -105,11 +105,11 @@ ENTRY(xen_iret)
+ 	 * resuming the code, so we don't have to be worried about
+ 	 * being preempted to another CPU.
+ 	 */
+-	setz XEN_vcpu_info_mask(%eax)
++	setz %ss:XEN_vcpu_info_mask(%eax)
+ xen_iret_start_crit:
+ 
+ 	/* check for unmasked and pending */
+-	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
++	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
+ 
+ 	/*
+ 	 * If there's something pending, mask events again so we can
+@@ -117,7 +117,7 @@ xen_iret_start_crit:
+ 	 * touch XEN_vcpu_info_mask.
+ 	 */
+ 	jne 1f
+-	movb $1, XEN_vcpu_info_mask(%eax)
++	movb $1, %ss:XEN_vcpu_info_mask(%eax)
+ 
+ 1:	popl %eax
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index b07edc4..62c1325 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -52,7 +52,9 @@
+ #define DRV_VERSION	"3.0"
+ 
+ enum {
+-	AHCI_PCI_BAR		= 5,
++	AHCI_PCI_BAR_STA2X11	= 0,
++	AHCI_PCI_BAR_ENMOTUS	= 2,
++	AHCI_PCI_BAR_STANDARD	= 5,
+ };
+ 
+ enum board_ids {
+@@ -375,6 +377,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
+ 	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
+ 
++	/* ST Microelectronics */
++	{ PCI_VDEVICE(STMICRO, 0xCC06), board_ahci },		/* ST ConneXt */
++
+ 	/* Marvell */
+ 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
+ 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
+@@ -400,6 +405,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },	/* ASM1061 */
+ 	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */
+ 
++	/* Enmotus */
++	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
++
+ 	/* Generic, PCI class code for AHCI */
+ 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+@@ -629,6 +637,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+ {
+ 	int rc;
+ 
++	/*
++	 * If the device fixup already set the dma_mask to some non-standard
++	 * value, don't extend it here. This happens on STA2X11, for example.
++	 */
++	if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
++		return 0;
++
+ 	if (using_dac &&
+ 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+@@ -1033,6 +1048,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	struct ahci_host_priv *hpriv;
+ 	struct ata_host *host;
+ 	int n_ports, i, rc;
++	int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
+ 
+ 	VPRINTK("ENTER\n");
+ 
+@@ -1064,6 +1080,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		dev_info(&pdev->dev,
+ 			 "PDC42819 can only drive SATA devices with this driver\n");
+ 
++	/* Both Connext and Enmotus devices use non-standard BARs */
++	if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
++		ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
++	else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
++		ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
++
+ 	/* acquire resources */
+ 	rc = pcim_enable_device(pdev);
+ 	if (rc)
+@@ -1072,7 +1094,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* AHCI controllers often implement SFF compatible interface.
+ 	 * Grab all PCI BARs just in case.
+ 	 */
+-	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
++	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ 	if (rc == -EBUSY)
+ 		pcim_pin_device(pdev);
+ 	if (rc)
+@@ -1115,7 +1137,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
+ 		pci_intx(pdev, 1);
+ 
+-	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
++	hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ 
+ 	/* save initial config */
+ 	ahci_pci_save_initial_config(pdev, hpriv);
+@@ -1179,8 +1201,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	for (i = 0; i < host->n_ports; i++) {
+ 		struct ata_port *ap = host->ports[i];
+ 
+-		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+-		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
++		ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
++		ata_port_pbar_desc(ap, ahci_pci_bar,
+ 				   0x100 + ap->port_no * 0x80, "port");
+ 
+ 		/* set enclosure management message type */
+diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
+index 6a0955e..53ecac5 100644
+--- a/drivers/atm/iphase.h
++++ b/drivers/atm/iphase.h
+@@ -636,82 +636,82 @@ struct rx_buf_desc {
+ #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE  
+ #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE  
+ 
+-typedef volatile u_int  freg_t;
++typedef volatile u_int	ffreg_t;
+ typedef u_int   rreg_t;
+ 
+ typedef struct _ffredn_t {
+-        freg_t  idlehead_high;  /* Idle cell header (high)              */
+-        freg_t  idlehead_low;   /* Idle cell header (low)               */
+-        freg_t  maxrate;        /* Maximum rate                         */
+-        freg_t  stparms;        /* Traffic Management Parameters        */
+-        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
+-        freg_t  rm_type;        /*                                      */
+-        u_int   filler5[0x17 - 0x06];
+-        freg_t  cmd_reg;        /* Command register                     */
+-        u_int   filler18[0x20 - 0x18];
+-        freg_t  cbr_base;       /* CBR Pointer Base                     */
+-        freg_t  vbr_base;       /* VBR Pointer Base                     */
+-        freg_t  abr_base;       /* ABR Pointer Base                     */
+-        freg_t  ubr_base;       /* UBR Pointer Base                     */
+-        u_int   filler24;
+-        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */
+-        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */
+-        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */
+-        freg_t  vct_base;       /* Main VC Table Base                   */
+-        freg_t  vcte_base;      /* Extended Main VC Table Base          */
+-        u_int   filler2a[0x2C - 0x2A];
+-        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */
+-        freg_t  cbr_tab_end;    /* CBR Table End                        */
+-        freg_t  cbr_pointer;    /* CBR Pointer                          */
+-        u_int   filler2f[0x30 - 0x2F];
+-        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */
+-        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */
+-        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */
+-        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */
+-        freg_t  tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+-        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */
+-        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+-        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
+-        u_int   filler38[0x40 - 0x38];
+-        freg_t  queue_base;     /* Base address for PRQ and TCQ         */
+-        freg_t  desc_base;      /* Base address of descriptor table     */
+-        u_int   filler42[0x45 - 0x42];
+-        freg_t  mode_reg_0;     /* Mode register 0                      */
+-        freg_t  mode_reg_1;     /* Mode register 1                      */
+-        freg_t  intr_status_reg;/* Interrupt Status register            */
+-        freg_t  mask_reg;       /* Mask Register                        */
+-        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */
+-        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */
+-        freg_t  state_reg;      /* Status register                      */
+-        u_int   filler4c[0x58 - 0x4c];
+-        freg_t  curr_desc_num;  /* Contains the current descriptor num  */
+-        freg_t  next_desc;      /* Next descriptor                      */
+-        freg_t  next_vc;        /* Next VC                              */
+-        u_int   filler5b[0x5d - 0x5b];
+-        freg_t  present_slot_cnt;/* Present slot count                  */
+-        u_int   filler5e[0x6a - 0x5e];
+-        freg_t  new_desc_num;   /* New descriptor number                */
+-        freg_t  new_vc;         /* New VC                               */
+-        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */
+-        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */
+-        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */
+-        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */
+-        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */
+-        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */
+-        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */
+-        freg_t  cbr_vc;         /* CBR VC                               */
+-        freg_t  vbr_sb_vc;      /* VBR SB VC                            */
+-        freg_t  abr_sb_vc;      /* ABR SB VC                            */
+-        freg_t  ubr_sb_vc;      /* UBR SB VC                            */
+-        freg_t  vbr_next_link;  /* VBR next link                        */
+-        freg_t  abr_next_link;  /* ABR next link                        */
+-        freg_t  ubr_next_link;  /* UBR next link                        */
+-        u_int   filler7a[0x7c-0x7a];
+-        freg_t  out_rate_head;  /* Out of rate head                     */
+-        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
+-        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */
+-        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
+-        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
++	ffreg_t	idlehead_high;	/* Idle cell header (high)		*/
++	ffreg_t	idlehead_low;	/* Idle cell header (low)		*/
++	ffreg_t	maxrate;	/* Maximum rate				*/
++	ffreg_t	stparms;	/* Traffic Management Parameters	*/
++	ffreg_t	abrubr_abr;	/* ABRUBR Priority Byte 1, TCR Byte 0	*/
++	ffreg_t	rm_type;	/*					*/
++	u_int	filler5[0x17 - 0x06];
++	ffreg_t	cmd_reg;	/* Command register			*/
++	u_int	filler18[0x20 - 0x18];
++	ffreg_t	cbr_base;	/* CBR Pointer Base			*/
++	ffreg_t	vbr_base;	/* VBR Pointer Base			*/
++	ffreg_t	abr_base;	/* ABR Pointer Base			*/
++	ffreg_t	ubr_base;	/* UBR Pointer Base			*/
++	u_int	filler24;
++	ffreg_t	vbrwq_base;	/* VBR Wait Queue Base			*/
++	ffreg_t	abrwq_base;	/* ABR Wait Queue Base			*/
++	ffreg_t	ubrwq_base;	/* UBR Wait Queue Base			*/
++	ffreg_t	vct_base;	/* Main VC Table Base			*/
++	ffreg_t	vcte_base;	/* Extended Main VC Table Base		*/
++	u_int	filler2a[0x2C - 0x2A];
++	ffreg_t	cbr_tab_beg;	/* CBR Table Begin			*/
++	ffreg_t	cbr_tab_end;	/* CBR Table End			*/
++	ffreg_t	cbr_pointer;	/* CBR Pointer				*/
++	u_int	filler2f[0x30 - 0x2F];
++	ffreg_t	prq_st_adr;	/* Packet Ready Queue Start Address	*/
++	ffreg_t	prq_ed_adr;	/* Packet Ready Queue End Address	*/
++	ffreg_t	prq_rd_ptr;	/* Packet Ready Queue read pointer	*/
++	ffreg_t	prq_wr_ptr;	/* Packet Ready Queue write pointer	*/
++	ffreg_t	tcq_st_adr;	/* Transmit Complete Queue Start Address*/
++	ffreg_t	tcq_ed_adr;	/* Transmit Complete Queue End Address	*/
++	ffreg_t	tcq_rd_ptr;	/* Transmit Complete Queue read pointer */
++	ffreg_t	tcq_wr_ptr;	/* Transmit Complete Queue write pointer*/
++	u_int	filler38[0x40 - 0x38];
++	ffreg_t	queue_base;	/* Base address for PRQ and TCQ		*/
++	ffreg_t	desc_base;	/* Base address of descriptor table	*/
++	u_int	filler42[0x45 - 0x42];
++	ffreg_t	mode_reg_0;	/* Mode register 0			*/
++	ffreg_t	mode_reg_1;	/* Mode register 1			*/
++	ffreg_t	intr_status_reg;/* Interrupt Status register		*/
++	ffreg_t	mask_reg;	/* Mask Register			*/
++	ffreg_t	cell_ctr_high1; /* Total cell transfer count (high)	*/
++	ffreg_t	cell_ctr_lo1;	/* Total cell transfer count (low)	*/
++	ffreg_t	state_reg;	/* Status register			*/
++	u_int	filler4c[0x58 - 0x4c];
++	ffreg_t	curr_desc_num;	/* Contains the current descriptor num	*/
++	ffreg_t	next_desc;	/* Next descriptor			*/
++	ffreg_t	next_vc;	/* Next VC				*/
++	u_int	filler5b[0x5d - 0x5b];
++	ffreg_t	present_slot_cnt;/* Present slot count			*/
++	u_int	filler5e[0x6a - 0x5e];
++	ffreg_t	new_desc_num;	/* New descriptor number		*/
++	ffreg_t	new_vc;		/* New VC				*/
++	ffreg_t	sched_tbl_ptr;	/* Schedule table pointer		*/
++	ffreg_t	vbrwq_wptr;	/* VBR wait queue write pointer		*/
++	ffreg_t	vbrwq_rptr;	/* VBR wait queue read pointer		*/
++	ffreg_t	abrwq_wptr;	/* ABR wait queue write pointer		*/
++	ffreg_t	abrwq_rptr;	/* ABR wait queue read pointer		*/
++	ffreg_t	ubrwq_wptr;	/* UBR wait queue write pointer		*/
++	ffreg_t	ubrwq_rptr;	/* UBR wait queue read pointer		*/
++	ffreg_t	cbr_vc;		/* CBR VC				*/
++	ffreg_t	vbr_sb_vc;	/* VBR SB VC				*/
++	ffreg_t	abr_sb_vc;	/* ABR SB VC				*/
++	ffreg_t	ubr_sb_vc;	/* UBR SB VC				*/
++	ffreg_t	vbr_next_link;	/* VBR next link			*/
++	ffreg_t	abr_next_link;	/* ABR next link			*/
++	ffreg_t	ubr_next_link;	/* UBR next link			*/
++	u_int	filler7a[0x7c-0x7a];
++	ffreg_t	out_rate_head;	/* Out of rate head			*/
++	u_int	filler7d[0xca-0x7d]; /* pad out to full address space	*/
++	ffreg_t	cell_ctr_high1_nc;/* Total cell transfer count (high)	*/
++	ffreg_t	cell_ctr_lo1_nc;/* Total cell transfer count (low)	*/
++	u_int	fillercc[0x100-0xcc]; /* pad out to full address space	 */
+ } ffredn_t;
+ 
+ typedef struct _rfredn_t {
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 8e3c46d..7795d1e 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1789,7 +1789,8 @@ static void virtcons_remove(struct virtio_device *vdev)
+ 	/* Disable interrupts for vqs */
+ 	vdev->config->reset(vdev);
+ 	/* Finish up work that's lined up */
+-	cancel_work_sync(&portdev->control_work);
++	if (use_multiport(portdev))
++		cancel_work_sync(&portdev->control_work);
+ 
+ 	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ 		unplug_port(port);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index c05e825..7817429 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7156,8 +7156,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ 	OUT_RING(pf | pipesrc);
+ 
+ 	intel_mark_page_flip_active(intel_crtc);
+-
+-	intel_mark_page_flip_active(intel_crtc);
+ 	ADVANCE_LP_RING();
+ 	return 0;
+ 
+@@ -7193,6 +7191,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+ 	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ 	OUT_RING(pf | pipesrc);
++
++	intel_mark_page_flip_active(intel_crtc);
+ 	ADVANCE_LP_RING();
+ 	return 0;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 1b98338..ec36dd9 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -2455,6 +2455,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 								   1),
+ 								  ATOM_DEVICE_CRT1_SUPPORT);
+ 				}
++				/* RV100 board with external TDMS bit mis-set.
++				 * Actually uses internal TMDS, clear the bit.
++				 */
++				if (dev->pdev->device == 0x5159 &&
++				    dev->pdev->subsystem_vendor == 0x1014 &&
++				    dev->pdev->subsystem_device == 0x029A) {
++					tmp &= ~(1 << 4);
++				}
+ 				if ((tmp >> 4) & 0x1) {
+ 					devices |= ATOM_DEVICE_DFP2_SUPPORT;
+ 					radeon_add_legacy_encoder(dev,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index aec8e0c..63e7143 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1110,8 +1110,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ 	}
+ 
+ 	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+-	if (radeon_fb == NULL)
++	if (radeon_fb == NULL) {
++		drm_gem_object_unreference_unlocked(obj);
+ 		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 49d5820..65be5e8 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -306,6 +306,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+ {
+ 	int r;
+ 
++	/* make sure we aren't trying to allocate more space than there is on the ring */
++	if (ndw > (rdev->cp.ring_size / 4))
++		return -ENOMEM;
+ 	/* Align requested size with padding so unlock_commit can
+ 	 * pad safely */
+ 	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 2d41336..c15c38e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -278,6 +278,9 @@
+ #define USB_VENDOR_ID_EZKEY		0x0518
+ #define USB_DEVICE_ID_BTC_8193		0x0002
+ 
++#define USB_VENDOR_ID_FORMOSA          0x147a
++#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER      0xe03e
++
+ #define USB_VENDOR_ID_FREESCALE		0x15A2
+ #define USB_DEVICE_ID_FREESCALE_MX28	0x004F
+ 
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index aec3fa3..e26eddf 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
+index fd17bb3..08c2329 100644
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
+ 		CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
+ 		CAPIMSG_CONTROL(data));
+ 	l -= 12;
++	if (l <= 0)
++		return;
+ 	dbgline = kmalloc(3*l, GFP_ATOMIC);
+ 	if (!dbgline)
+ 		return;
+diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
+index 4fe51fd..acaef66 100644
+--- a/drivers/media/video/gspca/kinect.c
++++ b/drivers/media/video/gspca/kinect.c
+@@ -390,6 +390,7 @@ static const struct sd_desc sd_desc = {
+ /* -- module initialisation -- */
+ static const struct usb_device_id device_table[] = {
+ 	{USB_DEVICE(0x045e, 0x02ae)},
++	{USB_DEVICE(0x045e, 0x02bf)},
+ 	{}
+ };
+ 
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 21a3d77..64647d4 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -446,8 +446,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
+ 
+ 	priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ 			IFX_WRITE_LOW_16BIT(mask));
++
++	/* According to C_CAN documentation, the reserved bit
++	 * in IFx_MASK2 register is fixed 1
++	 */
+ 	priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+-			IFX_WRITE_HIGH_16BIT(mask));
++			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+ 
+ 	priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ 			IFX_WRITE_LOW_16BIT(id));
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 01bc102..c86fa50 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -1135,14 +1135,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+ 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+ }
+ 
+-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+-	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+-			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+-			     MII_TG3_AUXCTL_ACTL_TX_6DB)
++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
++{
++	u32 val;
++	int err;
+ 
+-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+-	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+-			     MII_TG3_AUXCTL_ACTL_TX_6DB);
++	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++
++	if (err)
++		return err;
++	if (enable)
++
++		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++	else
++		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++
++	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
++
++	return err;
++}
+ 
+ static int tg3_bmcr_reset(struct tg3 *tp)
+ {
+@@ -2087,7 +2099,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+ 
+ 	otp = tp->phy_otp;
+ 
+-	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
++	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
+ 		return;
+ 
+ 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+@@ -2112,7 +2124,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+ 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+ 
+-	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++	tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ 
+ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+@@ -2148,9 +2160,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+ 
+ 	if (!tp->setlpicnt) {
+ 		if (current_link_up == 1 &&
+-		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+-			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++			tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 		}
+ 
+ 		val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2166,11 +2178,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
+ 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+-	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ 		val = MII_TG3_DSP_TAP26_ALNOKO |
+ 		      MII_TG3_DSP_TAP26_RMRXSTO;
+ 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+-		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++		tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 	}
+ 
+ 	val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2314,7 +2326,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ 		tg3_writephy(tp, MII_CTRL1000,
+ 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+ 
+-		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ 		if (err)
+ 			return err;
+ 
+@@ -2335,7 +2347,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+ 
+-	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++	tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 
+ 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+ 
+@@ -2424,10 +2436,10 @@ static int tg3_phy_reset(struct tg3 *tp)
+ 
+ out:
+ 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+-	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+ 		tg3_phydsp_write(tp, 0x000a, 0x0323);
+-		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++		tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 	}
+ 
+ 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+@@ -2436,14 +2448,14 @@ out:
+ 	}
+ 
+ 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+-		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ 			tg3_phydsp_write(tp, 0x000a, 0x310b);
+ 			tg3_phydsp_write(tp, 0x201f, 0x9506);
+ 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
+-			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++			tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 		}
+ 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+-		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+@@ -2452,7 +2464,7 @@ out:
+ 			} else
+ 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+ 
+-			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++			tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 		}
+ 	}
+ 
+@@ -3639,7 +3651,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ 	tw32(TG3_CPMU_EEE_MODE,
+ 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+ 
+-	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ 	if (!err) {
+ 		u32 err2;
+ 
+@@ -3671,7 +3683,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ 						 MII_TG3_DSP_CH34TP2_HIBW01);
+ 		}
+ 
+-		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
+ 		if (!err)
+ 			err = err2;
+ 	}
+@@ -6353,6 +6365,9 @@ static void tg3_poll_controller(struct net_device *dev)
+ 	int i;
+ 	struct tg3 *tp = netdev_priv(dev);
+ 
++	if (tg3_irq_sync(tp))
++		return;
++
+ 	for (i = 0; i < tp->irq_cnt; i++)
+ 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+ }
+@@ -15388,6 +15403,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
+ 	tp->pm_cap = pm_cap;
+ 	tp->rx_mode = TG3_DEF_RX_MODE;
+ 	tp->tx_mode = TG3_DEF_TX_MODE;
++	tp->irq_sync = 1;
+ 
+ 	if (tg3_debug > 0)
+ 		tp->msg_enable = tg3_debug;
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+index a8259cc..5674145 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+ 					 buffrag->length, PCI_DMA_TODEVICE);
+ 			buffrag->dma = 0ULL;
+ 		}
+-		for (j = 0; j < cmd_buf->frag_count; j++) {
++		for (j = 1; j < cmd_buf->frag_count; j++) {
+ 			buffrag++;
+ 			if (buffrag->dma) {
+ 				pci_unmap_page(adapter->pdev, buffrag->dma,
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index da5204d..4a238a4 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1924,10 +1924,12 @@ unwind:
+ 	while (--i >= 0) {
+ 		nf = &pbuf->frag_array[i+1];
+ 		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
++		nf->dma = 0ULL;
+ 	}
+ 
+ 	nf = &pbuf->frag_array[0];
+ 	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
++	nf->dma = 0ULL;
+ 
+ out_err:
+ 	return -ENOMEM;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index b8db4cd..a6153f1 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5829,13 +5829,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ 			dev->stats.rx_bytes += pkt_size;
+ 			dev->stats.rx_packets++;
+ 		}
+-
+-		/* Work around for AMD plateform. */
+-		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+-		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+-			desc->opts2 = 0;
+-			cur_rx++;
+-		}
+ 	}
+ 
+ 	count = cur_rx - tp->cur_rx;
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 4ce9e5f..d0893e4 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -78,6 +78,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
+ 
+ 	skb_orphan(skb);
+ 
++	/* Before queueing this packet to netif_rx(),
++	 * make sure dst is refcounted.
++	 */
++	skb_dst_force(skb);
++
+ 	skb->protocol = eth_type_trans(skb, dev);
+ 
+ 	/* it's OK to use per_cpu_ptr() because BHs are off */
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
+index 8d3ab37..6618dd6 100644
+--- a/drivers/net/wireless/mwifiex/scan.c
++++ b/drivers/net/wireless/mwifiex/scan.c
+@@ -1594,7 +1594,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ 		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
+ 		       scan_rsp->number_of_sets);
+ 		ret = -1;
+-		goto done;
++		goto check_next_scan;
+ 	}
+ 
+ 	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
+@@ -1663,7 +1663,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ 		if (!beacon_size || beacon_size > bytes_left) {
+ 			bss_info += bytes_left;
+ 			bytes_left = 0;
+-			return -1;
++			ret = -1;
++			goto check_next_scan;
+ 		}
+ 
+ 		/* Initialize the current working beacon pointer for this BSS
+@@ -1716,7 +1717,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ 				dev_err(priv->adapter->dev, "%s: in processing"
+ 					" IE, bytes left < IE length\n",
+ 					__func__);
+-				goto done;
++				goto check_next_scan;
+ 			}
+ 			if (element_id == WLAN_EID_DS_PARAMS) {
+ 				channel = *(u8 *) (current_ptr +
+@@ -1782,6 +1783,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ 		}
+ 	}
+ 
++check_next_scan:
+ 	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ 	if (list_empty(&adapter->scan_pending_q)) {
+ 		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+@@ -1812,7 +1814,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ 		mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ 	}
+ 
+-done:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
+index 22ed6df..2be9880 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.c
++++ b/drivers/net/wireless/rt2x00/rt2500usb.c
+@@ -1921,7 +1921,7 @@ static struct usb_device_id rt2500usb_device_table[] = {
+ 	{ USB_DEVICE(0x0b05, 0x1706) },
+ 	{ USB_DEVICE(0x0b05, 0x1707) },
+ 	/* Belkin */
+-	{ USB_DEVICE(0x050d, 0x7050) },
++	{ USB_DEVICE(0x050d, 0x7050) },	/* FCC ID: K7SF5D7050A ver. 2.x */
+ 	{ USB_DEVICE(0x050d, 0x7051) },
+ 	/* Cisco Systems */
+ 	{ USB_DEVICE(0x13b1, 0x000d) },
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index b66a61b..3d4ea1f 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -959,6 +959,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x07d1, 0x3c15) },
+ 	{ USB_DEVICE(0x07d1, 0x3c16) },
+ 	{ USB_DEVICE(0x2001, 0x3c1b) },
++	{ USB_DEVICE(0x2001, 0x3c1e) },
+ 	/* Draytek */
+ 	{ USB_DEVICE(0x07fa, 0x7712) },
+ 	/* DVICO */
+@@ -1090,6 +1091,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x177f, 0x0153) },
+ 	{ USB_DEVICE(0x177f, 0x0302) },
+ 	{ USB_DEVICE(0x177f, 0x0313) },
++	{ USB_DEVICE(0x177f, 0x0323) },
+ 	/* U-Media */
+ 	{ USB_DEVICE(0x157e, 0x300e) },
+ 	{ USB_DEVICE(0x157e, 0x3013) },
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
+index 2ad468d..9e724eb 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.c
++++ b/drivers/net/wireless/rt2x00/rt73usb.c
+@@ -2421,6 +2421,7 @@ static struct usb_device_id rt73usb_device_table[] = {
+ 	{ USB_DEVICE(0x0b05, 0x1723) },
+ 	{ USB_DEVICE(0x0b05, 0x1724) },
+ 	/* Belkin */
++	{ USB_DEVICE(0x050d, 0x7050) },	/* FCC ID: K7SF5D7050B ver. 3.x */
+ 	{ USB_DEVICE(0x050d, 0x705a) },
+ 	{ USB_DEVICE(0x050d, 0x905b) },
+ 	{ USB_DEVICE(0x050d, 0x905c) },
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index a49e848..30dd0a9 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -503,8 +503,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+ 	WARN_ON(skb_queue_empty(&rx_queue));
+ 	while (!skb_queue_empty(&rx_queue)) {
+ 		_skb = skb_dequeue(&rx_queue);
+-		_rtl_usb_rx_process_agg(hw, skb);
+-		ieee80211_rx_irqsafe(hw, skb);
++		_rtl_usb_rx_process_agg(hw, _skb);
++		ieee80211_rx_irqsafe(hw, _skb);
+ 	}
+ }
+ 
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 94b79c3..9d7f172 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+ 
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+ 
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 1825629..5925e0b 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -342,17 +342,22 @@ err:
+ 	return err;
+ }
+ 
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ 	struct net_device *dev = vif->dev;
+-	if (netif_carrier_ok(dev)) {
+-		rtnl_lock();
+-		netif_carrier_off(dev); /* discard queued packets */
+-		if (netif_running(dev))
+-			xenvif_down(vif);
+-		rtnl_unlock();
+-		xenvif_put(vif);
+-	}
++
++	rtnl_lock();
++	netif_carrier_off(dev); /* discard queued packets */
++	if (netif_running(dev))
++		xenvif_down(vif);
++	rtnl_unlock();
++	xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++	if (netif_carrier_ok(vif->dev))
++		xenvif_carrier_off(vif);
+ 
+ 	atomic_dec(&vif->refcnt);
+ 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 15e332d..b802bb3 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
+ 	atomic_dec(&netbk->netfront_count);
+ }
+ 
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++				  u8 status);
+ static void make_tx_response(struct xenvif *vif,
+ 			     struct xen_netif_tx_request *txp,
+ 			     s8       st);
+@@ -851,7 +852,7 @@ static void netbk_tx_err(struct xenvif *vif,
+ 
+ 	do {
+ 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+-		if (cons >= end)
++		if (cons == end)
+ 			break;
+ 		txp = RING_GET_REQUEST(&vif->tx, cons++);
+ 	} while (1);
+@@ -860,6 +861,13 @@ static void netbk_tx_err(struct xenvif *vif,
+ 	xenvif_put(vif);
+ }
+ 
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++	netdev_err(vif->dev, "fatal error; disabling device\n");
++	xenvif_carrier_off(vif);
++	xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ 				struct xen_netif_tx_request *first,
+ 				struct xen_netif_tx_request *txp,
+@@ -873,19 +881,22 @@ static int netbk_count_requests(struct xenvif *vif,
+ 
+ 	do {
+ 		if (frags >= work_to_do) {
+-			netdev_dbg(vif->dev, "Need more frags\n");
++			netdev_err(vif->dev, "Need more frags\n");
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 
+ 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
+-			netdev_dbg(vif->dev, "Too many frags\n");
++			netdev_err(vif->dev, "Too many frags\n");
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 
+ 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ 		       sizeof(*txp));
+ 		if (txp->size > first->size) {
+-			netdev_dbg(vif->dev, "Frags galore\n");
++			netdev_err(vif->dev, "Frag is bigger than frame.\n");
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 
+@@ -893,8 +904,9 @@ static int netbk_count_requests(struct xenvif *vif,
+ 		frags++;
+ 
+ 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
++			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ 				 txp->offset, txp->size);
++			netbk_fatal_tx_err(vif);
+ 			return -frags;
+ 		}
+ 	} while ((txp++)->flags & XEN_NETTXF_more_data);
+@@ -938,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ 		pending_idx = netbk->pending_ring[index];
+ 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ 		if (!page)
+-			return NULL;
++			goto err;
+ 
+ 		netbk->mmap_pages[pending_idx] = page;
+ 
+@@ -962,6 +974,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ 	}
+ 
+ 	return gop;
++err:
++	/* Unwind, freeing all pages and sending error responses. */
++	while (i-- > start) {
++		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
++				      XEN_NETIF_RSP_ERROR);
++	}
++	/* The head too, if necessary. */
++	if (start)
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
++
++	return NULL;
+ }
+ 
+ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+@@ -970,30 +993,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ {
+ 	struct gnttab_copy *gop = *gopp;
+ 	u16 pending_idx = *((u16 *)skb->data);
+-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
+-	struct xen_netif_tx_request *txp;
+ 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+ 	int nr_frags = shinfo->nr_frags;
+ 	int i, err, start;
+ 
+ 	/* Check status of header. */
+ 	err = gop->status;
+-	if (unlikely(err)) {
+-		pending_ring_idx_t index;
+-		index = pending_index(netbk->pending_prod++);
+-		txp = &pending_tx_info[pending_idx].req;
+-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+-		netbk->pending_ring[index] = pending_idx;
+-		xenvif_put(vif);
+-	}
++	if (unlikely(err))
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ 
+ 	/* Skip first skb fragment if it is on same page as header fragment. */
+ 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+ 
+ 	for (i = start; i < nr_frags; i++) {
+ 		int j, newerr;
+-		pending_ring_idx_t index;
+ 
+ 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ 
+@@ -1002,16 +1015,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ 		if (likely(!newerr)) {
+ 			/* Had a previous error? Invalidate this fragment. */
+ 			if (unlikely(err))
+-				xen_netbk_idx_release(netbk, pending_idx);
++				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 			continue;
+ 		}
+ 
+ 		/* Error on this fragment: respond to client with an error. */
+-		txp = &netbk->pending_tx_info[pending_idx].req;
+-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+-		index = pending_index(netbk->pending_prod++);
+-		netbk->pending_ring[index] = pending_idx;
+-		xenvif_put(vif);
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ 
+ 		/* Not the first error? Preceding frags already invalidated. */
+ 		if (err)
+@@ -1019,10 +1028,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ 
+ 		/* First error: invalidate header and preceding fragments. */
+ 		pending_idx = *((u16 *)skb->data);
+-		xen_netbk_idx_release(netbk, pending_idx);
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 		for (j = start; j < i; j++) {
+ 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+-			xen_netbk_idx_release(netbk, pending_idx);
++			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 		}
+ 
+ 		/* Remember the error: invalidate all subsequent fragments. */
+@@ -1056,7 +1065,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+ 
+ 		/* Take an extra reference to offset xen_netbk_idx_release */
+ 		get_page(netbk->mmap_pages[pending_idx]);
+-		xen_netbk_idx_release(netbk, pending_idx);
++		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 	}
+ }
+ 
+@@ -1069,7 +1078,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+ 
+ 	do {
+ 		if (unlikely(work_to_do-- <= 0)) {
+-			netdev_dbg(vif->dev, "Missing extra info\n");
++			netdev_err(vif->dev, "Missing extra info\n");
++			netbk_fatal_tx_err(vif);
+ 			return -EBADR;
+ 		}
+ 
+@@ -1078,8 +1088,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+ 		if (unlikely(!extra.type ||
+ 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ 			vif->tx.req_cons = ++cons;
+-			netdev_dbg(vif->dev,
++			netdev_err(vif->dev,
+ 				   "Invalid extra type: %d\n", extra.type);
++			netbk_fatal_tx_err(vif);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1095,13 +1106,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
+ 			     struct xen_netif_extra_info *gso)
+ {
+ 	if (!gso->u.gso.size) {
+-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
++		netdev_err(vif->dev, "GSO size must not be zero.\n");
++		netbk_fatal_tx_err(vif);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Currently only TCPv4 S.O. is supported. */
+ 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++		netbk_fatal_tx_err(vif);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1238,9 +1251,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ 
+ 		/* Get a netif from the list with work to do. */
+ 		vif = poll_net_schedule_list(netbk);
++		/* This can sometimes happen because the test of
++		 * list_empty(net_schedule_list) at the top of the
++		 * loop is unlocked.  Just go back and have another
++		 * look.
++		 */
+ 		if (!vif)
+ 			continue;
+ 
++		if (vif->tx.sring->req_prod - vif->tx.req_cons >
++		    XEN_NETIF_TX_RING_SIZE) {
++			netdev_err(vif->dev,
++				   "Impossible number of requests. "
++				   "req_prod %d, req_cons %d, size %ld\n",
++				   vif->tx.sring->req_prod, vif->tx.req_cons,
++				   XEN_NETIF_TX_RING_SIZE);
++			netbk_fatal_tx_err(vif);
++			continue;
++		}
++
+ 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ 		if (!work_to_do) {
+ 			xenvif_put(vif);
+@@ -1268,17 +1297,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ 			work_to_do = xen_netbk_get_extras(vif, extras,
+ 							  work_to_do);
+ 			idx = vif->tx.req_cons;
+-			if (unlikely(work_to_do < 0)) {
+-				netbk_tx_err(vif, &txreq, idx);
++			if (unlikely(work_to_do < 0))
+ 				continue;
+-			}
+ 		}
+ 
+ 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+-		if (unlikely(ret < 0)) {
+-			netbk_tx_err(vif, &txreq, idx - ret);
++		if (unlikely(ret < 0))
+ 			continue;
+-		}
++
+ 		idx += ret;
+ 
+ 		if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1290,11 +1316,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ 
+ 		/* No crossing a page as the payload mustn't fragment. */
+ 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+-			netdev_dbg(vif->dev,
++			netdev_err(vif->dev,
+ 				   "txreq.offset: %x, size: %u, end: %lu\n",
+ 				   txreq.offset, txreq.size,
+ 				   (txreq.offset&~PAGE_MASK) + txreq.size);
+-			netbk_tx_err(vif, &txreq, idx);
++			netbk_fatal_tx_err(vif);
+ 			continue;
+ 		}
+ 
+@@ -1322,8 +1348,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+ 
+ 			if (netbk_set_skb_gso(vif, skb, gso)) {
++				/* Failure in netbk_set_skb_gso is fatal. */
+ 				kfree_skb(skb);
+-				netbk_tx_err(vif, &txreq, idx);
+ 				continue;
+ 			}
+ 		}
+@@ -1424,7 +1450,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+ 			txp->size -= data_len;
+ 		} else {
+ 			/* Schedule a response immediately. */
+-			xen_netbk_idx_release(netbk, pending_idx);
++			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ 		}
+ 
+ 		if (txp->flags & XEN_NETTXF_csum_blank)
+@@ -1479,7 +1505,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
+ 
+ }
+ 
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++				  u8 status)
+ {
+ 	struct xenvif *vif;
+ 	struct pending_tx_info *pending_tx_info;
+@@ -1493,7 +1520,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+ 
+ 	vif = pending_tx_info->vif;
+ 
+-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
++	make_tx_response(vif, &pending_tx_info->req, status);
+ 
+ 	index = pending_index(netbk->pending_prod++);
+ 	netbk->pending_ring[index] = pending_idx;
+diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
+index da8beb8..627b66a 100644
+--- a/drivers/rtc/rtc-isl1208.c
++++ b/drivers/rtc/rtc-isl1208.c
+@@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data)
+ {
+ 	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ 	struct i2c_client *client = data;
++	struct rtc_device *rtc = i2c_get_clientdata(client);
+ 	int handled = 0, sr, err;
+ 
+ 	/*
+@@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data)
+ 	if (sr & ISL1208_REG_SR_ALM) {
+ 		dev_dbg(&client->dev, "alarm!\n");
+ 
++		rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
++
+ 		/* Clear the alarm */
+ 		sr &= ~ISL1208_REG_SR_ALM;
+ 		sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index 1e80a48..73816d8 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -44,6 +44,7 @@
+ #define RTC_YMR		0x34	/* Year match register */
+ #define RTC_YLR		0x38	/* Year data load register */
+ 
++#define RTC_CR_EN	(1 << 0)	/* counter enable bit */
+ #define RTC_CR_CWEN	(1 << 26)	/* Clockwatch enable bit */
+ 
+ #define RTC_TCR_EN	(1 << 1) /* Periodic timer enable bit */
+@@ -312,7 +313,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ 	int ret;
+ 	struct pl031_local *ldata;
+ 	struct rtc_class_ops *ops = id->data;
+-	unsigned long time;
++	unsigned long time, data;
+ 
+ 	ret = amba_request_regions(adev, NULL);
+ 	if (ret)
+@@ -339,10 +340,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ 	dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer);
+ 	dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
+ 
++	data = readl(ldata->base + RTC_CR);
+ 	/* Enable the clockwatch on ST Variants */
+ 	if (ldata->hw_designer == AMBA_VENDOR_ST)
+-		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
+-		       ldata->base + RTC_CR);
++		data |= RTC_CR_CWEN;
++	writel(data | RTC_CR_EN, ldata->base + RTC_CR);
+ 
+ 	/*
+ 	 * On ST PL031 variants, the RTC reset value does not provide correct
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 34655d0..08e470f 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+ }
+ 
+ static const unsigned char
+-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+ 
+ /* carryover low/fullspeed bandwidth that crosses uframe boundries */
+ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 5cc401b..c7cfbce 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ 				"defaulting to EHCI.\n");
+ 		dev_warn(&xhci_pdev->dev,
+ 				"USB 3.0 devices will work at USB 2.0 speeds.\n");
++		usb_disable_xhci_ports(xhci_pdev);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2ed591d..5c1f9e7 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2504,6 +2504,8 @@ cleanup:
+ 				(trb_comp_code != COMP_STALL &&
+ 					trb_comp_code != COMP_BABBLE))
+ 				xhci_urb_free_priv(xhci, urb_priv);
++			else
++				kfree(urb_priv);
+ 
+ 			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ 			if ((urb->actual_length != urb->transfer_buffer_length &&
+@@ -3032,7 +3034,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+ 	 * running_total.
+ 	 */
+ 	packets_transferred = (running_total + trb_buff_len) /
+-		usb_endpoint_maxp(&urb->ep->desc);
++		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ 
+ 	if ((total_packet_count - packets_transferred) > 31)
+ 		return 31 << 17;
+@@ -3594,7 +3596,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		td_len = urb->iso_frame_desc[i].length;
+ 		td_remain_len = td_len;
+ 		total_packet_count = DIV_ROUND_UP(td_len,
+-				usb_endpoint_maxp(&urb->ep->desc));
++				GET_MAX_PACKET(
++					usb_endpoint_maxp(&urb->ep->desc)));
+ 		/* A zero-length transfer still involves at least one packet. */
+ 		if (total_packet_count == 0)
+ 			total_packet_count++;
+@@ -3617,9 +3620,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		td = urb_priv->td[i];
+ 		for (j = 0; j < trbs_per_td; j++) {
+ 			u32 remainder = 0;
+-			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
++			field = 0;
+ 
+ 			if (first_trb) {
++				field = TRB_TBC(burst_count) |
++					TRB_TLBPC(residue);
+ 				/* Queue the isoc TRB */
+ 				field |= TRB_TYPE(TRB_ISOC);
+ 				/* Assume URB_ISO_ASAP is set */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 2cc7c18..d644a66 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -590,6 +590,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	/*
+ 	 * ELV devices:
+ 	 */
++	{ USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+@@ -676,6 +677,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index dd6edf8..97e0a6b 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -147,6 +147,11 @@
+ #define XSENS_CONVERTER_6_PID	0xD38E
+ #define XSENS_CONVERTER_7_PID	0xD38F
+ 
++/**
++ * Zolix (www.zolix.com.cb) product ids
++ */
++#define FTDI_OMNI1509			0xD491	/* Omni1509 embedded USB-serial */
++
+ /*
+  * NDI (www.ndigital.com) product ids
+  */
+@@ -204,7 +209,7 @@
+ 
+ /*
+  * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+- * All of these devices use FTDI's vendor ID (0x0403).
++ * Almost all of these devices use FTDI's vendor ID (0x0403).
+  * Further IDs taken from ELV Windows .inf file.
+  *
+  * The previously included PID for the UO 100 module was incorrect.
+@@ -212,6 +217,8 @@
+  *
+  * Armin Laeuger originally sent the PID for the UM 100 module.
+  */
++#define FTDI_ELV_VID	0x1B1F	/* ELV AG */
++#define FTDI_ELV_WS300_PID	0xC006	/* eQ3 WS 300 PC II */
+ #define FTDI_ELV_USR_PID	0xE000	/* ELV Universal-Sound-Recorder */
+ #define FTDI_ELV_MSM1_PID	0xE001	/* ELV Mini-Sound-Modul */
+ #define FTDI_ELV_KL100_PID	0xE002	/* ELV Kfz-Leistungsmesser KL 100 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 9db3e23..52cd814 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_DUAL		0x1005
+ #define TELIT_PRODUCT_CC864_SINGLE		0x1006
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
++#define TELIT_PRODUCT_LE920			0x1200
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);
+ #define TPLINK_VENDOR_ID			0x2357
+ #define TPLINK_PRODUCT_MA180			0x0201
+ 
++/* Changhong products */
++#define CHANGHONG_VENDOR_ID			0x2077
++#define CHANGHONG_PRODUCT_CH690			0x7001
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ 		OPTION_BLACKLIST_NONE = 0,
+@@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ 	.reserved = BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info telit_le920_blacklist = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(1) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
++		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 6634477..14c4a82 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_G1K(0x05c6, 0x9221)},	/* Generic Gobi QDL device */
+ 	{DEVICE_G1K(0x05c6, 0x9231)},	/* Generic Gobi QDL device */
+ 	{DEVICE_G1K(0x1f45, 0x0001)},	/* Unknown Gobi QDL device */
++	{DEVICE_G1K(0x1bc7, 0x900e)},	/* Telit Gobi QDL device */
+ 
+ 	/* Gobi 2000 devices */
+ 	{USB_DEVICE(0x1410, 0xa010)},	/* Novatel Gobi 2000 QDL device */
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 105d900..16b0bf0 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
+ 	return 0;
+ }
+ 
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us)
++/* This places the HUAWEI usb dongles in multi-port mode */
++static int usb_stor_huawei_feature_init(struct us_data *us)
+ {
+ 	int result;
+ 
+@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
+ 	US_DEBUGP("Huawei mode set result is %d\n", result);
+ 	return 0;
+ }
++
++/*
++ * It will send a SCSI switch command called 'rewind' to the Huawei dongle.
++ * When the dongle receives this command for the first time,
++ * it will reboot immediately. After rebooting, it will ignore this command,
++ * so it is unnecessary to read its response.
++ */
++static int usb_stor_huawei_scsi_init(struct us_data *us)
++{
++	int result = 0;
++	int act_len = 0;
++	struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
++	char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
++			0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
++
++	bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
++	bcbw->Tag = 0;
++	bcbw->DataTransferLength = 0;
++	bcbw->Flags = bcbw->Lun = 0;
++	bcbw->Length = sizeof(rewind_cmd);
++	memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
++	memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
++
++	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
++					US_BULK_CB_WRAP_LEN, &act_len);
++	US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
++	return result;
++}
++
++/*
++ * It tries to identify the supported Huawei USB dongles.
++ * Huawei assigns the following product IDs
++ * to all of their mobile broadband dongles,
++ * including new dongles released in the future.
++ * So if the product ID is not included in this list,
++ * the device is not a Huawei mobile broadband dongle.
++ */
++static int usb_stor_huawei_dongles_pid(struct us_data *us)
++{
++	struct usb_interface_descriptor *idesc;
++	int idProduct;
++
++	idesc = &us->pusb_intf->cur_altsetting->desc;
++	idProduct = us->pusb_dev->descriptor.idProduct;
++	/* The first interface being the CDROM port means the dongle is
++	 * in single-port mode, so a switch command needs to be sent to
++	 * put it into multi-port mode. */
++	if (idesc && idesc->bInterfaceNumber == 0) {
++		if ((idProduct == 0x1001)
++			|| (idProduct == 0x1003)
++			|| (idProduct == 0x1004)
++			|| (idProduct >= 0x1401 && idProduct <= 0x1500)
++			|| (idProduct >= 0x1505 && idProduct <= 0x1600)
++			|| (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
++			return 1;
++		}
++	}
++	return 0;
++}
++
++int usb_stor_huawei_init(struct us_data *us)
++{
++	int result = 0;
++
++	if (usb_stor_huawei_dongles_pid(us)) {
++		if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++			result = usb_stor_huawei_scsi_init(us);
++		else
++			result = usb_stor_huawei_feature_init(us);
++	}
++	return result;
++}
+diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
+index 529327f..5376d4f 100644
+--- a/drivers/usb/storage/initializers.h
++++ b/drivers/usb/storage/initializers.h
+@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
+  * flash reader */
+ int usb_stor_ucr61s2b_init(struct us_data *us);
+ 
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us);
++/* This places the HUAWEI usb dongles in multi-port mode */
++int usb_stor_huawei_init(struct us_data *us);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index fa8a1b2..12640ef 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1515,335 +1515,10 @@ UNUSUAL_DEV(  0x1210, 0x0003, 0x0100, 0x0100,
+ /* Reported by fangxiaozhi <huananhu@huawei.com>
+  * This brings the HUAWEI data card devices into multi-port mode
+  */
+-UNUSUAL_DEV(  0x12d1, 0x1001, 0x0000, 0x0000,
++UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
+ 		"HUAWEI MOBILE",
+ 		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1003, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1004, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1401, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1402, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1403, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1404, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1405, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1406, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1407, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1408, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1409, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x140A, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x140B, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x140C, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x140D, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x140E, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x140F, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1410, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1411, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1412, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1413, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1414, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1415, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1416, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1417, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1418, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1419, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x141A, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x141B, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x141C, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x141D, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x141E, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x141F, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1420, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1421, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1422, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1423, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1424, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1425, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1426, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1427, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1428, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1429, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x142A, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x142B, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x142C, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x142D, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x142E, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x142F, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1430, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1431, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1432, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1433, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1434, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1435, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1436, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1437, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1438, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x1439, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x143A, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x143B, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x143C, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x143D, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x143E, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+-		0),
+-UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0x0000,
+-		"HUAWEI MOBILE",
+-		"Mass Storage",
+-		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
+ 		0),
+ 
+ /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index db51ba1..d582af4 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
+ 	.useTransport = use_transport,	\
+ }
+ 
++#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
++		vendor_name, product_name, use_protocol, use_transport, \
++		init_function, Flags) \
++{ \
++	.vendorName = vendor_name,	\
++	.productName = product_name,	\
++	.useProtocol = use_protocol,	\
++	.useTransport = use_transport,	\
++	.initFunction = init_function,	\
++}
++
+ static struct us_unusual_dev us_unusual_dev_list[] = {
+ #	include "unusual_devs.h" 
+ 	{ }		/* Terminating entry */
+@@ -128,6 +139,7 @@ static struct us_unusual_dev us_unusual_dev_list[] = {
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+ 
+ 
+ #ifdef CONFIG_PM	/* Minimal support for suspend and resume */
+diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
+index b969279..a9b5f2e 100644
+--- a/drivers/usb/storage/usual-tables.c
++++ b/drivers/usb/storage/usual-tables.c
+@@ -46,6 +46,20 @@
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
+   .driver_info = ((useType)<<24) }
+ 
++/* Define a device matched by vendor ID and interface descriptors */
++#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
++			vendorName, productName, useProtocol, useTransport, \
++			initFunction, flags) \
++{ \
++	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++				| USB_DEVICE_ID_MATCH_VENDOR, \
++	.idVendor    = (id_vendor), \
++	.bInterfaceClass = (cl), \
++	.bInterfaceSubClass = (sc), \
++	.bInterfaceProtocol = (pr), \
++	.driver_info = (flags) \
++}
++
+ struct usb_device_id usb_storage_usb_ids[] = {
+ #	include "unusual_devs.h"
+ 	{ }		/* Terminating entry */
+@@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+ 
+ 
+ /*
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index c598cfb..2b5e695 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
+ 	if (ret < 0)
+ 		printk(KERN_ERR "NILFS: GC failed during preparation: "
+ 			"cannot read source blocks: err=%d\n", ret);
+-	else
++	else {
++		if (nilfs_sb_need_update(nilfs))
++			set_nilfs_discontinued(nilfs);
+ 		ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
++	}
+ 
+ 	nilfs_remove_all_gcinodes(nilfs);
+ 	clear_nilfs_gc_running(nilfs);
+diff --git a/fs/splice.c b/fs/splice.c
+index 014fcb4..58ab918 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -697,8 +697,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
+ 		return -EINVAL;
+ 
+ 	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+-	if (sd->len < sd->total_len)
++
++	if (sd->len < sd->total_len && pipe->nrbufs > 1)
+ 		more |= MSG_SENDPAGE_NOTLAST;
++
+ 	return file->f_op->sendpage(file, buf->page, buf->offset,
+ 				    sd->len, &pos, more);
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1e86bb4..8204898 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2597,7 +2597,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ extern void recalc_sigpending_and_wake(struct task_struct *t);
+ extern void recalc_sigpending(void);
+ 
+-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
++
++static inline void signal_wake_up(struct task_struct *t, bool resume)
++{
++	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
++}
++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
++{
++	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
++}
+ 
+ /*
+  * Wrappers for p->thread_info->cpu access. No-op on UP.
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 78ab24a..67fedad 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
+ 	 * TASK_KILLABLE sleeps.
+ 	 */
+ 	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
+-		signal_wake_up(child, task_is_traced(child));
++		ptrace_signal_wake_up(child, true);
+ 
+ 	spin_unlock(&child->sighand->siglock);
+ }
+ 
++/* Ensure that nothing can wake it up, even SIGKILL */
++static bool ptrace_freeze_traced(struct task_struct *task)
++{
++	bool ret = false;
++
++	/* Lockless, nobody but us can set this flag */
++	if (task->jobctl & JOBCTL_LISTENING)
++		return ret;
++
++	spin_lock_irq(&task->sighand->siglock);
++	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++		task->state = __TASK_TRACED;
++		ret = true;
++	}
++	spin_unlock_irq(&task->sighand->siglock);
++
++	return ret;
++}
++
++static void ptrace_unfreeze_traced(struct task_struct *task)
++{
++	if (task->state != __TASK_TRACED)
++		return;
++
++	WARN_ON(!task->ptrace || task->parent != current);
++
++	spin_lock_irq(&task->sighand->siglock);
++	if (__fatal_signal_pending(task))
++		wake_up_state(task, __TASK_TRACED);
++	else
++		task->state = TASK_TRACED;
++	spin_unlock_irq(&task->sighand->siglock);
++}
++
+ /**
+  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+  * @child: ptracee to check for
+@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ 	 * be changed by us so it's not changing right after this.
+ 	 */
+ 	read_lock(&tasklist_lock);
+-	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
++	if (child->ptrace && child->parent == current) {
++		WARN_ON(child->state == __TASK_TRACED);
+ 		/*
+ 		 * child->sighand can't be NULL, release_task()
+ 		 * does ptrace_unlink() before __exit_signal().
+ 		 */
+-		spin_lock_irq(&child->sighand->siglock);
+-		WARN_ON_ONCE(task_is_stopped(child));
+-		if (ignore_state || (task_is_traced(child) &&
+-				     !(child->jobctl & JOBCTL_LISTENING)))
++		if (ignore_state || ptrace_freeze_traced(child))
+ 			ret = 0;
+-		spin_unlock_irq(&child->sighand->siglock);
+ 	}
+ 	read_unlock(&tasklist_lock);
+ 
+-	if (!ret && !ignore_state)
+-		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
++	if (!ret && !ignore_state) {
++		if (!wait_task_inactive(child, __TASK_TRACED)) {
++			/*
++			 * This can only happen if may_ptrace_stop() fails and
++			 * ptrace_stop() changes ->state back to TASK_RUNNING,
++			 * so we should not worry about leaking __TASK_TRACED.
++			 */
++			WARN_ON(child->state == __TASK_TRACED);
++			ret = -ESRCH;
++		}
++	}
+ 
+-	/* All systems go.. */
+ 	return ret;
+ }
+ 
+@@ -307,7 +346,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ 	 */
+ 	if (task_is_stopped(task) &&
+ 	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+-		signal_wake_up(task, 1);
++		signal_wake_up_state(task, __TASK_STOPPED);
+ 
+ 	spin_unlock(&task->sighand->siglock);
+ 
+@@ -736,7 +775,7 @@ int ptrace_request(struct task_struct *child, long request,
+ 		 * tracee into STOP.
+ 		 */
+ 		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+-			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
++			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+ 
+ 		unlock_task_sighand(child, &flags);
+ 		ret = 0;
+@@ -762,7 +801,7 @@ int ptrace_request(struct task_struct *child, long request,
+ 			 * start of this trap and now.  Trigger re-trap.
+ 			 */
+ 			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+-				signal_wake_up(child, true);
++				ptrace_signal_wake_up(child, true);
+ 			ret = 0;
+ 		}
+ 		unlock_task_sighand(child, &flags);
+@@ -899,6 +938,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ 		goto out_put_task_struct;
+ 
+ 	ret = arch_ptrace(child, request, addr, data);
++	if (ret || request != PTRACE_DETACH)
++		ptrace_unfreeze_traced(child);
+ 
+  out_put_task_struct:
+ 	put_task_struct(child);
+@@ -1038,8 +1079,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ 
+ 	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ 				  request == PTRACE_INTERRUPT);
+-	if (!ret)
++	if (!ret) {
+ 		ret = compat_arch_ptrace(child, request, addr, data);
++		if (ret || request != PTRACE_DETACH)
++			ptrace_unfreeze_traced(child);
++	}
+ 
+  out_put_task_struct:
+ 	put_task_struct(child);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 7640b3a..08aa28e 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -757,6 +757,7 @@ static void __init __reserve_region_with_split(struct resource *root,
+ 	struct resource *parent = root;
+ 	struct resource *conflict;
+ 	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
++	struct resource *next_res = NULL;
+ 
+ 	if (!res)
+ 		return;
+@@ -766,21 +767,46 @@ static void __init __reserve_region_with_split(struct resource *root,
+ 	res->end = end;
+ 	res->flags = IORESOURCE_BUSY;
+ 
+-	conflict = __request_resource(parent, res);
+-	if (!conflict)
+-		return;
++	while (1) {
+ 
+-	/* failed, split and try again */
+-	kfree(res);
++		conflict = __request_resource(parent, res);
++		if (!conflict) {
++			if (!next_res)
++				break;
++			res = next_res;
++			next_res = NULL;
++			continue;
++		}
+ 
+-	/* conflict covered whole area */
+-	if (conflict->start <= start && conflict->end >= end)
+-		return;
++		/* conflict covered whole area */
++		if (conflict->start <= res->start &&
++				conflict->end >= res->end) {
++			kfree(res);
++			WARN_ON(next_res);
++			break;
++		}
++
++		/* failed, split and try again */
++		if (conflict->start > res->start) {
++			end = res->end;
++			res->end = conflict->start - 1;
++			if (conflict->end < end) {
++				next_res = kzalloc(sizeof(*next_res),
++						GFP_ATOMIC);
++				if (!next_res) {
++					kfree(res);
++					break;
++				}
++				next_res->name = name;
++				next_res->start = conflict->end + 1;
++				next_res->end = end;
++				next_res->flags = IORESOURCE_BUSY;
++			}
++		} else {
++			res->start = conflict->end + 1;
++		}
++	}
+ 
+-	if (conflict->start > start)
+-		__reserve_region_with_split(root, start, conflict->start-1, name);
+-	if (conflict->end < end)
+-		__reserve_region_with_split(root, conflict->end+1, end, name);
+ }
+ 
+ void __init reserve_region_with_split(struct resource *root,
+diff --git a/kernel/sched.c b/kernel/sched.c
+index fcc893f..eeeec4e 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2924,7 +2924,8 @@ out:
+  */
+ int wake_up_process(struct task_struct *p)
+ {
+-	return try_to_wake_up(p, TASK_ALL, 0);
++	WARN_ON(task_is_stopped_or_traced(p));
++	return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+ 
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 78fcacf..6ad4fb3 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -384,7 +384,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+ static int do_balance_runtime(struct rt_rq *rt_rq)
+ {
+ 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+-	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
++	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
+ 	int i, weight, more = 0;
+ 	u64 rt_period;
+ 
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 08e0b97..d2f55ea 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -676,23 +676,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+  * No need to set need_resched since signal event passing
+  * goes through ->blocked
+  */
+-void signal_wake_up(struct task_struct *t, int resume)
++void signal_wake_up_state(struct task_struct *t, unsigned int state)
+ {
+-	unsigned int mask;
+-
+ 	set_tsk_thread_flag(t, TIF_SIGPENDING);
+-
+ 	/*
+-	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
++	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+ 	 * case. We don't check t->state here because there is a race with it
+ 	 * executing another processor and just now entering stopped state.
+ 	 * By using wake_up_state, we ensure the process will wake up and
+ 	 * handle its death signal.
+ 	 */
+-	mask = TASK_INTERRUPTIBLE;
+-	if (resume)
+-		mask |= TASK_WAKEKILL;
+-	if (!wake_up_state(t, mask))
++	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+ 		kick_process(t);
+ }
+ 
+@@ -841,7 +835,7 @@ static void ptrace_trap_notify(struct task_struct *t)
+ 	assert_spin_locked(&t->sighand->siglock);
+ 
+ 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+-	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
++	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+ }
+ 
+ /*
+@@ -1765,6 +1759,10 @@ static inline int may_ptrace_stop(void)
+ 	 * If SIGKILL was already sent before the caller unlocked
+ 	 * ->siglock we must see ->core_state != NULL. Otherwise it
+ 	 * is safe to enter schedule().
++	 *
++	 * This is almost outdated, a task with the pending SIGKILL can't
++	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
++	 * after SIGKILL was already dequeued.
+ 	 */
+ 	if (unlikely(current->mm->core_state) &&
+ 	    unlikely(current->mm == current->parent->mm))
+@@ -1890,6 +1888,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+ 		if (gstop_done)
+ 			do_notify_parent_cldstop(current, false, why);
+ 
++		/* tasklist protects us from ptrace_freeze_traced() */
+ 		__set_current_state(TASK_RUNNING);
+ 		if (clear_code)
+ 			current->exit_code = 0;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 6033f02..7a157b3 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1972,7 +1972,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
+ 	if (ev->opcode != HCI_OP_NOP)
+ 		del_timer(&hdev->cmd_timer);
+ 
+-	if (ev->ncmd) {
++	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+ 		atomic_set(&hdev->cmd_cnt, 1);
+ 		if (!skb_queue_empty(&hdev->cmd_q))
+ 			tasklet_schedule(&hdev->cmd_task);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 1849ee0..9ab60e6 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -642,6 +642,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+ 
+ 	skb_pull(skb, sizeof(code));
+ 
++	/*
++	 * The SMP context must be initialized for all other PDUs except
++	 * pairing and security requests. If we get any other PDU when
++	 * not initialized simply disconnect (done if this function
++	 * returns an error).
++	 */
++	if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
++	    !conn->smp_chan) {
++		BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
++		kfree_skb(skb);
++		return -ENOTSUPP;
++	}
++
+ 	switch (code) {
+ 	case SMP_CMD_PAIRING_REQ:
+ 		reason = smp_cmd_pairing_req(conn, skb);
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 577ea5d..7c1745d 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -245,6 +245,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
+ 	struct net_device *dev = skb->dev;
+ 	u32 len;
+ 
++	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++		goto inhdr_error;
++
+ 	iph = ip_hdr(skb);
+ 	opt = &(IPCB(skb)->opt);
+ 
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 7bc9991..2ef7da0 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1803,10 +1803,13 @@ static ssize_t pktgen_thread_write(struct file *file,
+ 			return -EFAULT;
+ 		i += len;
+ 		mutex_lock(&pktgen_thread_lock);
+-		pktgen_add_device(t, f);
++		ret = pktgen_add_device(t, f);
+ 		mutex_unlock(&pktgen_thread_lock);
+-		ret = count;
+-		sprintf(pg_result, "OK: add_device=%s", f);
++		if (!ret) {
++			ret = count;
++			sprintf(pg_result, "OK: add_device=%s", f);
++		} else
++			sprintf(pg_result, "ERROR: can not add device %s", f);
+ 		goto out;
+ 	}
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 0106d25..3b36002 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -600,7 +600,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
+ 	case IP_TTL:
+ 		if (optlen < 1)
+ 			goto e_inval;
+-		if (val != -1 && (val < 0 || val > 255))
++		if (val != -1 && (val < 1 || val > 255))
+ 			goto e_inval;
+ 		inet->uc_ttl = val;
+ 		break;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index aab8f08..e865ed1 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3655,6 +3655,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
+ 		}
+ 	} else {
+ 		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
++			if (!tcp_packets_in_flight(tp)) {
++				tcp_enter_frto_loss(sk, 2, flag);
++				return true;
++			}
++
+ 			/* Prevent sending of new data. */
+ 			tp->snd_cwnd = min(tp->snd_cwnd,
+ 					   tcp_packets_in_flight(tp));
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index aef80d7..b27baed 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1739,7 +1739,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ 			continue;
+ 		if ((rt->rt6i_flags & flags) != flags)
+ 			continue;
+-		if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
++		if ((rt->rt6i_flags & noflags) != 0)
+ 			continue;
+ 		dst_hold(&rt->dst);
+ 		break;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ae98e09..3ccd9b2 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1284,10 +1284,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 		cork->length = 0;
+ 		sk->sk_sndmsg_page = NULL;
+ 		sk->sk_sndmsg_off = 0;
+-		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
++		exthdrlen = (opt ? opt->opt_flen : 0);
+ 		length += exthdrlen;
+ 		transhdrlen += exthdrlen;
+-		dst_exthdrlen = rt->dst.header_len;
++		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
+ 	} else {
+ 		rt = (struct rt6_info *)cork->dst;
+ 		fl6 = &inet->cork.fl.u.ip6;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 19724bd..791c1fa 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -819,7 +819,8 @@ restart:
+ 	dst_hold(&rt->dst);
+ 	read_unlock_bh(&table->tb6_lock);
+ 
+-	if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
++	if (!dst_get_neighbour_raw(&rt->dst)
++	    && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
+ 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ 	else if (!(rt->dst.flags & DST_HOST))
+ 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 85afc13..835fcea 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2422,13 +2422,15 @@ static int packet_release(struct socket *sock)
+ 
+ 	packet_flush_mclist(sk);
+ 
+-	memset(&req_u, 0, sizeof(req_u));
+-
+-	if (po->rx_ring.pg_vec)
++	if (po->rx_ring.pg_vec) {
++		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 0);
++	}
+ 
+-	if (po->tx_ring.pg_vec)
++	if (po->tx_ring.pg_vec) {
++		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 1);
++	}
+ 
+ 	fanout_release(sk);
+ 
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index c8cc24e..dbe5870a 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
+ /* Final destructor for endpoint.  */
+ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ {
++	int i;
++
+ 	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
+ 
+ 	/* Free up the HMAC transform. */
+@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ 	sctp_inq_free(&ep->base.inqueue);
+ 	sctp_bind_addr_free(&ep->base.bind_addr);
+ 
++	for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
++		memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
++
+ 	/* Remove and free the port */
+ 	if (sctp_sk(ep->base.sk)->bind_hash)
+ 		sctp_put_port(ep->base.sk);
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index cfeb1d4..96eb168 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+ 
+ /* Free the outqueue structure and any related pending chunks.
+  */
+-void sctp_outq_teardown(struct sctp_outq *q)
++static void __sctp_outq_teardown(struct sctp_outq *q)
+ {
+ 	struct sctp_transport *transport;
+ 	struct list_head *lchunk, *temp;
+@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ 		sctp_chunk_free(chunk);
+ 	}
+ 
+-	q->error = 0;
+-
+ 	/* Throw away any leftover control chunks. */
+ 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+ 		list_del_init(&chunk->list);
+@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ 	}
+ }
+ 
++void sctp_outq_teardown(struct sctp_outq *q)
++{
++	__sctp_outq_teardown(q);
++	sctp_outq_init(q->asoc, q);
++}
++
+ /* Free the outqueue structure and any related pending chunks.  */
+ void sctp_outq_free(struct sctp_outq *q)
+ {
+ 	/* Throw away leftover chunks. */
+-	sctp_outq_teardown(q);
++	__sctp_outq_teardown(q);
+ 
+ 	/* If we were kmalloc()'d, free the memory.  */
+ 	if (q->malloced)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index fa8333b..5e0d86e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+ 
+ 	ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
+ out:
+-	kfree(authkey);
++	kzfree(authkey);
+ 	return ret;
+ }
+ 

Modified: genpatches-2.6/trunk/3.4/0000_README
===================================================================
--- genpatches-2.6/trunk/3.4/0000_README	2013-02-23 19:07:21 UTC (rev 2290)
+++ genpatches-2.6/trunk/3.4/0000_README	2013-02-28 19:13:50 UTC (rev 2291)
@@ -167,6 +167,14 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.4.32
 
+Patch:  1032_linux-3.4.33.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.4.33
+
+Patch:  1033_linux-3.4.34.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.4.34
+
 Patch:  1700_correct-bnx2-firware-ver-mips.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=424609
 Desc:   Correct firmware version for bnx2 on mips

Added: genpatches-2.6/trunk/3.4/1032_linux-3.4.33.patch
===================================================================
--- genpatches-2.6/trunk/3.4/1032_linux-3.4.33.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.4/1032_linux-3.4.33.patch	2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,54 @@
+diff --git a/Makefile b/Makefile
+index ece8970..a26d19a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
+diff --git a/include/linux/syslog.h b/include/linux/syslog.h
+index 3891139..ce4c665 100644
+--- a/include/linux/syslog.h
++++ b/include/linux/syslog.h
+@@ -47,6 +47,12 @@
+ #define SYSLOG_FROM_CALL 0
+ #define SYSLOG_FROM_FILE 1
+ 
++/*
++ * Syslog priority (PRI) maximum length in char : '<[0-9]{1,3}>'
++ * See RFC5424 for details
++*/
++#define SYSLOG_PRI_MAX_LENGTH 5
++
+ int do_syslog(int type, char __user *buf, int count, bool from_file);
+ 
+ #endif /* _LINUX_SYSLOG_H */
+diff --git a/kernel/printk.c b/kernel/printk.c
+index b663c2c..e95c662 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -638,8 +638,19 @@ static void call_console_drivers(unsigned start, unsigned end)
+ 	start_print = start;
+ 	while (cur_index != end) {
+ 		if (msg_level < 0 && ((end - cur_index) > 2)) {
++			/*
++			 * prepare buf_prefix, as a contiguous array,
++			 * to be processed by log_prefix function
++			 */
++			char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1];
++			unsigned i;
++			for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) {
++				buf_prefix[i] = LOG_BUF(cur_index + i);
++			}
++			buf_prefix[i] = '\0'; /* force '\0' as last string character */
++
+ 			/* strip log prefix */
+-			cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
++			cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL);
+ 			start_print = cur_index;
+ 		}
+ 		while (cur_index != end) {

Added: genpatches-2.6/trunk/3.4/1033_linux-3.4.34.patch
===================================================================
--- genpatches-2.6/trunk/3.4/1033_linux-3.4.34.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.4/1033_linux-3.4.34.patch	2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,3543 @@
+diff --git a/Makefile b/Makefile
+index a26d19a..250be36 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
+diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
+index 27d186a..6bbf936 100644
+--- a/arch/arm/kernel/sched_clock.c
++++ b/arch/arm/kernel/sched_clock.c
+@@ -84,11 +84,11 @@ static void notrace update_sched_clock(void)
+ 	 * detectable in cyc_to_fixed_sched_clock().
+ 	 */
+ 	raw_local_irq_save(flags);
+-	cd.epoch_cyc = cyc;
++	cd.epoch_cyc_copy = cyc;
+ 	smp_wmb();
+ 	cd.epoch_ns = ns;
+ 	smp_wmb();
+-	cd.epoch_cyc_copy = cyc;
++	cd.epoch_cyc = cyc;
+ 	raw_local_irq_restore(flags);
+ }
+ 
+diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h
+index b7de471..b802f28 100644
+--- a/arch/arm/mach-pxa/include/mach/smemc.h
++++ b/arch/arm/mach-pxa/include/mach/smemc.h
+@@ -37,6 +37,7 @@
+ #define CSADRCFG1	(SMEMC_VIRT + 0x84)  /* Address Configuration Register for CS1 */
+ #define CSADRCFG2	(SMEMC_VIRT + 0x88)  /* Address Configuration Register for CS2 */
+ #define CSADRCFG3	(SMEMC_VIRT + 0x8C)  /* Address Configuration Register for CS3 */
++#define CSMSADRCFG	(SMEMC_VIRT + 0xA0)  /* Chip Select Configuration Register */
+ 
+ /*
+  * More handy macros for PCMCIA
+diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
+index 7992305..f38aa89 100644
+--- a/arch/arm/mach-pxa/smemc.c
++++ b/arch/arm/mach-pxa/smemc.c
+@@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void)
+ 	__raw_writel(csadrcfg[1], CSADRCFG1);
+ 	__raw_writel(csadrcfg[2], CSADRCFG2);
+ 	__raw_writel(csadrcfg[3], CSADRCFG3);
++	/* CSMSADRCFG wakes up in its default state (0), so we need to set it */
++	__raw_writel(0x2, CSMSADRCFG);
+ }
+ 
+ static struct syscore_ops smemc_syscore_ops = {
+@@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = {
+ 
+ static int __init smemc_init(void)
+ {
+-	if (cpu_is_pxa3xx())
++	if (cpu_is_pxa3xx()) {
++		/*
++		 * The only documentation we have on the
++		 * Chip Select Configuration Register (CSMSADRCFG) is that
++		 * it must be programmed to 0x2.
++		 * Moreover, in the bit definitions, the second bit
++		 * (CSMSADRCFG[1]) is called "SETALWAYS".
++		 * Other bits are reserved in this register.
++		 */
++		__raw_writel(0x2, CSMSADRCFG);
++
+ 		register_syscore_ops(&smemc_syscore_ops);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
+index 4135de8..13ed33c 100644
+--- a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
++++ b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
+@@ -40,17 +40,17 @@
+ 		addeq	\rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ 		addne	\rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ 		bic	\rd, \rd, #0xff000
+-		ldr	\rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++		ldr	\rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ 		and	\rd, \rd, #0x00ff0000
+ 		teq	\rd, #0x00440000		@ is it 2440?
+ 1004:
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		moveq	\rd, \rd, lsr #SHIFT_2440TXF
+ 		tst	\rd, #S3C2410_UFSTAT_TXFULL
+ 	.endm
+ 
+ 	.macro  fifo_full_s3c2410 rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		tst	\rd, #S3C2410_UFSTAT_TXFULL
+ 	.endm
+ 
+@@ -68,18 +68,18 @@
+ 		addeq	\rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ 		addne	\rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ 		bic	\rd, \rd, #0xff000
+-		ldr	\rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++		ldr	\rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ 		and	\rd, \rd, #0x00ff0000
+ 		teq	\rd, #0x00440000		@ is it 2440?
+ 
+ 10000:
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		andne	\rd, \rd, #S3C2410_UFSTAT_TXMASK
+ 		andeq	\rd, \rd, #S3C2440_UFSTAT_TXMASK
+ 	.endm
+ 
+ 	.macro fifo_level_s3c2410 rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		and	\rd, \rd, #S3C2410_UFSTAT_TXMASK
+ 	.endm
+ 
+diff --git a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
+index 7615a14..6a21bee 100644
+--- a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
++++ b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
+@@ -31,10 +31,10 @@
+ 
+ 		@@ try the interrupt offset register, since it is there
+ 
+-		ldr	\irqstat, [ \base, #INTPND ]
++		ldr	\irqstat, [\base, #INTPND ]
+ 		teq	\irqstat, #0
+ 		beq	1002f
+-		ldr	\irqnr, [ \base, #INTOFFSET ]
++		ldr	\irqnr, [\base, #INTOFFSET ]
+ 		mov	\tmp, #1
+ 		tst	\irqstat, \tmp, lsl \irqnr
+ 		bne	1001f
+diff --git a/arch/arm/mach-s3c24xx/pm-h1940.S b/arch/arm/mach-s3c24xx/pm-h1940.S
+index c93bf2d..6183a68 100644
+--- a/arch/arm/mach-s3c24xx/pm-h1940.S
++++ b/arch/arm/mach-s3c24xx/pm-h1940.S
+@@ -30,4 +30,4 @@
+ 
+ h1940_pm_return:
+ 	mov	r0, #S3C2410_PA_GPIO
+-	ldr	pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ]
++	ldr	pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO]
+diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+index dd5b638..65200ae 100644
+--- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S
++++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+@@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend)
+ 	ldr	r4, =S3C2410_REFRESH
+ 	ldr	r5, =S3C24XX_MISCCR
+ 	ldr	r6, =S3C2410_CLKCON
+-	ldr	r7, [ r4 ]		@ get REFRESH (and ensure in TLB)
+-	ldr	r8, [ r5 ]		@ get MISCCR (and ensure in TLB)
+-	ldr	r9, [ r6 ]		@ get CLKCON (and ensure in TLB)
++	ldr	r7, [r4]		@ get REFRESH (and ensure in TLB)
++	ldr	r8, [r5]		@ get MISCCR (and ensure in TLB)
++	ldr	r9, [r6]		@ get CLKCON (and ensure in TLB)
+ 
+ 	orr	r7, r7, #S3C2410_REFRESH_SELF	@ SDRAM sleep command
+ 	orr	r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
+@@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend)
+ 	@@ align next bit of code to cache line
+ 	.align	5
+ s3c2410_do_sleep:
+-	streq	r7, [ r4 ]			@ SDRAM sleep command
+-	streq	r8, [ r5 ]			@ SDRAM power-down config
+-	streq	r9, [ r6 ]			@ CPU sleep
++	streq	r7, [r4]			@ SDRAM sleep command
++	streq	r8, [r5]			@ SDRAM power-down config
++	streq	r9, [r6]			@ CPU sleep
+ 1:	beq	1b
+ 	mov	pc, r14
+diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+index c82418e..5adaceb 100644
+--- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S
++++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+@@ -57,12 +57,12 @@ s3c2412_sleep_enter1:
+ 	 * retry, as simply returning causes the system to lock.
+ 	*/
+ 
+-	ldrne	r9, [ r1 ]
+-	strne	r9, [ r1 ]
+-	ldrne	r9, [ r2 ]
+-	strne	r9, [ r2 ]
+-	ldrne	r9, [ r3 ]
+-	strne	r9, [ r3 ]
++	ldrne	r9, [r1]
++	strne	r9, [r1]
++	ldrne	r9, [r2]
++	strne	r9, [r2]
++	ldrne	r9, [r3]
++	strne	r9, [r3]
+ 	bne	s3c2412_sleep_enter1
+ 
+ 	mov	pc, r14
+diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
+index 207e275..f3a9cff 100644
+--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
++++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
+@@ -14,12 +14,12 @@
+ /* The S5PV210/S5PC110 implementations are as belows. */
+ 
+ 	.macro fifo_level_s5pv210 rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		and	\rd, \rd, #S5PV210_UFSTAT_TXMASK
+ 	.endm
+ 
+ 	.macro  fifo_full_s5pv210 rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		tst	\rd, #S5PV210_UFSTAT_TXFULL
+ 	.endm
+ 
+@@ -27,7 +27,7 @@
+  * most widely re-used */
+ 
+ 	.macro fifo_level_s3c2440 rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		and	\rd, \rd, #S3C2440_UFSTAT_TXMASK
+ 	.endm
+ 
+@@ -36,7 +36,7 @@
+ #endif
+ 
+ 	.macro  fifo_full_s3c2440 rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+ 		tst	\rd, #S3C2440_UFSTAT_TXFULL
+ 	.endm
+ 
+@@ -45,11 +45,11 @@
+ #endif
+ 
+ 	.macro	senduart,rd,rx
+-		strb 	\rd, [\rx, # S3C2410_UTXH ]
++		strb 	\rd, [\rx, # S3C2410_UTXH]
+ 	.endm
+ 
+ 	.macro	busyuart, rd, rx
+-		ldr	\rd, [ \rx, # S3C2410_UFCON ]
++		ldr	\rd, [\rx, # S3C2410_UFCON]
+ 		tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
+ 		beq	1001f				@
+ 		@ FIFO enabled...
+@@ -60,7 +60,7 @@
+ 
+ 1001:
+ 		@ busy waiting for non fifo
+-		ldr	\rd, [ \rx, # S3C2410_UTRSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UTRSTAT]
+ 		tst	\rd, #S3C2410_UTRSTAT_TXFE
+ 		beq	1001b
+ 
+@@ -68,7 +68,7 @@
+ 	.endm
+ 
+ 	.macro	waituart,rd,rx
+-		ldr	\rd, [ \rx, # S3C2410_UFCON ]
++		ldr	\rd, [\rx, # S3C2410_UFCON]
+ 		tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
+ 		beq	1001f				@
+ 		@ FIFO enabled...
+@@ -79,7 +79,7 @@
+ 		b	1002f
+ 1001:
+ 		@ idle waiting for non fifo
+-		ldr	\rd, [ \rx, # S3C2410_UTRSTAT ]
++		ldr	\rd, [\rx, # S3C2410_UTRSTAT]
+ 		tst	\rd, #S3C2410_UTRSTAT_TXFE
+ 		beq	1001b
+ 
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index ee99f23..7df49fa 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -12,11 +12,10 @@
+ 
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
++#include <linux/mm_types.h>
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+ 
+-struct vm_area_struct;
+-
+ /*
+  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+  * memory.  For the return value to be meaningful, ADDR must be >=
+@@ -40,7 +39,14 @@ struct vm_area_struct;
+         do{                                                     \
+                 *(pteptr) = (pteval);                           \
+         } while(0)
+-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
++
++extern void purge_tlb_entries(struct mm_struct *, unsigned long);
++
++#define set_pte_at(mm, addr, ptep, pteval)                      \
++	do {                                                    \
++		set_pte(ptep, pteval);                          \
++		purge_tlb_entries(mm, addr);                    \
++	} while (0)
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
+@@ -466,6 +472,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ 		old = pte_val(*ptep);
+ 		new = pte_val(pte_wrprotect(__pte (old)));
+ 	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
++	purge_tlb_entries(mm, addr);
+ #else
+ 	pte_t old_pte = *ptep;
+ 	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 9d18189..fa21463 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -420,6 +420,24 @@ void kunmap_parisc(void *addr)
+ EXPORT_SYMBOL(kunmap_parisc);
+ #endif
+ 
++void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++	unsigned long flags;
++
++	/* Note: purge_tlb_entries can be called at startup with
++	   no context.  */
++
++	/* Disable preemption while we play with %sr1.  */
++	preempt_disable();
++	mtsp(mm->context, 1);
++	purge_tlb_start(flags);
++	pdtlb(addr);
++	pitlb(addr);
++	purge_tlb_end(flags);
++	preempt_enable();
++}
++EXPORT_SYMBOL(purge_tlb_entries);
++
+ void __flush_tlb_range(unsigned long sid, unsigned long start,
+ 		       unsigned long end)
+ {
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
+index d7f6090..39833e0 100644
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0;
+ static void kexec_smp_down(void *arg)
+ {
+ 	local_irq_disable();
++	hard_irq_disable();
++
+ 	mb(); /* make sure our irqs are disabled before we say they are */
+ 	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ 	while(kexec_all_irq_disabled == 0)
+@@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void)
+ 	wake_offline_cpus();
+ 	smp_call_function(kexec_smp_down, NULL, /* wait */0);
+ 	local_irq_disable();
++	hard_irq_disable();
++
+ 	mb(); /* make sure IRQs are disabled before we say they are */
+ 	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ 
+@@ -281,6 +285,7 @@ static void kexec_prepare_cpus(void)
+ 	if (ppc_md.kexec_cpu_down)
+ 		ppc_md.kexec_cpu_down(0, 0);
+ 	local_irq_disable();
++	hard_irq_disable();
+ }
+ 
+ #endif /* SMP */
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 217ce44..e00accf 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -677,6 +677,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ 	} else
+ 		prefix = 0;
+ 
++	/*
++	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
++	 * copying in vcpu load/put. Lets update our copies before we save
++	 * it into the save area
++	 */
++	save_fp_regs(&vcpu->arch.guest_fpregs);
++	save_access_regs(vcpu->run->s.regs.acrs);
++
+ 	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
+ 			vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ 		return -EFAULT;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index c9866b0..b1478f4 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1243,10 +1243,6 @@ config HAVE_ARCH_BOOTMEM
+ 	def_bool y
+ 	depends on X86_32 && NUMA
+ 
+-config HAVE_ARCH_ALLOC_REMAP
+-	def_bool y
+-	depends on X86_32 && NUMA
+-
+ config ARCH_HAVE_MEMORY_PRESENT
+ 	def_bool y
+ 	depends on X86_32 && DISCONTIGMEM
+diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
+index 55728e1..5e0286f 100644
+--- a/arch/x86/include/asm/mmzone_32.h
++++ b/arch/x86/include/asm/mmzone_32.h
+@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
+ 
+ #include <asm/numaq.h>
+ 
+-extern void resume_map_numa_kva(pgd_t *pgd);
+-
+-#else /* !CONFIG_NUMA */
+-
+-static inline void resume_map_numa_kva(pgd_t *pgd) {}
+-
+ #endif /* CONFIG_NUMA */
+ 
+ #ifdef CONFIG_DISCONTIGMEM
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 0a630dd..646d192 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void)
+ 	printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ 	       ms_hyperv.features, ms_hyperv.hints);
+ 
+-	clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
++	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
++		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+ }
+ 
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 19d3fa0..c1e8394 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -193,7 +193,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
+ static void __init setup_node_data(int nid, u64 start, u64 end)
+ {
+ 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
+-	bool remapped = false;
+ 	u64 nd_pa;
+ 	void *nd;
+ 	int tnid;
+@@ -205,37 +204,28 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
+ 	if (end && (end - start) < NODE_MIN_SIZE)
+ 		return;
+ 
+-	/* initialize remap allocator before aligning to ZONE_ALIGN */
+-	init_alloc_remap(nid, start, end);
+-
+ 	start = roundup(start, ZONE_ALIGN);
+ 
+ 	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
+ 	       nid, start, end);
+ 
+ 	/*
+-	 * Allocate node data.  Try remap allocator first, node-local
+-	 * memory and then any node.  Never allocate in DMA zone.
++	 * Allocate node data.  Try node-local memory and then any node.
++	 * Never allocate in DMA zone.
+ 	 */
+-	nd = alloc_remap(nid, nd_size);
+-	if (nd) {
+-		nd_pa = __pa(nd);
+-		remapped = true;
+-	} else {
+-		nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+-		if (!nd_pa) {
+-			pr_err("Cannot find %zu bytes in node %d\n",
+-			       nd_size, nid);
+-			return;
+-		}
+-		nd = __va(nd_pa);
++	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
++	if (!nd_pa) {
++		pr_err("Cannot find %zu bytes in node %d\n",
++		       nd_size, nid);
++		return;
+ 	}
++	nd = __va(nd_pa);
+ 
+ 	/* report and initialize */
+-	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
+-	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
++	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
++	       nd_pa, nd_pa + nd_size - 1);
+ 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+-	if (!remapped && tnid != nid)
++	if (tnid != nid)
+ 		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
+ 
+ 	node_data[nid] = nd;
+diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
+index 534255a..73a6d73 100644
+--- a/arch/x86/mm/numa_32.c
++++ b/arch/x86/mm/numa_32.c
+@@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
+ 
+ extern unsigned long highend_pfn, highstart_pfn;
+ 
+-#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
+-
+-static void *node_remap_start_vaddr[MAX_NUMNODES];
+-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
+-
+-/*
+- * Remap memory allocator
+- */
+-static unsigned long node_remap_start_pfn[MAX_NUMNODES];
+-static void *node_remap_end_vaddr[MAX_NUMNODES];
+-static void *node_remap_alloc_vaddr[MAX_NUMNODES];
+-
+-/**
+- * alloc_remap - Allocate remapped memory
+- * @nid: NUMA node to allocate memory from
+- * @size: The size of allocation
+- *
+- * Allocate @size bytes from the remap area of NUMA node @nid.  The
+- * size of the remap area is predetermined by init_alloc_remap() and
+- * only the callers considered there should call this function.  For
+- * more info, please read the comment on top of init_alloc_remap().
+- *
+- * The caller must be ready to handle allocation failure from this
+- * function and fall back to regular memory allocator in such cases.
+- *
+- * CONTEXT:
+- * Single CPU early boot context.
+- *
+- * RETURNS:
+- * Pointer to the allocated memory on success, %NULL on failure.
+- */
+-void *alloc_remap(int nid, unsigned long size)
+-{
+-	void *allocation = node_remap_alloc_vaddr[nid];
+-
+-	size = ALIGN(size, L1_CACHE_BYTES);
+-
+-	if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
+-		return NULL;
+-
+-	node_remap_alloc_vaddr[nid] += size;
+-	memset(allocation, 0, size);
+-
+-	return allocation;
+-}
+-
+-#ifdef CONFIG_HIBERNATION
+-/**
+- * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+- *                       during resume from hibernation
+- * @pgd_base - temporary resume page directory
+- */
+-void resume_map_numa_kva(pgd_t *pgd_base)
+-{
+-	int node;
+-
+-	for_each_online_node(node) {
+-		unsigned long start_va, start_pfn, nr_pages, pfn;
+-
+-		start_va = (unsigned long)node_remap_start_vaddr[node];
+-		start_pfn = node_remap_start_pfn[node];
+-		nr_pages = (node_remap_end_vaddr[node] -
+-			    node_remap_start_vaddr[node]) >> PAGE_SHIFT;
+-
+-		printk(KERN_DEBUG "%s: node %d\n", __func__, node);
+-
+-		for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
+-			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+-			pgd_t *pgd = pgd_base + pgd_index(vaddr);
+-			pud_t *pud = pud_offset(pgd, vaddr);
+-			pmd_t *pmd = pmd_offset(pud, vaddr);
+-
+-			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+-						PAGE_KERNEL_LARGE_EXEC));
+-
+-			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+-				__func__, vaddr, start_pfn + pfn);
+-		}
+-	}
+-}
+-#endif
+-
+-/**
+- * init_alloc_remap - Initialize remap allocator for a NUMA node
+- * @nid: NUMA node to initizlie remap allocator for
+- *
+- * NUMA nodes may end up without any lowmem.  As allocating pgdat and
+- * memmap on a different node with lowmem is inefficient, a special
+- * remap allocator is implemented which can be used by alloc_remap().
+- *
+- * For each node, the amount of memory which will be necessary for
+- * pgdat and memmap is calculated and two memory areas of the size are
+- * allocated - one in the node and the other in lowmem; then, the area
+- * in the node is remapped to the lowmem area.
+- *
+- * As pgdat and memmap must be allocated in lowmem anyway, this
+- * doesn't waste lowmem address space; however, the actual lowmem
+- * which gets remapped over is wasted.  The amount shouldn't be
+- * problematic on machines this feature will be used.
+- *
+- * Initialization failure isn't fatal.  alloc_remap() is used
+- * opportunistically and the callers will fall back to other memory
+- * allocation mechanisms on failure.
+- */
+-void __init init_alloc_remap(int nid, u64 start, u64 end)
+-{
+-	unsigned long start_pfn = start >> PAGE_SHIFT;
+-	unsigned long end_pfn = end >> PAGE_SHIFT;
+-	unsigned long size, pfn;
+-	u64 node_pa, remap_pa;
+-	void *remap_va;
+-
+-	/*
+-	 * The acpi/srat node info can show hot-add memroy zones where
+-	 * memory could be added but not currently present.
+-	 */
+-	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
+-	       nid, start_pfn, end_pfn);
+-
+-	/* calculate the necessary space aligned to large page size */
+-	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
+-	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+-	size = ALIGN(size, LARGE_PAGE_BYTES);
+-
+-	/* allocate node memory and the lowmem remap area */
+-	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
+-	if (!node_pa) {
+-		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
+-			   size, nid);
+-		return;
+-	}
+-	memblock_reserve(node_pa, size);
+-
+-	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+-					  max_low_pfn << PAGE_SHIFT,
+-					  size, LARGE_PAGE_BYTES);
+-	if (!remap_pa) {
+-		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
+-			   size, nid);
+-		memblock_free(node_pa, size);
+-		return;
+-	}
+-	memblock_reserve(remap_pa, size);
+-	remap_va = phys_to_virt(remap_pa);
+-
+-	/* perform actual remap */
+-	for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
+-		set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
+-			    (node_pa >> PAGE_SHIFT) + pfn,
+-			    PAGE_KERNEL_LARGE);
+-
+-	/* initialize remap allocator parameters */
+-	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
+-	node_remap_start_vaddr[nid] = remap_va;
+-	node_remap_end_vaddr[nid] = remap_va + size;
+-	node_remap_alloc_vaddr[nid] = remap_va;
+-
+-	printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
+-	       nid, node_pa, node_pa + size, remap_va, remap_va + size);
+-}
+-
+ void __init initmem_init(void)
+ {
+ 	x86_numa_init();
+diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
+index 7178c3a..ad86ec9 100644
+--- a/arch/x86/mm/numa_internal.h
++++ b/arch/x86/mm/numa_internal.h
+@@ -21,12 +21,6 @@ void __init numa_reset_distance(void);
+ 
+ void __init x86_numa_init(void);
+ 
+-#ifdef CONFIG_X86_64
+-static inline void init_alloc_remap(int nid, u64 start, u64 end)	{ }
+-#else
+-void __init init_alloc_remap(int nid, u64 start, u64 end);
+-#endif
+-
+ #ifdef CONFIG_NUMA_EMU
+ void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ 			   int numa_dist_cnt);
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
+index 74202c1..7d28c88 100644
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -129,8 +129,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
+ 		}
+ 	}
+ 
+-	resume_map_numa_kva(pgd_base);
+-
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index d69cc6c..67bc7ba 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+ 		if (per_cpu(lock_spinners, cpu) == xl) {
+ 			ADD_STATS(released_slow_kicked, 1);
+ 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+-			break;
+ 		}
+ 	}
+ }
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 26a06b8..b850cec 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -294,7 +294,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
+ 	struct device *dev;
+ 	int error = 0;
+ 
+-	if (!bus)
++	if (!bus || !bus->p)
+ 		return -EINVAL;
+ 
+ 	klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -328,7 +328,7 @@ struct device *bus_find_device(struct bus_type *bus,
+ 	struct klist_iter i;
+ 	struct device *dev;
+ 
+-	if (!bus)
++	if (!bus || !bus->p)
+ 		return NULL;
+ 
+ 	klist_iter_init_node(&bus->p->klist_devices, &i,
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 1b1cbb5..97fc774 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -160,6 +160,8 @@ static int deferred_probe_initcall(void)
+ 
+ 	driver_deferred_probe_enable = true;
+ 	driver_deferred_probe_trigger();
++	/* Sort as many dependencies as possible before exiting initcalls */
++	flush_workqueue(deferred_wq);
+ 	return 0;
+ }
+ late_initcall(deferred_probe_initcall);
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 9dcf76a..31dd451 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+ 	int op_len, err;
+ 	void *req_buf;
+ 
+-	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
++	if (!(((u64)1 << (u64)op) & port->operations))
+ 		return -EOPNOTSUPP;
+ 
+ 	switch (op) {
+diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
+index bc6f5fa..819dfda 100644
+--- a/drivers/dca/dca-core.c
++++ b/drivers/dca/dca-core.c
+@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+ 
+ 	raw_spin_lock_irqsave(&dca_lock, flags);
+ 
++	if (list_empty(&dca_domains)) {
++		raw_spin_unlock_irqrestore(&dca_lock, flags);
++		return;
++	}
++
+ 	list_del(&dca->node);
+ 
+ 	pci_rc = dca_pci_rc_from_dev(dev);
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 4fd363f..c61e672 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2023,7 +2023,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+ 
+ 	switch (bpp) {
+ 	case 8:
+-		fmt = DRM_FORMAT_RGB332;
++		fmt = DRM_FORMAT_C8;
+ 		break;
+ 	case 16:
+ 		if (depth == 15)
+@@ -3409,6 +3409,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ 			  int *bpp)
+ {
+ 	switch (format) {
++	case DRM_FORMAT_C8:
+ 	case DRM_FORMAT_RGB332:
+ 	case DRM_FORMAT_BGR233:
+ 		*depth = 8;
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 9d9835a..384edc6 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -1769,7 +1769,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ 	num_modes += add_cvt_modes(connector, edid);
+ 	num_modes += add_standard_modes(connector, edid);
+ 	num_modes += add_established_modes(connector, edid);
+-	num_modes += add_inferred_modes(connector, edid);
++	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
++		num_modes += add_inferred_modes(connector, edid);
+ 	num_modes += add_cea_modes(connector, edid);
+ 
+ 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+index 37c9a52..767782a 100644
+--- a/drivers/gpu/drm/drm_usb.c
++++ b/drivers/gpu/drm/drm_usb.c
+@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
+ 
+ 	usbdev = interface_to_usbdev(interface);
+ 	dev->usbdev = usbdev;
+-	dev->dev = &usbdev->dev;
++	dev->dev = &interface->dev;
+ 
+ 	mutex_lock(&drm_global_mutex);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 3c9b9c5..67f6db5 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -142,8 +142,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
+ 	.vco = { .min = 1400000, .max = 2800000 },
+ 	.n = { .min = 1, .max = 6 },
+ 	.m = { .min = 70, .max = 120 },
+-	.m1 = { .min = 10, .max = 22 },
+-	.m2 = { .min = 5, .max = 9 },
++	.m1 = { .min = 8, .max = 18 },
++	.m2 = { .min = 3, .max = 7 },
+ 	.p = { .min = 5, .max = 80 },
+ 	.p1 = { .min = 1, .max = 8 },
+ 	.p2 = { .dot_limit = 200000,
+@@ -3303,6 +3303,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	int pipe = intel_crtc->pipe;
+ 	int plane = intel_crtc->plane;
++	u32 pctl;
+ 
+ 	if (!intel_crtc->active)
+ 		return;
+@@ -3318,6 +3319,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ 
+ 	intel_disable_plane(dev_priv, plane, pipe);
+ 	intel_disable_pipe(dev_priv, pipe);
++
++	/* Disable pannel fitter if it is on this pipe. */
++	pctl = I915_READ(PFIT_CONTROL);
++	if ((pctl & PFIT_ENABLE) &&
++	    ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
++		I915_WRITE(PFIT_CONTROL, 0);
++
+ 	intel_disable_pll(dev_priv, pipe);
+ 
+ 	intel_crtc->active = false;
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 15594a3..ebbfbd2 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -258,8 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		radeon_crtc->enabled = true;
+ 		/* adjust pm to dpms changes BEFORE enabling crtcs */
+ 		radeon_pm_compute_clocks(rdev);
+-		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+-			atombios_powergate_crtc(crtc, ATOM_DISABLE);
+ 		atombios_enable_crtc(crtc, ATOM_ENABLE);
+ 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+ 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+@@ -277,8 +275,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ 		atombios_enable_crtc(crtc, ATOM_DISABLE);
+ 		radeon_crtc->enabled = false;
+-		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+-			atombios_powergate_crtc(crtc, ATOM_ENABLE);
+ 		/* adjust pm to dpms changes AFTER disabling crtcs */
+ 		radeon_pm_compute_clocks(rdev);
+ 		break;
+@@ -1670,6 +1666,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ 	int i;
+ 
+ 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
++	if (ASIC_IS_DCE6(rdev))
++		atombios_powergate_crtc(crtc, ATOM_ENABLE);
+ 
+ 	for (i = 0; i < rdev->num_crtc; i++) {
+ 		if (rdev->mode_info.crtcs[i] &&
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index e760575..2b8c4fd 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -74,6 +74,8 @@ struct udl_framebuffer {
+ 	struct drm_framebuffer base;
+ 	struct udl_gem_object *obj;
+ 	bool active_16; /* active on the 16-bit channel */
++	int x1, y1, x2, y2; /* dirty rect */
++	spinlock_t dirty_lock;
+ };
+ 
+ #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index b9282cf..f02d223 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -22,9 +22,9 @@
+ 
+ #include "drm_fb_helper.h"
+ 
+-#define DL_DEFIO_WRITE_DELAY    5 /* fb_deferred_io.delay in jiffies */
++#define DL_DEFIO_WRITE_DELAY    (HZ/20) /* fb_deferred_io.delay in jiffies */
+ 
+-static int fb_defio = 1;  /* Optionally enable experimental fb_defio mmap support */
++static int fb_defio = 0;  /* Optionally enable experimental fb_defio mmap support */
+ static int fb_bpp = 16;
+ 
+ module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+@@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ 	struct urb *urb;
+ 	int aligned_x;
+ 	int bpp = (fb->base.bits_per_pixel / 8);
++	int x2, y2;
++	bool store_for_later = false;
++	unsigned long flags;
+ 
+ 	if (!fb->active_16)
+ 		return 0;
+@@ -160,8 +163,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ 	if (!fb->obj->vmapping)
+ 		udl_gem_vmap(fb->obj);
+ 
+-	start_cycles = get_cycles();
+-
+ 	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+ 	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+ 	x = aligned_x;
+@@ -171,19 +172,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+ 	    (y + height > fb->base.height))
+ 		return -EINVAL;
+ 
++	/* if we are in atomic just store the info
++	   can't test inside spin lock */
++	if (in_atomic())
++		store_for_later = true;
++
++	x2 = x + width - 1;
++	y2 = y + height - 1;
++
++	spin_lock_irqsave(&fb->dirty_lock, flags);
++
++	if (fb->y1 < y)
++		y = fb->y1;
++	if (fb->y2 > y2)
++		y2 = fb->y2;
++	if (fb->x1 < x)
++		x = fb->x1;
++	if (fb->x2 > x2)
++		x2 = fb->x2;
++
++	if (store_for_later) {
++		fb->x1 = x;
++		fb->x2 = x2;
++		fb->y1 = y;
++		fb->y2 = y2;
++		spin_unlock_irqrestore(&fb->dirty_lock, flags);
++		return 0;
++	}
++
++	fb->x1 = fb->y1 = INT_MAX;
++	fb->x2 = fb->y2 = 0;
++
++	spin_unlock_irqrestore(&fb->dirty_lock, flags);
++	start_cycles = get_cycles();
++
+ 	urb = udl_get_urb(dev);
+ 	if (!urb)
+ 		return 0;
+ 	cmd = urb->transfer_buffer;
+ 
+-	for (i = y; i < y + height ; i++) {
++	for (i = y; i <= y2 ; i++) {
+ 		const int line_offset = fb->base.pitches[0] * i;
+ 		const int byte_offset = line_offset + (x * bpp);
+ 		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
+ 		if (udl_render_hline(dev, bpp, &urb,
+ 				     (char *) fb->obj->vmapping,
+ 				     &cmd, byte_offset, dev_byte_offset,
+-				     width * bpp,
++				     (x2 - x + 1) * bpp,
+ 				     &bytes_identical, &bytes_sent))
+ 			goto error;
+ 	}
+@@ -408,6 +443,7 @@ udl_framebuffer_init(struct drm_device *dev,
+ {
+ 	int ret;
+ 
++	spin_lock_init(&ufb->dirty_lock);
+ 	ufb->obj = obj;
+ 	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
+ 	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
+index aa95870..9e57285 100644
+--- a/drivers/hid/hid-wiimote-ext.c
++++ b/drivers/hid/hid-wiimote-ext.c
+@@ -378,14 +378,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
+ 
+ 	if (ext->motionp) {
+ 		input_report_key(ext->input,
+-			wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
++			wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04));
+ 		input_report_key(ext->input,
+-			wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
++			wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08));
+ 	} else {
+ 		input_report_key(ext->input,
+-			wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
++			wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01));
+ 		input_report_key(ext->input,
+-			wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
++			wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02));
+ 	}
+ 
+ 	input_sync(ext->input);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 5d71873..1711924 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4212,13 +4212,19 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+ 	/*
+ 	 * Mobile 4 Series Chipset neglects to set RWBF capability,
+-	 * but needs it:
++	 * but needs it. Same seems to hold for the desktop versions.
+ 	 */
+ 	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ 	rwbf_quirk = 1;
+ }
+ 
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
+ 
+ #define GGC 0x52
+ #define GGC_MEMORY_SIZE_MASK	(0xf << 8)
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 8abdaf6..be46052 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -232,15 +232,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+ 
+ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+ {
++	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++	struct pltfm_imx_data *imx_data = pltfm_host->priv;
++
+ 	if (unlikely(reg == SDHCI_HOST_VERSION)) {
+-		u16 val = readw(host->ioaddr + (reg ^ 2));
+-		/*
+-		 * uSDHC supports SDHCI v3.0, but it's encoded as value
+-		 * 0x3 in host controller version register, which violates
+-		 * SDHCI_SPEC_300 definition.  Work it around here.
+-		 */
+-		if ((val & SDHCI_SPEC_VER_MASK) == 3)
+-			return --val;
++		reg ^= 2;
++		if (is_imx6q_usdhc(imx_data)) {
++			/*
++			 * The usdhc register returns a wrong host version.
++			 * Correct it here.
++			 */
++			return SDHCI_SPEC_300;
++		}
+ 	}
+ 
+ 	return readw(host->ioaddr + reg);
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 315b96e..9fdd198 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
+ 
+ /* DMA engine tuning knobs */
+ #define B43_TXRING_SLOTS		256
+-#define B43_RXRING_SLOTS		64
++#define B43_RXRING_SLOTS		256
+ #define B43_DMA0_RX_FW598_BUFSIZE	(B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
+ #define B43_DMA0_RX_FW351_BUFSIZE	(B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
+ 
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index be20cf7..af30777 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = {
+ 	{USB_DEVICE(0x06b9, 0x0121)},	/* Thomson SpeedTouch 121g */
+ 	{USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+ 	{USB_DEVICE(0x0803, 0x4310)},	/* Zoom 4410a */
+-	{USB_DEVICE(0x083a, 0x4503)},	/* T-Com Sinus 154 data II */
+ 	{USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
++	{USB_DEVICE(0x083a, 0x4531)},	/* T-Com Sinus 154 data II */
+ 	{USB_DEVICE(0x083a, 0xc501)},	/* Zoom Wireless-G 4410 */
+ 	{USB_DEVICE(0x083a, 0xf503)},	/* Accton FD7050E ver 1010ec  */
+ 	{USB_DEVICE(0x0846, 0x4240)},	/* Netgear WG111 (v2) */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 61e5768..8cf41bb 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ 	/* RTL8188CUS-VL */
+ 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
++	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ 	/* 8188 Combo for BC4 */
+ 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+ 
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 17cd028..6ce8484 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -42,8 +42,12 @@
+ 
+ static void usbctrl_async_callback(struct urb *urb)
+ {
+-	if (urb)
+-		kfree(urb->context);
++	if (urb) {
++		/* free dr */
++		kfree(urb->setup_packet);
++		/* free databuf */
++		kfree(urb->transfer_buffer);
++	}
+ }
+ 
+ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+@@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+ 	u8 reqtype;
+ 	struct usb_ctrlrequest *dr;
+ 	struct urb *urb;
+-	struct rtl819x_async_write_data {
+-		u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+-		struct usb_ctrlrequest dr;
+-	} *buf;
++	const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
++	u8 *databuf;
++
++	if (WARN_ON_ONCE(len > databuf_maxlen))
++		len = databuf_maxlen;
+ 
+ 	pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ 	reqtype =  REALTEK_USB_VENQT_WRITE;
+ 
+-	buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+-	if (!buf)
++	dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
++	if (!dr)
+ 		return -ENOMEM;
+ 
++	databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
++	if (!databuf) {
++		kfree(dr);
++		return -ENOMEM;
++	}
++
+ 	urb = usb_alloc_urb(0, GFP_ATOMIC);
+ 	if (!urb) {
+-		kfree(buf);
++		kfree(databuf);
++		kfree(dr);
+ 		return -ENOMEM;
+ 	}
+ 
+-	dr = &buf->dr;
+-
+ 	dr->bRequestType = reqtype;
+ 	dr->bRequest = request;
+ 	dr->wValue = cpu_to_le16(value);
+ 	dr->wIndex = cpu_to_le16(index);
+ 	dr->wLength = cpu_to_le16(len);
+ 	/* data are already in little-endian order */
+-	memcpy(buf, pdata, len);
++	memcpy(databuf, pdata, len);
+ 	usb_fill_control_urb(urb, udev, pipe,
+-			     (unsigned char *)dr, buf, len,
+-			     usbctrl_async_callback, buf);
++			     (unsigned char *)dr, databuf, len,
++			     usbctrl_async_callback, NULL);
+ 	rc = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (rc < 0)
+-		kfree(buf);
++	if (rc < 0) {
++		kfree(databuf);
++		kfree(dr);
++	}
+ 	usb_free_urb(urb);
+ 	return rc;
+ }
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index b8c5193..221f426 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
+ static void xenvif_down(struct xenvif *vif)
+ {
+ 	disable_irq(vif->irq);
++	del_timer_sync(&vif->credit_timeout);
+ 	xen_netbk_deschedule_xenvif(vif);
+ 	xen_netbk_remove_xenvif(vif);
+ }
+@@ -363,8 +364,6 @@ void xenvif_disconnect(struct xenvif *vif)
+ 	atomic_dec(&vif->refcnt);
+ 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+ 
+-	del_timer_sync(&vif->credit_timeout);
+-
+ 	if (vif->irq)
+ 		unbind_from_irqhandler(vif->irq, vif);
+ 
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index e2793d0..2bdf798 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -883,13 +883,13 @@ static int netbk_count_requests(struct xenvif *vif,
+ 		if (frags >= work_to_do) {
+ 			netdev_err(vif->dev, "Need more frags\n");
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -ENODATA;
+ 		}
+ 
+ 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
+ 			netdev_err(vif->dev, "Too many frags\n");
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -E2BIG;
+ 		}
+ 
+ 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+@@ -897,7 +897,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ 		if (txp->size > first->size) {
+ 			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -EIO;
+ 		}
+ 
+ 		first->size -= txp->size;
+@@ -907,7 +907,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ 			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ 				 txp->offset, txp->size);
+ 			netbk_fatal_tx_err(vif);
+-			return -frags;
++			return -EINVAL;
+ 		}
+ 	} while ((txp++)->flags & XEN_NETTXF_more_data);
+ 	return frags;
+diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
+index 86e4a1a..6bb02ab 100644
+--- a/drivers/pcmcia/vrc4171_card.c
++++ b/drivers/pcmcia/vrc4171_card.c
+@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
+ 	socket = &vrc4171_sockets[slot];
+ 	socket->csc_irq = search_nonuse_irq();
+ 	socket->io_irq = search_nonuse_irq();
++	spin_lock_init(&socket->lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
+index d74e9ae..f97b2aa 100644
+--- a/drivers/s390/kvm/kvm_virtio.c
++++ b/drivers/s390/kvm/kvm_virtio.c
+@@ -418,6 +418,26 @@ static void kvm_extint_handler(struct ext_code ext_code,
+ }
+ 
+ /*
++ * For s390-virtio, we expect a page above main storage containing
++ * the virtio configuration. Try to actually load from this area
++ * in order to figure out if the host provides this page.
++ */
++static int __init test_devices_support(unsigned long addr)
++{
++	int ret = -EIO;
++
++	asm volatile(
++		"0:	lura	0,%1\n"
++		"1:	xgr	%0,%0\n"
++		"2:\n"
++		EX_TABLE(0b,2b)
++		EX_TABLE(1b,2b)
++		: "+d" (ret)
++		: "a" (addr)
++		: "0", "cc");
++	return ret;
++}
++/*
+  * Init function for virtio
+  * devices are in a single page above top of "normal" mem
+  */
+@@ -428,21 +448,23 @@ static int __init kvm_devices_init(void)
+ 	if (!MACHINE_IS_KVM)
+ 		return -ENODEV;
+ 
++	if (test_devices_support(real_memory_size) < 0)
++		return -ENODEV;
++
++	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
++	if (rc)
++		return rc;
++
++	kvm_devices = (void *) real_memory_size;
++
+ 	kvm_root = root_device_register("kvm_s390");
+ 	if (IS_ERR(kvm_root)) {
+ 		rc = PTR_ERR(kvm_root);
+ 		printk(KERN_ERR "Could not register kvm_s390 root device");
++		vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+ 		return rc;
+ 	}
+ 
+-	rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+-	if (rc) {
+-		root_device_unregister(kvm_root);
+-		return rc;
+-	}
+-
+-	kvm_devices = (void *) real_memory_size;
+-
+ 	INIT_WORK(&hotplug_work, hotplug_devices);
+ 
+ 	service_subclass_irq_register();
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index b67c107..cf67ce5 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -136,6 +136,11 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ 	/* Device config is special, because it must work on
+ 	 * an unconfigured device. */
+ 	if (cmd == COMEDI_DEVCONFIG) {
++		if (minor >= COMEDI_NUM_BOARD_MINORS) {
++			/* Device config not appropriate on non-board minors. */
++			rc = -ENOTTY;
++			goto done;
++		}
+ 		rc = do_devconfig_ioctl(dev,
+ 					(struct comedi_devconfig __user *)arg);
+ 		if (rc == 0)
+diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
+index c612ab5..f759352 100644
+--- a/drivers/staging/vt6656/usbpipe.c
++++ b/drivers/staging/vt6656/usbpipe.c
+@@ -168,6 +168,11 @@ int PIPEnsControlOut(
+     if (pDevice->Flags & fMP_CONTROL_WRITES)
+         return STATUS_FAILURE;
+ 
++	if (pDevice->Flags & fMP_CONTROL_READS)
++		return STATUS_FAILURE;
++
++	MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
++
+ 	pDevice->sUsbCtlRequest.bRequestType = 0x40;
+ 	pDevice->sUsbCtlRequest.bRequest = byRequest;
+ 	pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
+@@ -182,12 +187,13 @@ int PIPEnsControlOut(
+ 
+ 	ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ 	if (ntStatus != 0) {
+-		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
++		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++			"control send request submission failed: %d\n",
++				ntStatus);
++		MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
+ 		return STATUS_FAILURE;
+ 	}
+-	else {
+-	    MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
+-	}
++
+ 	spin_unlock_irq(&pDevice->lock);
+     for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
+ 
+@@ -227,6 +233,11 @@ int PIPEnsControlIn(
+     if (pDevice->Flags & fMP_CONTROL_READS)
+ 	return STATUS_FAILURE;
+ 
++	if (pDevice->Flags & fMP_CONTROL_WRITES)
++		return STATUS_FAILURE;
++
++	MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
++
+ 	pDevice->sUsbCtlRequest.bRequestType = 0xC0;
+ 	pDevice->sUsbCtlRequest.bRequest = byRequest;
+ 	pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
+@@ -240,10 +251,11 @@ int PIPEnsControlIn(
+ 
+ 	ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ 	if (ntStatus != 0) {
+-		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
+-	}else {
+-		MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
+-    }
++		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++			"control request submission failed: %d\n", ntStatus);
++		MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
++		return STATUS_FAILURE;
++	}
+ 
+ 	spin_unlock_irq(&pDevice->lock);
+     for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 26c62f0..4ecf9d6 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1230,6 +1230,8 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+ 
+ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+ {
++	int block_size = dev->se_sub_dev->se_dev_attrib.block_size;
++
+ 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ 		pr_err("dev[%p]: Unable to change SE Device"
+ 			" fabric_max_sectors while dev_export_obj: %d count exists\n",
+@@ -1267,8 +1269,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+ 	/*
+ 	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+ 	 */
++	if (!block_size) {
++		block_size = 512;
++		pr_warn("Defaulting to 512 for zero block_size\n");
++	}
+ 	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+-						      dev->se_sub_dev->se_dev_attrib.block_size);
++						      block_size);
+ 
+ 	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+ 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 90dff82..4a418e4 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1692,6 +1692,8 @@ static inline void dlci_put(struct gsm_dlci *dlci)
+ 	kref_put(&dlci->ref, gsm_dlci_free);
+ }
+ 
++static void gsm_destroy_network(struct gsm_dlci *dlci);
++
+ /**
+  *	gsm_dlci_release		-	release DLCI
+  *	@dlci: DLCI to destroy
+@@ -1705,9 +1707,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
+ {
+ 	struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ 	if (tty) {
++		mutex_lock(&dlci->mutex);
++		gsm_destroy_network(dlci);
++		mutex_unlock(&dlci->mutex);
++
++		/* tty_vhangup needs the tty_lock, so unlock and
++		   relock after doing the hangup. */
++		tty_unlock();
+ 		tty_vhangup(tty);
++		tty_lock();
++		tty_port_tty_set(&dlci->port, NULL);
+ 		tty_kref_put(tty);
+ 	}
++	dlci->state = DLCI_CLOSED;
+ 	dlci_put(dlci);
+ }
+ 
+@@ -2933,6 +2945,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+ 
+ 	if (dlci == NULL)
+ 		return;
++	if (dlci->state == DLCI_CLOSED)
++		return;
+ 	mutex_lock(&dlci->mutex);
+ 	gsm_destroy_network(dlci);
+ 	mutex_unlock(&dlci->mutex);
+@@ -2951,6 +2965,8 @@ out:
+ static void gsmtty_hangup(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return;
+ 	tty_port_hangup(&dlci->port);
+ 	gsm_dlci_begin_close(dlci);
+ }
+@@ -2958,9 +2974,12 @@ static void gsmtty_hangup(struct tty_struct *tty)
+ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ 								    int len)
+ {
++	int sent;
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 	/* Stuff the bytes into the fifo queue */
+-	int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
++	sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ 	/* Need to kick the channel */
+ 	gsm_dlci_data_kick(dlci);
+ 	return sent;
+@@ -2969,18 +2988,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ static int gsmtty_write_room(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 	return TX_SIZE - kfifo_len(dlci->fifo);
+ }
+ 
+ static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 	return kfifo_len(dlci->fifo);
+ }
+ 
+ static void gsmtty_flush_buffer(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return;
+ 	/* Caution needed: If we implement reliable transport classes
+ 	   then the data being transmitted can't simply be junked once
+ 	   it has first hit the stack. Until then we can just blow it
+@@ -2999,6 +3024,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+ static int gsmtty_tiocmget(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 	return dlci->modem_rx;
+ }
+ 
+@@ -3008,6 +3035,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
+ 	struct gsm_dlci *dlci = tty->driver_data;
+ 	unsigned int modem_tx = dlci->modem_tx;
+ 
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 	modem_tx &= ~clear;
+ 	modem_tx |= set;
+ 
+@@ -3026,6 +3055,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
+ 	struct gsm_netconfig nc;
+ 	int index;
+ 
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 	switch (cmd) {
+ 	case GSMIOC_ENABLE_NET:
+ 		if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
+@@ -3052,6 +3083,9 @@ static int gsmtty_ioctl(struct tty_struct *tty,
+ 
+ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ {
++	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return;
+ 	/* For the moment its fixed. In actual fact the speed information
+ 	   for the virtual channel can be propogated in both directions by
+ 	   the RPN control message. This however rapidly gets nasty as we
+@@ -3063,6 +3097,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ static void gsmtty_throttle(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return;
+ 	if (tty->termios->c_cflag & CRTSCTS)
+ 		dlci->modem_tx &= ~TIOCM_DTR;
+ 	dlci->throttled = 1;
+@@ -3073,6 +3109,8 @@ static void gsmtty_throttle(struct tty_struct *tty)
+ static void gsmtty_unthrottle(struct tty_struct *tty)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
++	if (dlci->state == DLCI_CLOSED)
++		return;
+ 	if (tty->termios->c_cflag & CRTSCTS)
+ 		dlci->modem_tx |= TIOCM_DTR;
+ 	dlci->throttled = 0;
+@@ -3084,6 +3122,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+ {
+ 	struct gsm_dlci *dlci = tty->driver_data;
+ 	int encode = 0;	/* Off */
++	if (dlci->state == DLCI_CLOSED)
++		return -EINVAL;
+ 
+ 	if (state == -1)	/* "On indefinitely" - we can't encode this
+ 				    properly */
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index a1b9a2f..f8d03da 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ 	if (opt & TERMIOS_WAIT) {
+ 		tty_wait_until_sent(tty, 0);
+ 		if (signal_pending(current))
+-			return -EINTR;
++			return -ERESTARTSYS;
+ 	}
+ 
+ 	tty_set_termios(tty, &tmp_termios);
+@@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
+ 	if (opt & TERMIOS_WAIT) {
+ 		tty_wait_until_sent(tty, 0);
+ 		if (signal_pending(current))
+-			return -EINTR;
++			return -ERESTARTSYS;
+ 	}
+ 
+ 	mutex_lock(&tty->termios_mutex);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 18d06be..268294c 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -656,7 +656,7 @@ static inline void save_screen(struct vc_data *vc)
+  *	Redrawing of screen
+  */
+ 
+-static void clear_buffer_attributes(struct vc_data *vc)
++void clear_buffer_attributes(struct vc_data *vc)
+ {
+ 	unsigned short *p = (unsigned short *)vc->vc_origin;
+ 	int count = vc->vc_screenbuf_size / 2;
+@@ -3017,7 +3017,7 @@ int __init vty_init(const struct file_operations *console_fops)
+ 
+ static struct class *vtconsole_class;
+ 
+-static int bind_con_driver(const struct consw *csw, int first, int last,
++static int do_bind_con_driver(const struct consw *csw, int first, int last,
+ 			   int deflt)
+ {
+ 	struct module *owner = csw->owner;
+@@ -3028,7 +3028,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ 	if (!try_module_get(owner))
+ 		return -ENODEV;
+ 
+-	console_lock();
++	WARN_CONSOLE_UNLOCKED();
+ 
+ 	/* check if driver is registered */
+ 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3113,11 +3113,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ 
+ 	retval = 0;
+ err:
+-	console_unlock();
+ 	module_put(owner);
+ 	return retval;
+ };
+ 
++
++static int bind_con_driver(const struct consw *csw, int first, int last,
++			   int deflt)
++{
++	int ret;
++
++	console_lock();
++	ret = do_bind_con_driver(csw, first, last, deflt);
++	console_unlock();
++	return ret;
++}
++
+ #ifdef CONFIG_VT_HW_CONSOLE_BINDING
+ static int con_is_graphics(const struct consw *csw, int first, int last)
+ {
+@@ -3154,6 +3165,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
+  */
+ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ {
++	int retval;
++
++	console_lock();
++	retval = do_unbind_con_driver(csw, first, last, deflt);
++	console_unlock();
++	return retval;
++}
++EXPORT_SYMBOL(unbind_con_driver);
++
++/* unlocked version of unbind_con_driver() */
++int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
++{
+ 	struct module *owner = csw->owner;
+ 	const struct consw *defcsw = NULL;
+ 	struct con_driver *con_driver = NULL, *con_back = NULL;
+@@ -3162,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 	if (!try_module_get(owner))
+ 		return -ENODEV;
+ 
+-	console_lock();
++	WARN_CONSOLE_UNLOCKED();
+ 
+ 	/* check if driver is registered and if it is unbindable */
+ 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3175,10 +3198,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 		}
+ 	}
+ 
+-	if (retval) {
+-		console_unlock();
++	if (retval)
+ 		goto err;
+-	}
+ 
+ 	retval = -ENODEV;
+ 
+@@ -3194,15 +3215,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 		}
+ 	}
+ 
+-	if (retval) {
+-		console_unlock();
++	if (retval)
+ 		goto err;
+-	}
+ 
+-	if (!con_is_bound(csw)) {
+-		console_unlock();
++	if (!con_is_bound(csw))
+ 		goto err;
+-	}
+ 
+ 	first = max(first, con_driver->first);
+ 	last = min(last, con_driver->last);
+@@ -3229,15 +3246,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ 	if (!con_is_bound(csw))
+ 		con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+ 
+-	console_unlock();
+ 	/* ignore return value, binding should not fail */
+-	bind_con_driver(defcsw, first, last, deflt);
++	do_bind_con_driver(defcsw, first, last, deflt);
+ err:
+ 	module_put(owner);
+ 	return retval;
+ 
+ }
+-EXPORT_SYMBOL(unbind_con_driver);
++EXPORT_SYMBOL_GPL(do_unbind_con_driver);
+ 
+ static int vt_bind(struct con_driver *con)
+ {
+@@ -3522,28 +3538,18 @@ int con_debug_leave(void)
+ }
+ EXPORT_SYMBOL_GPL(con_debug_leave);
+ 
+-/**
+- * register_con_driver - register console driver to console layer
+- * @csw: console driver
+- * @first: the first console to take over, minimum value is 0
+- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+- *
+- * DESCRIPTION: This function registers a console driver which can later
+- * bind to a range of consoles specified by @first and @last. It will
+- * also initialize the console driver by calling con_startup().
+- */
+-int register_con_driver(const struct consw *csw, int first, int last)
++static int do_register_con_driver(const struct consw *csw, int first, int last)
+ {
+ 	struct module *owner = csw->owner;
+ 	struct con_driver *con_driver;
+ 	const char *desc;
+ 	int i, retval = 0;
+ 
++	WARN_CONSOLE_UNLOCKED();
++
+ 	if (!try_module_get(owner))
+ 		return -ENODEV;
+ 
+-	console_lock();
+-
+ 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ 		con_driver = &registered_con_driver[i];
+ 
+@@ -3596,10 +3602,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
+ 	}
+ 
+ err:
+-	console_unlock();
+ 	module_put(owner);
+ 	return retval;
+ }
++
++/**
++ * register_con_driver - register console driver to console layer
++ * @csw: console driver
++ * @first: the first console to take over, minimum value is 0
++ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
++ *
++ * DESCRIPTION: This function registers a console driver which can later
++ * bind to a range of consoles specified by @first and @last. It will
++ * also initialize the console driver by calling con_startup().
++ */
++int register_con_driver(const struct consw *csw, int first, int last)
++{
++	int retval;
++
++	console_lock();
++	retval = do_register_con_driver(csw, first, last);
++	console_unlock();
++	return retval;
++}
+ EXPORT_SYMBOL(register_con_driver);
+ 
+ /**
+@@ -3615,9 +3640,18 @@ EXPORT_SYMBOL(register_con_driver);
+  */
+ int unregister_con_driver(const struct consw *csw)
+ {
+-	int i, retval = -ENODEV;
++	int retval;
+ 
+ 	console_lock();
++	retval = do_unregister_con_driver(csw);
++	console_unlock();
++	return retval;
++}
++EXPORT_SYMBOL(unregister_con_driver);
++
++int do_unregister_con_driver(const struct consw *csw)
++{
++	int i, retval = -ENODEV;
+ 
+ 	/* cannot unregister a bound driver */
+ 	if (con_is_bound(csw))
+@@ -3643,27 +3677,53 @@ int unregister_con_driver(const struct consw *csw)
+ 		}
+ 	}
+ err:
+-	console_unlock();
+ 	return retval;
+ }
+-EXPORT_SYMBOL(unregister_con_driver);
++EXPORT_SYMBOL_GPL(do_unregister_con_driver);
+ 
+ /*
+  *	If we support more console drivers, this function is used
+  *	when a driver wants to take over some existing consoles
+  *	and become default driver for newly opened ones.
+  *
+- *      take_over_console is basically a register followed by unbind
++ *	take_over_console is basically a register followed by unbind
++ */
++int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
++{
++	int err;
++
++	err = do_register_con_driver(csw, first, last);
++	/*
++	 * If we get an busy error we still want to bind the console driver
++	 * and return success, as we may have unbound the console driver
++	 * but not unregistered it.
++	 */
++	if (err == -EBUSY)
++		err = 0;
++	if (!err)
++		do_bind_con_driver(csw, first, last, deflt);
++
++	return err;
++}
++EXPORT_SYMBOL_GPL(do_take_over_console);
++
++/*
++ *	If we support more console drivers, this function is used
++ *	when a driver wants to take over some existing consoles
++ *	and become default driver for newly opened ones.
++ *
++ *	take_over_console is basically a register followed by unbind
+  */
+ int take_over_console(const struct consw *csw, int first, int last, int deflt)
+ {
+ 	int err;
+ 
+ 	err = register_con_driver(csw, first, last);
+-	/* if we get an busy error we still want to bind the console driver
++	/*
++	 * If we get an busy error we still want to bind the console driver
+ 	 * and return success, as we may have unbound the console driver
+-	 * but not unregistered it.
+-	*/
++	 * but not unregistered it.
++	 */
+ 	if (err == -EBUSY)
+ 		err = 0;
+ 	if (!err)
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
+index e669c6a..12d3f28 100644
+--- a/drivers/usb/host/ehci-omap.c
++++ b/drivers/usb/host/ehci-omap.c
+@@ -371,7 +371,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
+ 	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+ 
+-MODULE_ALIAS("platform:omap-ehci");
++MODULE_ALIAS("platform:ehci-omap");
+ MODULE_AUTHOR("Texas Instruments, Inc.");
+ MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 7746944..87ef150 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1919,24 +1919,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
+ {
+ 	struct ftdi_private *priv = usb_get_serial_port_data(port);
+ 
+-	mutex_lock(&port->serial->disc_mutex);
+-	if (!port->serial->disconnected) {
+-		/* Disable flow control */
+-		if (!on && usb_control_msg(port->serial->dev,
++	/* Disable flow control */
++	if (!on) {
++		if (usb_control_msg(port->serial->dev,
+ 			    usb_sndctrlpipe(port->serial->dev, 0),
+ 			    FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ 			    FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 			    0, priv->interface, NULL, 0,
+ 			    WDR_TIMEOUT) < 0) {
+-			    dev_err(&port->dev, "error from flowcontrol urb\n");
++			dev_err(&port->dev, "error from flowcontrol urb\n");
+ 		}
+-		/* drop RTS and DTR */
+-		if (on)
+-			set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+-		else
+-			clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ 	}
+-	mutex_unlock(&port->serial->disc_mutex);
++	/* drop RTS and DTR */
++	if (on)
++		set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
++	else
++		clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+ 
+ /*
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index d0bf56d..933dd07 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -514,19 +514,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
+ 	unsigned int control_state;
+ 	struct mct_u232_private *priv = usb_get_serial_port_data(port);
+ 
+-	mutex_lock(&port->serial->disc_mutex);
+-	if (!port->serial->disconnected) {
+-		/* drop DTR and RTS */
+-		spin_lock_irq(&priv->lock);
+-		if (on)
+-			priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+-		else
+-			priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+-		control_state = priv->control_state;
+-		spin_unlock_irq(&priv->lock);
+-		mct_u232_set_modem_ctrl(port->serial, control_state);
+-	}
+-	mutex_unlock(&port->serial->disc_mutex);
++	spin_lock_irq(&priv->lock);
++	if (on)
++		priv->control_state |= TIOCM_DTR | TIOCM_RTS;
++	else
++		priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
++	control_state = priv->control_state;
++	spin_unlock_irq(&priv->lock);
++
++	mct_u232_set_modem_ctrl(port->serial, control_state);
+ }
+ 
+ static void mct_u232_close(struct usb_serial_port *port)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 6c077a1..539247b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -479,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+ 
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
++	.reserved = BIT(4),
+ };
+ 
+ static const struct option_blacklist_info zte_0037_blacklist = {
+@@ -575,8 +576,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++	{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -1215,7 +1222,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ 	},
+-	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
++	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
++	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
++	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index b622d69..8ec15c2 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -890,19 +890,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 
+ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+ {
+-	struct usb_serial *serial = port->serial;
+ 	struct sierra_port_private *portdata;
+ 
+ 	portdata = usb_get_serial_port_data(port);
+ 	portdata->rts_state = on;
+ 	portdata->dtr_state = on;
+ 
+-	if (serial->dev) {
+-		mutex_lock(&serial->disc_mutex);
+-		if (!serial->disconnected)
+-			sierra_send_setup(port);
+-		mutex_unlock(&serial->disc_mutex);
+-	}
++	sierra_send_setup(port);
+ }
+ 
+ static int sierra_startup(struct usb_serial *serial)
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index 3cdc8a5..b8db69d 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -532,19 +532,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
+ 
+ 	dbg("%s\n", __func__);
+ 
+-	mutex_lock(&port->serial->disc_mutex);
+-	if (!port->serial->disconnected) {
+-		/* Disable flow control */
+-		if (!on &&
+-		    ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
++	/* Disable flow control */
++	if (!on) {
++		if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
+ 			dev_err(&port->dev, "error from flowcontrol urb\n");
+-		/* drop RTS and DTR */
+-		if (on)
+-			set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+-		else
+-			clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ 	}
+-	mutex_unlock(&port->serial->disc_mutex);
++	/* drop RTS and DTR */
++	if (on)
++		set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
++	else
++		clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+ 
+ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index c627ba2..e4b199c 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -699,10 +699,20 @@ static int serial_carrier_raised(struct tty_port *port)
+ static void serial_dtr_rts(struct tty_port *port, int on)
+ {
+ 	struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+-	struct usb_serial_driver *drv = p->serial->type;
++	struct usb_serial *serial = p->serial;
++	struct usb_serial_driver *drv = serial->type;
+ 
+-	if (drv->dtr_rts)
++	if (!drv->dtr_rts)
++		return;
++	/*
++	 * Work-around bug in the tty-layer which can result in dtr_rts
++	 * being called after a disconnect (and tty_unregister_device
++	 * has returned). Remove once bug has been squashed.
++	 */
++	mutex_lock(&serial->disc_mutex);
++	if (!serial->disconnected)
+ 		drv->dtr_rts(p, on);
++	mutex_unlock(&serial->disc_mutex);
+ }
+ 
+ static const struct tty_port_operations serial_port_ops = {
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index c88657d..820436e 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -41,7 +41,6 @@ static bool debug;
+ 
+ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ {
+-	struct usb_serial *serial = port->serial;
+ 	struct usb_wwan_port_private *portdata;
+ 
+ 	struct usb_wwan_intf_private *intfdata;
+@@ -54,12 +53,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ 		return;
+ 
+ 	portdata = usb_get_serial_port_data(port);
+-	mutex_lock(&serial->disc_mutex);
++	/* FIXME: locking */
+ 	portdata->rts_state = on;
+ 	portdata->dtr_state = on;
+-	if (serial->dev)
+-		intfdata->send_setup(port);
+-	mutex_unlock(&serial->disc_mutex);
++
++	intfdata->send_setup(port);
+ }
+ EXPORT_SYMBOL(usb_wwan_dtr_rts);
+ 
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 16b0bf0..7ab9046 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -147,7 +147,7 @@ static int usb_stor_huawei_dongles_pid(struct us_data *us)
+ 	int idProduct;
+ 
+ 	idesc = &us->pusb_intf->cur_altsetting->desc;
+-	idProduct = us->pusb_dev->descriptor.idProduct;
++	idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ 	/* The first port is CDROM,
+ 	 * means the dongle in the single port mode,
+ 	 * and a switch command is required to be sent. */
+@@ -169,7 +169,7 @@ int usb_stor_huawei_init(struct us_data *us)
+ 	int result = 0;
+ 
+ 	if (usb_stor_huawei_dongles_pid(us)) {
+-		if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++		if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+ 			result = usb_stor_huawei_scsi_init(us);
+ 		else
+ 			result = usb_stor_huawei_feature_init(us);
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 2c85530..65a6a75 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV(  0x04b4, 0x6831, 0x0000, 0x9999,
+ 		"Cypress ISD-300LP",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+ 
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+ 		"Super Top",
+ 		"USB 2.0  SATA BRIDGE",
+ 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
+index 550dbf0..feda482 100644
+--- a/drivers/video/backlight/adp8860_bl.c
++++ b/drivers/video/backlight/adp8860_bl.c
+@@ -791,7 +791,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
+ 
+ static int adp8860_i2c_resume(struct i2c_client *client)
+ {
+-	adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
++	adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
+index 9be58c6..c7a2c35 100644
+--- a/drivers/video/backlight/adp8870_bl.c
++++ b/drivers/video/backlight/adp8870_bl.c
+@@ -965,7 +965,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+ 
+ static int adp8870_i2c_resume(struct i2c_client *client)
+ {
+-	adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
++	adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 88e9204..5bf163e 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
+ 	return retval;
+ }
+ 
++static int do_fbcon_takeover(int show_logo)
++{
++	int err, i;
++
++	if (!num_registered_fb)
++		return -ENODEV;
++
++	if (!show_logo)
++		logo_shown = FBCON_LOGO_DONTSHOW;
++
++	for (i = first_fb_vc; i <= last_fb_vc; i++)
++		con2fb_map[i] = info_idx;
++
++	err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
++				fbcon_is_default);
++
++	if (err) {
++		for (i = first_fb_vc; i <= last_fb_vc; i++)
++			con2fb_map[i] = -1;
++		info_idx = -1;
++	} else {
++		fbcon_has_console_bind = 1;
++	}
++
++	return err;
++}
++
+ static int fbcon_takeover(int show_logo)
+ {
+ 	int err, i;
+@@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
+ 	}
+ 
+ 	/* Setup default font */
+-	if (!p->fontdata) {
++	if (!p->fontdata && !vc->vc_font.data) {
+ 		if (!fontname[0] || !(font = find_font(fontname)))
+ 			font = get_default_font(info->var.xres,
+ 						info->var.yres,
+@@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
+ 		vc->vc_font.height = font->height;
+ 		vc->vc_font.data = (void *)(p->fontdata = font->data);
+ 		vc->vc_font.charcount = 256; /* FIXME  Need to support more fonts */
++	} else {
++		p->fontdata = vc->vc_font.data;
+ 	}
+ 
+ 	cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	ops->p = &fb_display[fg_console];
+ }
+ 
+-static void fbcon_free_font(struct display *p)
++static void fbcon_free_font(struct display *p, bool freefont)
+ {
+-	if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++	if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ 		kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ 	p->fontdata = NULL;
+ 	p->userfont = 0;
+@@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	struct fb_info *info;
+ 	struct fbcon_ops *ops;
+ 	int idx;
++	bool free_font = true;
+ 
+-	fbcon_free_font(p);
+ 	idx = con2fb_map[vc->vc_num];
+ 
+ 	if (idx == -1)
+@@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	if (!info)
+ 		goto finished;
+ 
++	if (info->flags & FBINFO_MISC_FIRMWARE)
++		free_font = false;
+ 	ops = info->fbcon_par;
+ 
+ 	if (!ops)
+@@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	ops->flags &= ~FBCON_FLAGS_INIT;
+ finished:
+ 
++	fbcon_free_font(p, free_font);
++
+ 	if (!con_is_bound(&fb_con))
+ 		fbcon_exit();
+ 
+@@ -2977,7 +3010,7 @@ static int fbcon_unbind(void)
+ {
+ 	int ret;
+ 
+-	ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
++	ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ 				fbcon_is_default);
+ 
+ 	if (!ret)
+@@ -3050,7 +3083,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ 		primary_device = -1;
+ 
+ 	if (!num_registered_fb)
+-		unregister_con_driver(&fb_con);
++		do_unregister_con_driver(&fb_con);
+ 
+ 	return 0;
+ }
+@@ -3115,7 +3148,7 @@ static int fbcon_fb_registered(struct fb_info *info)
+ 		}
+ 
+ 		if (info_idx != -1)
+-			ret = fbcon_takeover(1);
++			ret = do_fbcon_takeover(1);
+ 	} else {
+ 		for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ 			if (con2fb_map_boot[i] == idx)
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index d449a74..5855d17 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ 	unsigned short video_port_status = vga_video_port_reg + 6;
+ 	int font_select = 0x00, beg, i;
+ 	char *charmap;
+-	
++	bool clear_attribs = false;
+ 	if (vga_video_type != VIDEO_TYPE_EGAM) {
+ 		charmap = (char *) VGA_MAP_MEM(colourmap, 0);
+ 		beg = 0x0e;
+@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ 
+ 	/* if 512 char mode is already enabled don't re-enable it. */
+ 	if ((set) && (ch512 != vga_512_chars)) {
+-		/* attribute controller */
+-		for (i = 0; i < MAX_NR_CONSOLES; i++) {
+-			struct vc_data *c = vc_cons[i].d;
+-			if (c && c->vc_sw == &vga_con)
+-				c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+-		}
+ 		vga_512_chars = ch512;
+ 		/* 256-char: enable intensity bit
+ 		   512-char: disable intensity bit */
+@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ 		   it means, but it works, and it appears necessary */
+ 		inb_p(video_port_status);
+ 		vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);	
++		clear_attribs = true;
+ 	}
+ 	raw_spin_unlock_irq(&vga_lock);
++
++	if (clear_attribs) {
++		for (i = 0; i < MAX_NR_CONSOLES; i++) {
++			struct vc_data *c = vc_cons[i].d;
++			if (c && c->vc_sw == &vga_con) {
++				/* force hi font mask to 0, so we always clear
++				   the bit on either transition */
++				c->vc_hi_font_mask = 0x00;
++				clear_buffer_attributes(c);
++				c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
++			}
++		}
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index c6ce416..90f1315 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1642,7 +1642,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ 	event.info = fb_info;
+ 	if (!lock_fb_info(fb_info))
+ 		return -ENODEV;
++	console_lock();
+ 	fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++	console_unlock();
+ 	unlock_fb_info(fb_info);
+ 	return 0;
+ }
+@@ -1658,8 +1660,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ 
+ 	if (!lock_fb_info(fb_info))
+ 		return -ENODEV;
++	console_lock();
+ 	event.info = fb_info;
+ 	ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++	console_unlock();
+ 	unlock_fb_info(fb_info);
+ 
+ 	if (ret)
+@@ -1674,7 +1678,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ 	num_registered_fb--;
+ 	fb_cleanup_device(fb_info);
+ 	event.info = fb_info;
++	console_lock();
+ 	fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
++	console_unlock();
+ 
+ 	/* this may free fb info */
+ 	put_fb_info(fb_info);
+@@ -1845,11 +1851,8 @@ int fb_new_modelist(struct fb_info *info)
+ 	err = 1;
+ 
+ 	if (!list_empty(&info->modelist)) {
+-		if (!lock_fb_info(info))
+-			return -ENODEV;
+ 		event.info = info;
+ 		err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+-		unlock_fb_info(info);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
+index 67afa9c..303fb9f 100644
+--- a/drivers/video/fbsysfs.c
++++ b/drivers/video/fbsysfs.c
+@@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device,
+ 	if (i * sizeof(struct fb_videomode) != count)
+ 		return -EINVAL;
+ 
++	if (!lock_fb_info(fb_info))
++		return -ENODEV;
+ 	console_lock();
+ 	list_splice(&fb_info->modelist, &old_list);
+ 	fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
+@@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device,
+ 		fb_destroy_modelist(&old_list);
+ 
+ 	console_unlock();
++	unlock_fb_info(fb_info);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
+index 6af3f16..d02a538 100644
+--- a/drivers/video/fsl-diu-fb.c
++++ b/drivers/video/fsl-diu-fb.c
+@@ -923,7 +923,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+ #define PF_COMP_0_MASK		0x0000000F
+ #define PF_COMP_0_SHIFT		0
+ 
+-#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
++#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \
+ 	cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
+ 	(blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
+ 	(red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
+@@ -933,10 +933,10 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+ 	switch (bits_per_pixel) {
+ 	case 32:
+ 		/* 0x88883316 */
+-		return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
++		return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8);
+ 	case 24:
+ 		/* 0x88082219 */
+-		return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
++		return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0);
+ 	case 16:
+ 		/* 0x65053118 */
+ 		return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index b1f60a0..b2db77e 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
+ 				       u->name, (void *)(unsigned long)port);
+ 	if (rc >= 0)
+ 		rc = evtchn_make_refcounted(port);
++	else {
++		/* bind failed, should close the port now */
++		struct evtchn_close close;
++		close.port = port;
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++			BUG();
++		set_port_user(port, NULL);
++	}
+ 
+ 	return rc;
+ }
+@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+ {
+ 	int irq = irq_from_evtchn(port);
+ 
++	BUG_ON(irq < 0);
++
+ 	unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+ 
+ 	set_port_user(port, NULL);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index ba11c30..b3be92c 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1047,6 +1047,7 @@ int revalidate_disk(struct gendisk *disk)
+ 
+ 	mutex_lock(&bdev->bd_mutex);
+ 	check_disk_size_change(disk, bdev);
++	bdev->bd_invalidated = 0;
+ 	mutex_unlock(&bdev->bd_mutex);
+ 	bdput(bdev);
+ 	return ret;
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 8392cb8..a3a0987 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,6 +551,9 @@ again:
+ 		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ 		if (status < 0)
+ 			break;
++		/* Resend the blocking lock request after a server reboot */
++		if (resp->status ==  nlm_lck_denied_grace_period)
++			continue;
+ 		if (resp->status != nlm_lck_blocked)
+ 			break;
+ 	}
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index d16dae2..3a9c247 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -1155,6 +1155,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = {
+ static struct pnfs_layoutdriver_type blocklayout_type = {
+ 	.id				= LAYOUT_BLOCK_VOLUME,
+ 	.name				= "LAYOUT_BLOCK_VOLUME",
++	.owner				= THIS_MODULE,
+ 	.read_pagelist			= bl_read_pagelist,
+ 	.write_pagelist			= bl_write_pagelist,
+ 	.alloc_layout_hdr		= bl_alloc_layout_hdr,
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index 1afe74c..65538f5 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -589,6 +589,7 @@ static struct pnfs_layoutdriver_type objlayout_type = {
+ 	.flags                   = PNFS_LAYOUTRET_ON_SETATTR |
+ 				   PNFS_LAYOUTRET_ON_ERROR,
+ 
++	.owner		       	 = THIS_MODULE,
+ 	.alloc_layout_hdr        = objlayout_alloc_layout_hdr,
+ 	.free_layout_hdr         = objlayout_free_layout_hdr,
+ 
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 8445fbc..6f292dd 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ 
+ 	/* don't allow invalid bits: we don't want flags set */
+ 	mask = inotify_arg_to_mask(arg);
+-	if (unlikely(!(mask & IN_ALL_EVENTS)))
+-		return -EINVAL;
+ 
+ 	fsn_mark = fsnotify_find_inode_mark(group, inode);
+ 	if (!fsn_mark)
+@@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+ 
+ 	/* don't allow invalid bits: we don't want flags set */
+ 	mask = inotify_arg_to_mask(arg);
+-	if (unlikely(!(mask & IN_ALL_EVENTS)))
+-		return -EINVAL;
+ 
+ 	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ 	if (unlikely(!tmp_i_mark))
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 81a4cd2..231eab2 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ 	 * everything is up to the caller :) */
+ 	status = ocfs2_should_refresh_lock_res(lockres);
+ 	if (status < 0) {
++		ocfs2_cluster_unlock(osb, lockres, level);
+ 		mlog_errno(status);
+ 		goto bail;
+ 	}
+@@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ 
+ 		ocfs2_complete_lock_res_refresh(lockres, status);
+ 
+-		if (status < 0)
++		if (status < 0) {
++			ocfs2_cluster_unlock(osb, lockres, level);
+ 			mlog_errno(status);
++		}
+ 		ocfs2_track_lock_refresh(lockres);
+ 	}
+ bail:
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 7201ce4..f59e942 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -77,7 +77,9 @@ extern const struct consw prom_con;	/* SPARC PROM console */
+ int con_is_bound(const struct consw *csw);
+ int register_con_driver(const struct consw *csw, int first, int last);
+ int unregister_con_driver(const struct consw *csw);
++int do_unregister_con_driver(const struct consw *csw);
+ int take_over_console(const struct consw *sw, int first, int last, int deflt);
++int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
+ void give_up_console(const struct consw *sw);
+ #ifdef CONFIG_HW_CONSOLE
+ int con_debug_enter(struct vc_data *vc);
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 561e130..9b0c614 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -327,7 +327,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ 					struct vlan_hdr *vhdr)
+ {
+ 	__be16 proto;
+-	unsigned char *rawp;
++	unsigned short *rawp;
+ 
+ 	/*
+ 	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
+@@ -340,8 +340,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ 		return;
+ 	}
+ 
+-	rawp = skb->data;
+-	if (*(unsigned short *) rawp == 0xFFFF)
++	rawp = (unsigned short *)(vhdr + 1);
++	if (*rawp == 0xFFFF)
+ 		/*
+ 		 * This is a magic hack to spot IPX packets. Older Novell
+ 		 * breaks the protocol design and runs IPX over 802.3 without
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index 1d1b1e1..ee2baf0 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+ 
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
+index a54b825..6f8b026 100644
+--- a/include/linux/usb/audio.h
++++ b/include/linux/usb/audio.h
+@@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de
+ 						   int protocol)
+ {
+ 	__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+-	return desc->baSourceID[desc->bNrInPins + control_size];
++	return *(uac_processing_unit_bmControls(desc, protocol)
++			+ control_size);
+ }
+ 
+ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
+ 						 int protocol)
+ {
+ 	__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+-	return &desc->baSourceID[desc->bNrInPins + control_size + 1];
++	return uac_processing_unit_bmControls(desc, protocol)
++			+ control_size + 1;
+ }
+ 
+ /* 4.5.2 Class-Specific AS Interface Descriptor */
+diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
+index e33d77f..57eeb14 100644
+--- a/include/linux/vt_kern.h
++++ b/include/linux/vt_kern.h
+@@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
+ int con_get_cmap(unsigned char __user *cmap);
+ void scrollback(struct vc_data *vc, int lines);
+ void scrollfront(struct vc_data *vc, int lines);
++void clear_buffer_attributes(struct vc_data *vc);
+ void update_region(struct vc_data *vc, unsigned long start, int count);
+ void redraw_screen(struct vc_data *vc, int is_switch);
+ #define update_screen(x) redraw_screen(x, 0)
+@@ -131,6 +132,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
+ int vt_waitactive(int n);
+ void change_console(struct vc_data *new_vc);
+ void reset_vc(struct vc_data *vc);
++extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
++			     int deflt);
+ extern int unbind_con_driver(const struct consw *csw, int first, int last,
+ 			     int deflt);
+ int vty_init(const struct file_operations *console_fops);
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index 00cbb43..2da45ce 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -28,16 +28,16 @@
+ 
+ struct inet_hashinfo;
+ 
+-/* I have no idea if this is a good hash for v6 or not. -DaveM */
+ static inline unsigned int inet6_ehashfn(struct net *net,
+ 				const struct in6_addr *laddr, const u16 lport,
+ 				const struct in6_addr *faddr, const __be16 fport)
+ {
+-	u32 ports = (lport ^ (__force u16)fport);
++	u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+ 
+ 	return jhash_3words((__force u32)laddr->s6_addr32[3],
+-			    (__force u32)faddr->s6_addr32[3],
+-			    ports, inet_ehash_secret + net_hash_mix(net));
++			    ipv6_addr_jhash(faddr),
++			    ports,
++			    inet_ehash_secret + net_hash_mix(net));
+ }
+ 
+ static inline int inet6_sk_ehashfn(const struct sock *sk)
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index ae17e13..8cd2e1d 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -202,6 +202,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
+ extern int inet_sk_rebuild_header(struct sock *sk);
+ 
+ extern u32 inet_ehash_secret;
++extern u32 ipv6_hash_secret;
+ extern void build_ehash_secret(void);
+ 
+ static inline unsigned int inet_ehashfn(struct net *net,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index e4170a2..12a1bd2 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -15,6 +15,7 @@
+ 
+ #include <linux/ipv6.h>
+ #include <linux/hardirq.h>
++#include <linux/jhash.h>
+ #include <net/if_inet6.h>
+ #include <net/ndisc.h>
+ #include <net/flow.h>
+@@ -390,6 +391,17 @@ struct ip6_create_arg {
+ void ip6_frag_init(struct inet_frag_queue *q, void *a);
+ int ip6_frag_match(struct inet_frag_queue *q, void *a);
+ 
++/* more secured version of ipv6_addr_hash() */
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++{
++	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
++
++	return jhash_3words(v,
++			    (__force u32)a->s6_addr32[2],
++			    (__force u32)a->s6_addr32[3],
++			    ipv6_hash_secret);
++}
++
+ static inline int ipv6_addr_any(const struct in6_addr *a)
+ {
+ 	return (a->s6_addr32[0] | a->s6_addr32[1] |
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 5878118..59a8947 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -944,7 +944,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
+ 	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
+ }
+ 
+-inline void sk_refcnt_debug_release(const struct sock *sk)
++static inline void sk_refcnt_debug_release(const struct sock *sk)
+ {
+ 	if (atomic_read(&sk->sk_refcnt) != 1)
+ 		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 19eb089..8879430 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2471,8 +2471,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
+ 	if (!futex_cmpxchg_enabled)
+ 		return -ENOSYS;
+ 
+-	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
+-
+ 	rcu_read_lock();
+ 
+ 	ret = -ESRCH;
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index 83e368b..a9642d5 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -142,8 +142,6 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ 	if (!futex_cmpxchg_enabled)
+ 		return -ENOSYS;
+ 
+-	WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
+-
+ 	rcu_read_lock();
+ 
+ 	ret = -ESRCH;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 6db7a5e..cdd5607 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+  * and expiry check is done in the hrtimer_interrupt or in the softirq.
+  */
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+-					    struct hrtimer_clock_base *base,
+-					    int wakeup)
++					    struct hrtimer_clock_base *base)
+ {
+-	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+-		if (wakeup) {
+-			raw_spin_unlock(&base->cpu_base->lock);
+-			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-			raw_spin_lock(&base->cpu_base->lock);
+-		} else
+-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-
+-		return 1;
+-	}
+-
+-	return 0;
++	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
+ }
+ 
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+-					    struct hrtimer_clock_base *base,
+-					    int wakeup)
++					    struct hrtimer_clock_base *base)
+ {
+ 	return 0;
+ }
+@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	 *
+ 	 * XXX send_remote_softirq() ?
+ 	 */
+-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+-		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
++		&& hrtimer_enqueue_reprogram(timer, new_base)) {
++		if (wakeup) {
++			/*
++			 * We need to drop cpu_base->lock to avoid a
++			 * lock ordering issue vs. rq->lock.
++			 */
++			raw_spin_unlock(&new_base->cpu_base->lock);
++			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++			local_irq_restore(flags);
++			return ret;
++		} else {
++			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++		}
++	}
+ 
+ 	unlock_hrtimer_base(timer, &flags);
+ 
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 611cd60..7b5f012 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ 
+ 	/*
+ 	 * All handlers must agree on IRQF_SHARED, so we test just the
+-	 * first. Check for action->next as well.
++	 * first.
+ 	 */
+ 	action = desc->action;
+ 	if (!action || !(action->flags & IRQF_SHARED) ||
+-	    (action->flags & __IRQF_TIMER) ||
+-	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+-	    !action->next)
++	    (action->flags & __IRQF_TIMER))
+ 		goto out;
+ 
+ 	/* Already running on another processor */
+@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ 	do {
+ 		if (handle_irq_event(desc) == IRQ_HANDLED)
+ 			ret = IRQ_HANDLED;
++		/* Make sure that there is still a valid action */
+ 		action = desc->action;
+ 	} while ((desc->istate & IRQS_PENDING) && action);
+ 	desc->istate &= ~IRQS_POLL_INPROGRESS;
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 125cb67..acbb79c 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1422,8 +1422,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ 		while (!signal_pending(current)) {
+ 			if (timer.it.cpu.expires.sched == 0) {
+ 				/*
+-				 * Our timer fired and was reset.
++				 * Our timer fired and was reset, below
++				 * deletion can not fail.
+ 				 */
++				posix_cpu_timer_del(&timer);
+ 				spin_unlock_irq(&timer.it_lock);
+ 				return 0;
+ 			}
+@@ -1441,9 +1443,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ 		 * We were interrupted by a signal.
+ 		 */
+ 		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+-		posix_cpu_timer_set(&timer, 0, &zero_it, it);
++		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
++		if (!error) {
++			/*
++			 * Timer is now unarmed, deletion can not fail.
++			 */
++			posix_cpu_timer_del(&timer);
++		}
+ 		spin_unlock_irq(&timer.it_lock);
+ 
++		while (error == TIMER_RETRY) {
++			/*
++			 * We need to handle case when timer was or is in the
++			 * middle of firing. In other cases we already freed
++			 * resources.
++			 */
++			spin_lock_irq(&timer.it_lock);
++			error = posix_cpu_timer_del(&timer);
++			spin_unlock_irq(&timer.it_lock);
++		}
++
+ 		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+ 			/*
+ 			 * It actually did fire already.
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
+index eb51d76..3f42652 100644
+--- a/kernel/timeconst.pl
++++ b/kernel/timeconst.pl
+@@ -369,10 +369,8 @@ if ($hz eq '--can') {
+ 		die "Usage: $0 HZ\n";
+ 	}
+ 
+-	@val = @{$canned_values{$hz}};
+-	if (!defined(@val)) {
+-		@val = compute_values($hz);
+-	}
++	$cv = $canned_values{$hz};
++	@val = defined($cv) ? @$cv : compute_values($hz);
+ 	output($hz, @val);
+ }
+ exit 0;
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 469491e0..dcb9872 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -17,6 +17,7 @@
+ #include <linux/fadvise.h>
+ #include <linux/writeback.h>
+ #include <linux/syscalls.h>
++#include <linux/swap.h>
+ 
+ #include <asm/unistd.h>
+ 
+@@ -124,9 +125,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
+ 		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
+ 		end_index = (endbyte >> PAGE_CACHE_SHIFT);
+ 
+-		if (end_index >= start_index)
+-			invalidate_mapping_pages(mapping, start_index,
++		if (end_index >= start_index) {
++			unsigned long count = invalidate_mapping_pages(mapping,
++						start_index, end_index);
++
++			/*
++			 * If fewer pages were invalidated than expected then
++			 * it is possible that some of the pages were on
++			 * a per-cpu pagevec for a remote CPU. Drain all
++			 * pagevecs and try again.
++			 */
++			if (count < (end_index - start_index + 1)) {
++				lru_add_drain_all();
++				invalidate_mapping_pages(mapping, start_index,
+ 						end_index);
++			}
++		}
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 862b608..8d1ca2d 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
++/* global SRCU for all MMs */
++static struct srcu_struct srcu;
++
+ /*
+  * This function can't run concurrently against mmu_notifier_register
+  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,58 +29,61 @@
+  * in parallel despite there being no task using this mm any more,
+  * through the vmas outside of the exit_mmap context, such as with
+  * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+  * can't go away from under us as exit_mmap holds an mm_count pin
+  * itself.
+  */
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ 	struct mmu_notifier *mn;
+-	struct hlist_node *n;
++	int id;
+ 
+ 	/*
+-	 * RCU here will block mmu_notifier_unregister until
+-	 * ->release returns.
++	 * srcu_read_lock() here will block synchronize_srcu() in
++	 * mmu_notifier_unregister() until all registered
++	 * ->release() callouts this function makes have
++	 * returned.
+ 	 */
+-	rcu_read_lock();
+-	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+-		/*
+-		 * if ->release runs before mmu_notifier_unregister it
+-		 * must be handled as it's the only way for the driver
+-		 * to flush all existing sptes and stop the driver
+-		 * from establishing any more sptes before all the
+-		 * pages in the mm are freed.
+-		 */
+-		if (mn->ops->release)
+-			mn->ops->release(mn, mm);
+-	rcu_read_unlock();
+-
++	id = srcu_read_lock(&srcu);
+ 	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ 		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ 				 struct mmu_notifier,
+ 				 hlist);
++
+ 		/*
+-		 * We arrived before mmu_notifier_unregister so
+-		 * mmu_notifier_unregister will do nothing other than
+-		 * to wait ->release to finish and
+-		 * mmu_notifier_unregister to return.
++		 * Unlink.  This will prevent mmu_notifier_unregister()
++		 * from also making the ->release() callout.
+ 		 */
+ 		hlist_del_init_rcu(&mn->hlist);
++		spin_unlock(&mm->mmu_notifier_mm->lock);
++
++		/*
++		 * Clear sptes. (see 'release' description in mmu_notifier.h)
++		 */
++		if (mn->ops->release)
++			mn->ops->release(mn, mm);
++
++		spin_lock(&mm->mmu_notifier_mm->lock);
+ 	}
+ 	spin_unlock(&mm->mmu_notifier_mm->lock);
+ 
+ 	/*
+-	 * synchronize_rcu here prevents mmu_notifier_release to
+-	 * return to exit_mmap (which would proceed freeing all pages
+-	 * in the mm) until the ->release method returns, if it was
+-	 * invoked by mmu_notifier_unregister.
+-	 *
+-	 * The mmu_notifier_mm can't go away from under us because one
+-	 * mm_count is hold by exit_mmap.
++	 * All callouts to ->release() which we have done are complete.
++	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
++	 */
++	srcu_read_unlock(&srcu, id);
++
++	/*
++	 * mmu_notifier_unregister() may have unlinked a notifier and may
++	 * still be calling out to it.	Additionally, other notifiers
++	 * may have been active via vmtruncate() et. al. Block here
++	 * to ensure that all notifier callouts for this mm have been
++	 * completed and the sptes are really cleaned up before returning
++	 * to exit_mmap().
+ 	 */
+-	synchronize_rcu();
++	synchronize_srcu(&srcu);
+ }
+ 
+ /*
+@@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
+-	int young = 0;
++	int young = 0, id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->clear_flush_young)
+ 			young |= mn->ops->clear_flush_young(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	return young;
+ }
+@@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
+-	int young = 0;
++	int young = 0, id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->test_young) {
+ 			young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ 				break;
+ 		}
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	return young;
+ }
+@@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->change_pte)
+ 			mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ 		else if (mn->ops->invalidate_page)
+ 			mn->ops->invalidate_page(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_page)
+ 			mn->ops->invalidate_page(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_range_start)
+ 			mn->ops->invalidate_range_start(mn, mm, start, end);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_range_end)
+ 			mn->ops->invalidate_range_end(mn, mm, start, end);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+ 
+ 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+ 
++	/*
++	* Verify that mmu_notifier_init() already run and the global srcu is
++	* initialized.
++	*/
++	BUG_ON(!srcu.per_cpu_ref);
++
+ 	ret = -ENOMEM;
+ 	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ 	if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+ /*
+  * This releases the mm_count pin automatically and frees the mm
+  * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+  * calling mmu_notifier_unregister. ->release or any other notifier
+  * method may be invoked concurrently with mmu_notifier_unregister,
+  * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ 
++	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	if (!hlist_unhashed(&mn->hlist)) {
+-		/*
+-		 * RCU here will force exit_mmap to wait ->release to finish
+-		 * before freeing the pages.
+-		 */
+-		rcu_read_lock();
++		int id;
+ 
+ 		/*
+-		 * exit_mmap will block in mmu_notifier_release to
+-		 * guarantee ->release is called before freeing the
+-		 * pages.
++		 * Ensure we synchronize up with __mmu_notifier_release().
+ 		 */
++		id = srcu_read_lock(&srcu);
++
++		hlist_del_rcu(&mn->hlist);
++		spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ 		if (mn->ops->release)
+ 			mn->ops->release(mn, mm);
+-		rcu_read_unlock();
+ 
+-		spin_lock(&mm->mmu_notifier_mm->lock);
+-		hlist_del_rcu(&mn->hlist);
++		/*
++		 * Allow __mmu_notifier_release() to complete.
++		 */
++		srcu_read_unlock(&srcu, id);
++	} else
+ 		spin_unlock(&mm->mmu_notifier_mm->lock);
+-	}
+ 
+ 	/*
+-	 * Wait any running method to finish, of course including
+-	 * ->release if it was run by mmu_notifier_relase instead of us.
++	 * Wait for any running method to finish, including ->release() if it
++	 * was run by __mmu_notifier_release() instead of us.
+ 	 */
+-	synchronize_rcu();
++	synchronize_srcu(&srcu);
+ 
+ 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ 
+ 	mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++	return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 691b8ec..533ea80 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4216,10 +4216,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
+  * round what is now in bits to nearest long in bits, then return it in
+  * bytes.
+  */
+-static unsigned long __init usemap_size(unsigned long zonesize)
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
+ {
+ 	unsigned long usemapsize;
+ 
++	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
+ 	usemapsize = roundup(zonesize, pageblock_nr_pages);
+ 	usemapsize = usemapsize >> pageblock_order;
+ 	usemapsize *= NR_PAGEBLOCK_BITS;
+@@ -4229,17 +4230,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
+ }
+ 
+ static void __init setup_usemap(struct pglist_data *pgdat,
+-				struct zone *zone, unsigned long zonesize)
++				struct zone *zone,
++				unsigned long zone_start_pfn,
++				unsigned long zonesize)
+ {
+-	unsigned long usemapsize = usemap_size(zonesize);
++	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
+ 	zone->pageblock_flags = NULL;
+ 	if (usemapsize)
+ 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+ 								   usemapsize);
+ }
+ #else
+-static inline void setup_usemap(struct pglist_data *pgdat,
+-				struct zone *zone, unsigned long zonesize) {}
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
++				unsigned long zone_start_pfn, unsigned long zonesize) {}
+ #endif /* CONFIG_SPARSEMEM */
+ 
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+@@ -4367,7 +4370,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ 			continue;
+ 
+ 		set_pageblock_order(pageblock_default_order());
+-		setup_usemap(pgdat, zone, size);
++		setup_usemap(pgdat, zone, zone_start_pfn, size);
+ 		ret = init_currently_empty_zone(zone, zone_start_pfn,
+ 						size, MEMMAP_EARLY);
+ 		BUG_ON(ret);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index a409bd8..58c4a47 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2177,6 +2177,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ 	unsigned long inodes;
+ 	int error = -EINVAL;
+ 
++	config.mpol = NULL;
+ 	if (shmem_parse_options(data, &config, true))
+ 		return error;
+ 
+@@ -2201,8 +2202,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ 	sbinfo->max_inodes  = config.max_inodes;
+ 	sbinfo->free_inodes = config.max_inodes - inodes;
+ 
+-	mpol_put(sbinfo->mpol);
+-	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
++	/*
++	 * Preserve previous mempolicy unless mpol remount option was specified.
++	 */
++	if (config.mpol) {
++		mpol_put(sbinfo->mpol);
++		sbinfo->mpol = config.mpol;	/* transfers initial ref */
++	}
+ out:
+ 	spin_unlock(&sbinfo->stat_lock);
+ 	return error;
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index e16aade..718cbe8 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -16,6 +16,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/llc.h>
+ #include <linux/slab.h>
++#include <linux/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/llc.h>
+ #include <net/llc_pdu.h>
+@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
+ 
+ 	skb->dev = p->dev;
+ 	skb->protocol = htons(ETH_P_802_2);
++	skb->priority = TC_PRIO_CONTROL;
+ 
+ 	skb_reserve(skb, LLC_RESERVE);
+ 	memcpy(__skb_put(skb, length), data, length);
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index e4fbfd6..da7e0c8 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ 		skb_queue_walk(queue, skb) {
+ 			*peeked = skb->peeked;
+ 			if (flags & MSG_PEEK) {
+-				if (*off >= skb->len) {
++				if (*off >= skb->len && skb->len) {
+ 					*off -= skb->len;
+ 					continue;
+ 				}
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index b9868e1..aa74be4 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -126,6 +126,9 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	if (nlmsg_len(nlh) < sizeof(*req))
+ 		return -EINVAL;
+ 
++	if (req->sdiag_family >= AF_MAX)
++		return -EINVAL;
++
+ 	hndl = sock_diag_lock_handler(req->sdiag_family);
+ 	if (hndl == NULL)
+ 		err = -ENOENT;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 10e3751..78ec298 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -227,8 +227,12 @@ EXPORT_SYMBOL(inet_listen);
+ u32 inet_ehash_secret __read_mostly;
+ EXPORT_SYMBOL(inet_ehash_secret);
+ 
++u32 ipv6_hash_secret __read_mostly;
++EXPORT_SYMBOL(ipv6_hash_secret);
++
+ /*
+- * inet_ehash_secret must be set exactly once
++ * inet_ehash_secret must be set exactly once, and to a non nul value
++ * ipv6_hash_secret must be set exactly once.
+  */
+ void build_ehash_secret(void)
+ {
+@@ -238,7 +242,8 @@ void build_ehash_secret(void)
+ 		get_random_bytes(&rnd, sizeof(rnd));
+ 	} while (rnd == 0);
+ 
+-	cmpxchg(&inet_ehash_secret, 0, rnd);
++	if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
++		get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ }
+ EXPORT_SYMBOL(build_ehash_secret);
+ 
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 50009c7..c234bda 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -321,8 +321,8 @@ void ping_err(struct sk_buff *skb, u32 info)
+ 	struct iphdr *iph = (struct iphdr *)skb->data;
+ 	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ 	struct inet_sock *inet_sock;
+-	int type = icmph->type;
+-	int code = icmph->code;
++	int type = icmp_hdr(skb)->type;
++	int code = icmp_hdr(skb)->code;
+ 	struct net *net = dev_net(skb->dev);
+ 	struct sock *sk;
+ 	int harderr;
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index bdd6164..131fd1f 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+ 
+ 	spin_lock(&codec->reg_lock);
+ 	if (!pvoice->running) {
+-		spin_unlock_irq(&codec->reg_lock);
++		spin_unlock(&codec->reg_lock);
+ 		return 0;
+ 	}
+ 	outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d1b805a..02a6e3f 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -924,8 +924,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ 	if (!static_hdmi_pcm && eld->eld_valid) {
+ 		snd_hdmi_eld_update_pcm_info(eld, hinfo);
+ 		if (hinfo->channels_min > hinfo->channels_max ||
+-		    !hinfo->rates || !hinfo->formats)
++		    !hinfo->rates || !hinfo->formats) {
++			per_cvt->assigned = 0;
++			hinfo->nid = 0;
++			snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ 			return -ENODEV;
++		}
+ 	}
+ 
+ 	/* Store the updated parameters */
+@@ -989,6 +993,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 		"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
+ 		codec->addr, pin_nid, eld->monitor_present, eld_valid);
+ 
++	eld->eld_valid = false;
+ 	if (eld_valid) {
+ 		if (!snd_hdmi_get_eld(eld, codec, pin_nid))
+ 			snd_hdmi_show_eld(eld);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f7f8776..adb97d6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5440,6 +5440,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+ 	/* All Apple entries are in codec SSIDs */
+ 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
+index b4819d5..64da910 100644
+--- a/sound/pci/rme32.c
++++ b/sound/pci/rme32.c
+@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
+ 	spin_lock_irq(&rme32->lock);
+ 	rme32->capture_substream = NULL;
+ 	rme32->capture_periodsize = 0;
+-	spin_unlock(&rme32->lock);
++	spin_unlock_irq(&rme32->lock);
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
+index 8e0cf14..9932aac 100644
+--- a/sound/soc/codecs/wm2200.c
++++ b/sound/soc/codecs/wm2200.c
+@@ -990,9 +990,9 @@ SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL,
+ 
+ SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
+ 	     WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1),
+-SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
++SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_2L,
+ 	     WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1),
+-SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
++SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_3L,
+ 	     WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1),
+ 
+ SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L,
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 63128cd..fa4c2f7 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1658,7 +1658,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ 		/* .vendor_name = "Roland", */
+ 		/* .product_name = "A-PRO", */
+-		.ifnum = 1,
++		.ifnum = 0,
+ 		.type = QUIRK_MIDI_FIXED_ENDPOINT,
+ 		.data = & (const struct snd_usb_midi_endpoint_info) {
+ 			.out_cables = 0x0003,

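The bulk of this patch is the mm/mmu_notifier.c conversion from plain RCU to sleepable RCU (SRCU): every walk of the notifier list now brackets the traversal with srcu_read_lock()/srcu_read_unlock() on one static srcu_struct, registration checks that the domain has been initialized, and unregister waits with synchronize_srcu() instead of synchronize_rcu(), so notifier callbacks are allowed to sleep. Below is a minimal sketch of that read-side pattern, not kernel code; the demo_* names are placeholders, and the list-walking macro is the four-argument form used by these 3.0/3.4 kernels:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/srcu.h>

/* One global SRCU domain, set up once at init time (the patch does this via module_init). */
static struct srcu_struct demo_srcu;

struct demo_notifier {
	struct hlist_node hlist;
	void (*callback)(struct demo_notifier *n);
};

/* Walk an RCU-managed notifier list under the SRCU read lock. */
static void demo_walk(struct hlist_head *list)
{
	struct demo_notifier *dn;
	struct hlist_node *pos;
	int id;

	id = srcu_read_lock(&demo_srcu);	/* may be held across sleeping callbacks */
	hlist_for_each_entry_rcu(dn, pos, list, hlist)
		if (dn->callback)
			dn->callback(dn);
	srcu_read_unlock(&demo_srcu, id);	/* pass back the index returned by the lock */
}

static int __init demo_init(void)
{
	return init_srcu_struct(&demo_srcu);	/* writers later call synchronize_srcu(&demo_srcu) */
}
module_init(demo_init);

Writers remove entries with hlist_del_rcu() under a spinlock and then call synchronize_srcu(&demo_srcu) before freeing them, which matches the ordering the rewritten mmu_notifier_unregister() above relies on.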

