public inbox for gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.2.2/
From: Anthony G. Basile @ 2012-01-27 11:52 UTC
To: gentoo-commits

commit:     84c88d2374b2eb7906db747ce93e6046d0c7d644
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Fri Jan 27 11:52:36 2012 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Fri Jan 27 11:52:36 2012 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=84c88d23

Added the patch needed to bump to 3.2.2

---
 3.2.2/0000_README            |    4 +
 3.2.2/1001_linux-3.2.2.patch | 6552 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6556 insertions(+), 0 deletions(-)

diff --git a/3.2.2/0000_README b/3.2.2/0000_README
index ab46037..742124c 100644
--- a/3.2.2/0000_README
+++ b/3.2.2/0000_README
@@ -2,6 +2,10 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
+Patch:	1001_linux-3.2.2.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.2.2
+
 Patch:	4420_grsecurity-2.2.2-3.2.2-201201252117.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity

diff --git a/3.2.2/1001_linux-3.2.2.patch b/3.2.2/1001_linux-3.2.2.patch
new file mode 100644
index 0000000..ec16cce
--- /dev/null
+++ b/3.2.2/1001_linux-3.2.2.patch
@@ -0,0 +1,6552 @@
+diff --git a/Makefile b/Makefile
+index c5edffa..2f684da 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
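The first hunk is the stable-series bump itself: SUBLEVEL goes from 1 to 2, so this tree identifies as 3.2.2. As a quick illustration (not part of the patch), the triple set here is what the build packs into LINUX_VERSION_CODE via <linux/version.h>, so out-of-tree code can gate on this release with the standard KERNEL_VERSION macro:

    #include <linux/version.h>

    /* VERSION=3, PATCHLEVEL=2, SUBLEVEL=2 packs as (3 << 16) | (2 << 8) | 2 */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 2)
    /* code that relies on fixes first shipped in 3.2.2 */
    #endif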
+diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
+index bfb4d01..5207035 100644
+--- a/arch/ia64/kernel/acpi.c
++++ b/arch/ia64/kernel/acpi.c
+@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
+ static struct acpi_table_slit __initdata *slit_table;
+ cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
+ 
+-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
++static int __init
++get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+ {
+ 	int pxm;
+ 
+ 	pxm = pa->proximity_domain_lo;
+-	if (ia64_platform_is("sn2"))
++	if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
+ 		pxm += pa->proximity_domain_hi[0] << 8;
+ 	return pxm;
+ }
+ 
+-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
++static int __init
++get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+ {
+ 	int pxm;
+ 
+ 	pxm = ma->proximity_domain;
+-	if (!ia64_platform_is("sn2"))
++	if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
+ 		pxm &= 0xff;
+ 
+ 	return pxm;
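Both ia64 hunks key the proximity-domain width to the new acpi_srat_revision global (added in drivers/acpi/numa.c further down): revision 1 SRATs define only the low byte of the domain, while revision 2 widens it, so the high bits must be folded in (or masked off) accordingly. A sketch of the widened CPU-affinity decode, mirroring the fields used above (the helper name is hypothetical; the x86 srat.c hunk below reads the remaining high bytes with a single cast):

    /* SRAT rev >= 2: the domain is wider than 8 bits; low byte in
     * proximity_domain_lo, higher bytes in proximity_domain_hi[]. */
    static int srat_cpu_pxm(struct acpi_srat_cpu_affinity *pa, int rev)
    {
    	int pxm = pa->proximity_domain_lo;

    	if (rev >= 2)
    		pxm |= pa->proximity_domain_hi[0] << 8;
    	return pxm;
    }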
+diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
+index 577abba..83bb960 100644
+--- a/arch/score/kernel/entry.S
++++ b/arch/score/kernel/entry.S
+@@ -408,7 +408,7 @@ ENTRY(handle_sys)
+ 	sw	r9, [r0, PT_EPC]
+ 
+ 	cmpi.c	r27, __NR_syscalls 	# check syscall number
+-	bgtu	illegal_syscall
++	bgeu	illegal_syscall
+ 
+ 	slli	r8, r27, 2		# get syscall routine
+ 	la	r11, sys_call_table
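The score fix is a classic off-by-one in the syscall bounds check: valid numbers index a table of __NR_syscalls entries, i.e. 0 through __NR_syscalls - 1, so the trap branch must be taken when the number is greater than *or equal to* the limit (bgeu), not only strictly greater (bgtu). The C equivalent of the corrected check:

    /* sys_call_table has __NR_syscalls entries, indices 0..__NR_syscalls-1 */
    if (nr >= __NR_syscalls)	/* old code effectively tested nr > __NR_syscalls */
    	goto illegal_syscall;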
+diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
+index 8e41071..49ad773 100644
+--- a/arch/x86/include/asm/amd_nb.h
++++ b/arch/x86/include/asm/amd_nb.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_X86_AMD_NB_H
+ #define _ASM_X86_AMD_NB_H
+ 
++#include <linux/ioport.h>
+ #include <linux/pci.h>
+ 
+ struct amd_nb_bus_dev_range {
+@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
+ extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
+ 
+ extern bool early_is_amd_nb(u32 value);
++extern struct resource *amd_get_mmconfig_range(struct resource *res);
+ extern int amd_cache_northbridges(void);
+ extern void amd_flush_garts(void);
+ extern int amd_numa_init(void);
+diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
+index 8e862aa..1b82f7e 100644
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -65,7 +65,7 @@
+  * UV2: Bit 19 selects between
+  *  (0): 10 microsecond timebase and
+  *  (1): 80 microseconds
+- *  we're using 655us, similar to UV1: 65 units of 10us
++ *  we're using 560us, similar to UV1: 65 units of 10us
+  */
+ #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
+ #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
+@@ -167,6 +167,7 @@
+ #define FLUSH_RETRY_TIMEOUT		2
+ #define FLUSH_GIVEUP			3
+ #define FLUSH_COMPLETE			4
++#define FLUSH_RETRY_BUSYBUG		5
+ 
+ /*
+  * tuning the action when the numalink network is extremely delayed
+@@ -235,10 +236,10 @@ struct bau_msg_payload {
+ 
+ 
+ /*
+- * Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
++ * UV1 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+  * see table 4.2.3.0.1 in broacast_assist spec.
+  */
+-struct bau_msg_header {
++struct uv1_bau_msg_header {
+ 	unsigned int	dest_subnodeid:6;	/* must be 0x10, for the LB */
+ 	/* bits 5:0 */
+ 	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
+@@ -318,19 +319,87 @@ struct bau_msg_header {
+ };
+ 
+ /*
++ * UV2 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
++ * see figure 9-2 of harp_sys.pdf
++ */
++struct uv2_bau_msg_header {
++	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
++	/* bits 14:0 */				/* in uvhub map */
++	unsigned int	dest_subnodeid:5;	/* must be 0x10, for the LB */
++	/* bits 19:15 */
++	unsigned int	rsvd_1:1;		/* must be zero */
++	/* bit 20 */
++	/* Address bits 59:21 */
++	/* bits 25:2 of address (44:21) are payload */
++	/* these next 24 bits become bytes 12-14 of msg */
++	/* bits 28:21 land in byte 12 */
++	unsigned int	replied_to:1;		/* sent as 0 by the source to
++						   byte 12 */
++	/* bit 21 */
++	unsigned int	msg_type:3;		/* software type of the
++						   message */
++	/* bits 24:22 */
++	unsigned int	canceled:1;		/* message canceled, resource
++						   is to be freed*/
++	/* bit 25 */
++	unsigned int	payload_1:3;		/* not currently used */
++	/* bits 28:26 */
++
++	/* bits 36:29 land in byte 13 */
++	unsigned int	payload_2a:3;		/* not currently used */
++	unsigned int	payload_2b:5;		/* not currently used */
++	/* bits 36:29 */
++
++	/* bits 44:37 land in byte 14 */
++	unsigned int	payload_3:8;		/* not currently used */
++	/* bits 44:37 */
++
++	unsigned int	rsvd_2:7;		/* reserved */
++	/* bits 51:45 */
++	unsigned int	swack_flag:1;		/* software acknowledge flag */
++	/* bit 52 */
++	unsigned int	rsvd_3a:3;		/* must be zero */
++	unsigned int	rsvd_3b:8;		/* must be zero */
++	unsigned int	rsvd_3c:8;		/* must be zero */
++	unsigned int	rsvd_3d:3;		/* must be zero */
++	/* bits 74:53 */
++	unsigned int	fairness:3;		/* usually zero */
++	/* bits 77:75 */
++
++	unsigned int	sequence:16;		/* message sequence number */
++	/* bits 93:78  Suppl_A  */
++	unsigned int	chaining:1;		/* next descriptor is part of
++						   this activation*/
++	/* bit 94 */
++	unsigned int	multilevel:1;		/* multi-level multicast
++						   format */
++	/* bit 95 */
++	unsigned int	rsvd_4:24;		/* ordered / source node /
++						   source subnode / aging
++						   must be zero */
++	/* bits 119:96 */
++	unsigned int	command:8;		/* message type */
++	/* bits 127:120 */
++};
++
++/*
+  * The activation descriptor:
+  * The format of the message to send, plus all accompanying control
+  * Should be 64 bytes
+  */
+ struct bau_desc {
+-	struct pnmask			distribution;
++	struct pnmask				distribution;
+ 	/*
+ 	 * message template, consisting of header and payload:
+ 	 */
+-	struct bau_msg_header		header;
+-	struct bau_msg_payload		payload;
++	union bau_msg_header {
++		struct uv1_bau_msg_header	uv1_hdr;
++		struct uv2_bau_msg_header	uv2_hdr;
++	} header;
++
++	struct bau_msg_payload			payload;
+ };
+-/*
++/* UV1:
+  *   -payload--    ---------header------
+  *   bytes 0-11    bits 41-56  bits 58-81
+  *       A           B  (2)      C (3)
+@@ -340,6 +409,16 @@ struct bau_desc {
+  *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
+  *   ------------payload queue-----------
+  */
++/* UV2:
++ *   -payload--    ---------header------
++ *   bytes 0-11    bits 70-78  bits 21-44
++ *       A           B  (2)      C (3)
++ *
++ *            A/B/C are moved to:
++ *       A            C          B
++ *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
++ *   ------------payload queue-----------
++ */
+ 
+ /*
+  * The payload queue on the destination side is an array of these.
+@@ -385,7 +464,6 @@ struct bau_pq_entry {
+ struct msg_desc {
+ 	struct bau_pq_entry	*msg;
+ 	int			msg_slot;
+-	int			swack_slot;
+ 	struct bau_pq_entry	*queue_first;
+ 	struct bau_pq_entry	*queue_last;
+ };
+@@ -439,6 +517,9 @@ struct ptc_stats {
+ 	unsigned long	s_retry_messages;	/* retry broadcasts */
+ 	unsigned long	s_bau_reenabled;	/* for bau enable/disable */
+ 	unsigned long	s_bau_disabled;		/* for bau enable/disable */
++	unsigned long	s_uv2_wars;		/* uv2 workaround, perm. busy */
++	unsigned long	s_uv2_wars_hw;		/* uv2 workaround, hiwater */
++	unsigned long	s_uv2_war_waits;	/* uv2 workaround, long waits */
+ 	/* destination statistics */
+ 	unsigned long	d_alltlb;		/* times all tlb's on this
+ 						   cpu were flushed */
+@@ -511,9 +592,12 @@ struct bau_control {
+ 	short			osnode;
+ 	short			uvhub_cpu;
+ 	short			uvhub;
++	short			uvhub_version;
+ 	short			cpus_in_socket;
+ 	short			cpus_in_uvhub;
+ 	short			partition_base_pnode;
++	short			using_desc; /* an index, like uvhub_cpu */
++	unsigned int		inuse_map;
+ 	unsigned short		message_number;
+ 	unsigned short		uvhub_quiesce;
+ 	short			socket_acknowledge_count[DEST_Q_SIZE];
+@@ -531,6 +615,7 @@ struct bau_control {
+ 	int			cong_response_us;
+ 	int			cong_reps;
+ 	int			cong_period;
++	unsigned long		clocks_per_100_usec;
+ 	cycles_t		period_time;
+ 	long			period_requests;
+ 	struct hub_and_pnode	*thp;
+@@ -591,6 +676,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
+ 	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+ }
+ 
++static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
++{
++	write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
++}
++
+ static inline unsigned long read_mmr_sw_ack(void)
+ {
+ 	return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 4c39baa..bae1efe 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
+ 	return false;
+ }
+ 
++struct resource *amd_get_mmconfig_range(struct resource *res)
++{
++	u32 address;
++	u64 base, msr;
++	unsigned segn_busn_bits;
++
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++		return NULL;
++
++	/* assume all cpus from fam10h have mmconfig */
++        if (boot_cpu_data.x86 < 0x10)
++		return NULL;
++
++	address = MSR_FAM10H_MMIO_CONF_BASE;
++	rdmsrl(address, msr);
++
++	/* mmconfig is not enabled */
++	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
++		return NULL;
++
++	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
++
++	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
++			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
++
++	res->flags = IORESOURCE_MEM;
++	res->start = base;
++	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
++	return res;
++}
++
+ int amd_get_subcaches(int cpu)
+ {
+ 	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 9d59bba..79b05b8 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -769,7 +769,12 @@ void __init uv_system_init(void)
+ 	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+ 		uv_possible_blades +=
+ 		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
+-	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
++
++	/* uv_num_possible_blades() is really the hub count */
++	printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
++			is_uv1_hub() ? uv_num_possible_blades() :
++			(uv_num_possible_blades() + 1) / 2,
++			uv_num_possible_blades());
+ 
+ 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
+ 	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 4b5ba85..845df68 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
+ 	*/
+ 	if (current->flags & PF_RANDOMIZE) {
+ 		if (mmap_is_ia32())
+-			rnd = (long)get_random_int() % (1<<8);
++			rnd = get_random_int() % (1<<8);
+ 		else
+-			rnd = (long)(get_random_int() % (1<<28));
++			rnd = get_random_int() % (1<<28);
+ 	}
+ 	return rnd << PAGE_SHIFT;
+ }
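Dropping the (long) casts fixes a sign-extension bug: get_random_int() returns unsigned int, and on a 32-bit kernel casting a value above INT_MAX to long turns it negative, so the modulo yielded a negative remainder and rnd << PAGE_SHIFT walked outside the intended randomization range. A small hosted-C demonstration of the arithmetic (an extra (int) cast forces the 32-bit behaviour so it also reproduces on 64-bit hosts):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int r = 0xdeadbeefu;		/* a plausible get_random_int() value */
    	long bad  = (long)(int)r % (1 << 8);	/* sign-extended: -17 */
    	long good = r % (1 << 8);		/* unsigned math: 239 */
    	printf("bad=%ld good=%ld\n", bad, good);
    	return 0;
    }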
+diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
+index 81dbfde..7efd0c6 100644
+--- a/arch/x86/mm/srat.c
++++ b/arch/x86/mm/srat.c
+@@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+ 	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ 		return;
+ 	pxm = pa->proximity_domain_lo;
++	if (acpi_srat_revision >= 2)
++		pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
+ 	node = setup_node(pxm);
+ 	if (node < 0) {
+ 		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
+@@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+ 	start = ma->base_address;
+ 	end = start + ma->length;
+ 	pxm = ma->proximity_domain;
++	if (acpi_srat_revision <= 1)
++		pxm &= 0xff;
+ 	node = setup_node(pxm);
+ 	if (node < 0) {
+ 		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
+diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
+index 6b8759f..d24d3da 100644
+--- a/arch/x86/pci/Makefile
++++ b/arch/x86/pci/Makefile
+@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ)		+= numaq_32.o
+ obj-$(CONFIG_X86_MRST)		+= mrst.o
+ 
+ obj-y				+= common.o early.o
+-obj-y				+= amd_bus.o bus_numa.o
++obj-y				+= bus_numa.o
+ 
++obj-$(CONFIG_AMD_NB)		+= amd_bus.o
+ obj-$(CONFIG_PCI_CNB20LE_QUIRK)	+= broadcom_bus.o
+ 
+ ifeq ($(CONFIG_PCI_DEBUG),y)
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 404f21a..f8348ab 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
+ 	struct acpi_resource_address64 addr;
+ 	acpi_status status;
+ 	unsigned long flags;
+-	u64 start, end;
++	u64 start, orig_end, end;
+ 
+ 	status = resource_to_addr(acpi_res, &addr);
+ 	if (!ACPI_SUCCESS(status))
+@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
+ 		return AE_OK;
+ 
+ 	start = addr.minimum + addr.translation_offset;
+-	end = addr.maximum + addr.translation_offset;
++	orig_end = end = addr.maximum + addr.translation_offset;
++
++	/* Exclude non-addressable range or non-addressable portion of range */
++	end = min(end, (u64)iomem_resource.end);
++	if (end <= start) {
++		dev_info(&info->bridge->dev,
++			"host bridge window [%#llx-%#llx] "
++			"(ignored, not CPU addressable)\n", start, orig_end);
++		return AE_OK;
++	} else if (orig_end != end) {
++		dev_info(&info->bridge->dev,
++			"host bridge window [%#llx-%#llx] "
++			"([%#llx-%#llx] ignored, not CPU addressable)\n",
++			start, orig_end, end + 1, orig_end);
++	}
+ 
+ 	res = &info->res[info->res_num];
+ 	res->name = info->name;
+diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
+index 026e493..385a940 100644
+--- a/arch/x86/pci/amd_bus.c
++++ b/arch/x86/pci/amd_bus.c
+@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
+ 	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
+ };
+ 
+-static u64 __initdata fam10h_mmconf_start;
+-static u64 __initdata fam10h_mmconf_end;
+-static void __init get_pci_mmcfg_amd_fam10h_range(void)
+-{
+-	u32 address;
+-	u64 base, msr;
+-	unsigned segn_busn_bits;
+-
+-	/* assume all cpus from fam10h have mmconf */
+-        if (boot_cpu_data.x86 < 0x10)
+-		return;
+-
+-	address = MSR_FAM10H_MMIO_CONF_BASE;
+-	rdmsrl(address, msr);
+-
+-	/* mmconfig is not enable */
+-	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+-		return;
+-
+-	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+-
+-	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+-			 FAM10H_MMIO_CONF_BUSRANGE_MASK;
+-
+-	fam10h_mmconf_start = base;
+-	fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+-}
+-
+ #define RANGE_NUM 16
+ 
+ /**
+@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
+ 	u64 val;
+ 	u32 address;
+ 	bool found;
++	struct resource fam10h_mmconf_res, *fam10h_mmconf;
++	u64 fam10h_mmconf_start;
++	u64 fam10h_mmconf_end;
+ 
+ 	if (!early_pci_allowed())
+ 		return -1;
+@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
+ 		subtract_range(range, RANGE_NUM, 0, end);
+ 
+ 	/* get mmconfig */
+-	get_pci_mmcfg_amd_fam10h_range();
++	fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
+ 	/* need to take out mmconf range */
+-	if (fam10h_mmconf_end) {
+-		printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
++	if (fam10h_mmconf) {
++		printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
++		fam10h_mmconf_start = fam10h_mmconf->start;
++		fam10h_mmconf_end = fam10h_mmconf->end;
+ 		subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
+ 				 fam10h_mmconf_end + 1);
++	} else {
++		fam10h_mmconf_start = 0;
++		fam10h_mmconf_end = 0;
+ 	}
+ 
+ 	/* mmio resource */
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 5b55219..9010ca7 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
+  * clear of the Timeout bit (as well) will free the resource. No reply will
+  * be sent (the hardware will only do one reply per message).
+  */
+-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
++static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
++						int do_acknowledge)
+ {
+ 	unsigned long dw;
+ 	struct bau_pq_entry *msg;
+ 
+ 	msg = mdp->msg;
+-	if (!msg->canceled) {
++	if (!msg->canceled && do_acknowledge) {
+ 		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
+ 		write_mmr_sw_ack(dw);
+ 	}
+@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
+ 			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
+ 				unsigned long mr;
+ 				/*
+-				 * is the resource timed out?
+-				 * make everyone ignore the cancelled message.
++				 * Is the resource timed out?
++				 * Make everyone ignore the cancelled message.
+ 				 */
+ 				msg2->canceled = 1;
+ 				stat->d_canceled++;
+@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
+  * Do all the things a cpu should do for a TLB shootdown message.
+  * Other cpu's may come here at the same time for this message.
+  */
+-static void bau_process_message(struct msg_desc *mdp,
+-					struct bau_control *bcp)
++static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
++						int do_acknowledge)
+ {
+ 	short socket_ack_count = 0;
+ 	short *sp;
+@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
+ 		if (msg_ack_count == bcp->cpus_in_uvhub) {
+ 			/*
+ 			 * All cpus in uvhub saw it; reply
++			 * (unless we are in the UV2 workaround)
+ 			 */
+-			reply_to_message(mdp, bcp);
++			reply_to_message(mdp, bcp, do_acknowledge);
+ 		}
+ 	}
+ 
+@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
+ /*
+  * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
+  */
+-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
++static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
+ {
+ 	unsigned long descriptor_status;
+ 	unsigned long descriptor_status2;
+ 
+ 	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
+-	descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
++	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
+ 	descriptor_status = (descriptor_status << 1) | descriptor_status2;
+ 	return descriptor_status;
+ }
+ 
++/*
++ * Return whether the status of the descriptor that is normally used for this
++ * cpu (the one indexed by its hub-relative cpu number) is busy.
++ * The status of the original 32 descriptors is always reflected in the 64
++ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
++ * The bit provided by the activation_status_2 register is irrelevant to
++ * the status if it is only being tested for busy or not busy.
++ */
++int normal_busy(struct bau_control *bcp)
++{
++	int cpu = bcp->uvhub_cpu;
++	int mmr_offset;
++	int right_shift;
++
++	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
++	right_shift = cpu * UV_ACT_STATUS_SIZE;
++	return (((((read_lmmr(mmr_offset) >> right_shift) &
++				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
++}
++
++/*
++ * Entered when a bau descriptor has gone into a permanent busy wait because
++ * of a hardware bug.
++ * Workaround the bug.
++ */
++int handle_uv2_busy(struct bau_control *bcp)
++{
++	int busy_one = bcp->using_desc;
++	int normal = bcp->uvhub_cpu;
++	int selected = -1;
++	int i;
++	unsigned long descriptor_status;
++	unsigned long status;
++	int mmr_offset;
++	struct bau_desc *bau_desc_old;
++	struct bau_desc *bau_desc_new;
++	struct bau_control *hmaster = bcp->uvhub_master;
++	struct ptc_stats *stat = bcp->statp;
++	cycles_t ttm;
++
++	stat->s_uv2_wars++;
++	spin_lock(&hmaster->uvhub_lock);
++	/* try for the original first */
++	if (busy_one != normal) {
++		if (!normal_busy(bcp))
++			selected = normal;
++	}
++	if (selected < 0) {
++		/* can't use the normal, select an alternate */
++		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
++		descriptor_status = read_lmmr(mmr_offset);
++
++		/* scan available descriptors 32-63 */
++		for (i = 0; i < UV_CPUS_PER_AS; i++) {
++			if ((hmaster->inuse_map & (1 << i)) == 0) {
++				status = ((descriptor_status >>
++						(i * UV_ACT_STATUS_SIZE)) &
++						UV_ACT_STATUS_MASK) << 1;
++				if (status != UV2H_DESC_BUSY) {
++					selected = i + UV_CPUS_PER_AS;
++					break;
++				}
++			}
++		}
++	}
++
++	if (busy_one != normal)
++		/* mark the busy alternate as not in-use */
++		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
++
++	if (selected >= 0) {
++		/* switch to the selected descriptor */
++		if (selected != normal) {
++			/* set the selected alternate as in-use */
++			hmaster->inuse_map |=
++					(1 << (selected - UV_CPUS_PER_AS));
++			if (selected > stat->s_uv2_wars_hw)
++				stat->s_uv2_wars_hw = selected;
++		}
++		bau_desc_old = bcp->descriptor_base;
++		bau_desc_old += (ITEMS_PER_DESC * busy_one);
++		bcp->using_desc = selected;
++		bau_desc_new = bcp->descriptor_base;
++		bau_desc_new += (ITEMS_PER_DESC * selected);
++		*bau_desc_new = *bau_desc_old;
++	} else {
++		/*
++		 * All are busy. Wait for the normal one for this cpu to
++		 * free up.
++		 */
++		stat->s_uv2_war_waits++;
++		spin_unlock(&hmaster->uvhub_lock);
++		ttm = get_cycles();
++		do {
++			cpu_relax();
++		} while (normal_busy(bcp));
++		spin_lock(&hmaster->uvhub_lock);
++		/* switch to the original descriptor */
++		bcp->using_desc = normal;
++		bau_desc_old = bcp->descriptor_base;
++		bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
++		bcp->using_desc = (ITEMS_PER_DESC * normal);
++		bau_desc_new = bcp->descriptor_base;
++		bau_desc_new += (ITEMS_PER_DESC * normal);
++		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
++	}
++	spin_unlock(&hmaster->uvhub_lock);
++	return FLUSH_RETRY_BUSYBUG;
++}
++
+ static int uv2_wait_completion(struct bau_desc *bau_desc,
+ 				unsigned long mmr_offset, int right_shift,
+ 				struct bau_control *bcp, long try)
+ {
+ 	unsigned long descriptor_stat;
+ 	cycles_t ttm;
+-	int cpu = bcp->uvhub_cpu;
++	int desc = bcp->using_desc;
++	long busy_reps = 0;
+ 	struct ptc_stats *stat = bcp->statp;
+ 
+-	descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
++	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
+ 
+ 	/* spin on the status MMR, waiting for it to go idle */
+ 	while (descriptor_stat != UV2H_DESC_IDLE) {
+@@ -542,12 +655,23 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
+ 			bcp->conseccompletes = 0;
+ 			return FLUSH_RETRY_TIMEOUT;
+ 		} else {
++			busy_reps++;
++			if (busy_reps > 1000000) {
++				/* not to hammer on the clock */
++				busy_reps = 0;
++				ttm = get_cycles();
++				if ((ttm - bcp->send_message) >
++					(bcp->clocks_per_100_usec)) {
++					return handle_uv2_busy(bcp);
++				}
++			}
+ 			/*
+ 			 * descriptor_stat is still BUSY
+ 			 */
+ 			cpu_relax();
+ 		}
+-		descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
++		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
++									desc);
+ 	}
+ 	bcp->conseccompletes++;
+ 	return FLUSH_COMPLETE;
+@@ -563,17 +687,17 @@ static int wait_completion(struct bau_desc *bau_desc,
+ {
+ 	int right_shift;
+ 	unsigned long mmr_offset;
+-	int cpu = bcp->uvhub_cpu;
++	int desc = bcp->using_desc;
+ 
+-	if (cpu < UV_CPUS_PER_AS) {
++	if (desc < UV_CPUS_PER_AS) {
+ 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+-		right_shift = cpu * UV_ACT_STATUS_SIZE;
++		right_shift = desc * UV_ACT_STATUS_SIZE;
+ 	} else {
+ 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+-		right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
++		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+ 	}
+ 
+-	if (is_uv1_hub())
++	if (bcp->uvhub_version == 1)
+ 		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
+ 								bcp, try);
+ 	else
+@@ -752,19 +876,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
+  * Returns 1 if it gives up entirely and the original cpu mask is to be
+  * returned to the kernel.
+  */
+-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+-			struct cpumask *flush_mask, struct bau_control *bcp)
++int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
+ {
+ 	int seq_number = 0;
+ 	int completion_stat = 0;
++	int uv1 = 0;
+ 	long try = 0;
+ 	unsigned long index;
+ 	cycles_t time1;
+ 	cycles_t time2;
+ 	struct ptc_stats *stat = bcp->statp;
+ 	struct bau_control *hmaster = bcp->uvhub_master;
++	struct uv1_bau_msg_header *uv1_hdr = NULL;
++	struct uv2_bau_msg_header *uv2_hdr = NULL;
++	struct bau_desc *bau_desc;
+ 
+-	if (is_uv1_hub())
++	if (bcp->uvhub_version == 1)
+ 		uv1_throttle(hmaster, stat);
+ 
+ 	while (hmaster->uvhub_quiesce)
+@@ -772,22 +899,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ 
+ 	time1 = get_cycles();
+ 	do {
+-		if (try == 0) {
+-			bau_desc->header.msg_type = MSG_REGULAR;
++		bau_desc = bcp->descriptor_base;
++		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
++		if (bcp->uvhub_version == 1) {
++			uv1 = 1;
++			uv1_hdr = &bau_desc->header.uv1_hdr;
++		} else
++			uv2_hdr = &bau_desc->header.uv2_hdr;
++		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
++			if (uv1)
++				uv1_hdr->msg_type = MSG_REGULAR;
++			else
++				uv2_hdr->msg_type = MSG_REGULAR;
+ 			seq_number = bcp->message_number++;
+ 		} else {
+-			bau_desc->header.msg_type = MSG_RETRY;
++			if (uv1)
++				uv1_hdr->msg_type = MSG_RETRY;
++			else
++				uv2_hdr->msg_type = MSG_RETRY;
+ 			stat->s_retry_messages++;
+ 		}
+ 
+-		bau_desc->header.sequence = seq_number;
+-		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
++		if (uv1)
++			uv1_hdr->sequence = seq_number;
++		else
++			uv2_hdr->sequence = seq_number;
++		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
+ 		bcp->send_message = get_cycles();
+ 
+ 		write_mmr_activation(index);
+ 
+ 		try++;
+ 		completion_stat = wait_completion(bau_desc, bcp, try);
++		/* UV2: wait_completion() may change the bcp->using_desc */
+ 
+ 		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
+ 
+@@ -798,6 +942,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ 		}
+ 		cpu_relax();
+ 	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
++		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
+ 		 (completion_stat == FLUSH_RETRY_TIMEOUT));
+ 
+ 	time2 = get_cycles();
+@@ -812,6 +957,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ 	record_send_stats(time1, time2, bcp, stat, completion_stat, try);
+ 
+ 	if (completion_stat == FLUSH_GIVEUP)
++		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
+ 		return 1;
+ 	return 0;
+ }
+@@ -967,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ 		stat->s_ntargself++;
+ 
+ 	bau_desc = bcp->descriptor_base;
+-	bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
++	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+ 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ 	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
+ 		return NULL;
+@@ -980,13 +1126,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+ 	 * or 1 if it gave up and the original cpumask should be returned.
+ 	 */
+-	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
++	if (!uv_flush_send_and_wait(flush_mask, bcp))
+ 		return NULL;
+ 	else
+ 		return cpumask;
+ }
+ 
+ /*
++ * Search the message queue for any 'other' message with the same software
++ * acknowledge resource bit vector.
++ */
++struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
++			struct bau_control *bcp, unsigned char swack_vec)
++{
++	struct bau_pq_entry *msg_next = msg + 1;
++
++	if (msg_next > bcp->queue_last)
++		msg_next = bcp->queue_first;
++	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
++		if (msg_next->swack_vec == swack_vec)
++			return msg_next;
++		msg_next++;
++		if (msg_next > bcp->queue_last)
++			msg_next = bcp->queue_first;
++	}
++	return NULL;
++}
++
++/*
++ * UV2 needs to work around a bug in which an arriving message has not
++ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
++ * Such a message must be ignored.
++ */
++void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
++{
++	unsigned long mmr_image;
++	unsigned char swack_vec;
++	struct bau_pq_entry *msg = mdp->msg;
++	struct bau_pq_entry *other_msg;
++
++	mmr_image = read_mmr_sw_ack();
++	swack_vec = msg->swack_vec;
++
++	if ((swack_vec & mmr_image) == 0) {
++		/*
++		 * This message was assigned a swack resource, but no
++		 * reserved acknowlegment is pending.
++		 * The bug has prevented this message from setting the MMR.
++		 * And no other message has used the same sw_ack resource.
++		 * Do the requested shootdown but do not reply to the msg.
++		 * (the 0 means make no acknowledge)
++		 */
++		bau_process_message(mdp, bcp, 0);
++		return;
++	}
++
++	/*
++	 * Some message has set the MMR 'pending' bit; it might have been
++	 * another message.  Look for that message.
++	 */
++	other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
++	if (other_msg) {
++		/* There is another.  Do not ack the current one. */
++		bau_process_message(mdp, bcp, 0);
++		/*
++		 * Let the natural processing of that message acknowledge
++		 * it. Don't get the processing of sw_ack's out of order.
++		 */
++		return;
++	}
++
++	/*
++	 * There is no other message using this sw_ack, so it is safe to
++	 * acknowledge it.
++	 */
++	bau_process_message(mdp, bcp, 1);
++
++	return;
++}
++
++/*
+  * The BAU message interrupt comes here. (registered by set_intr_gate)
+  * See entry_64.S
+  *
+@@ -1022,9 +1241,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
+ 		count++;
+ 
+ 		msgdesc.msg_slot = msg - msgdesc.queue_first;
+-		msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
+ 		msgdesc.msg = msg;
+-		bau_process_message(&msgdesc, bcp);
++		if (bcp->uvhub_version == 2)
++			process_uv2_message(&msgdesc, bcp);
++		else
++			bau_process_message(&msgdesc, bcp, 1);
+ 
+ 		msg++;
+ 		if (msg > msgdesc.queue_last)
+@@ -1083,7 +1304,7 @@ static void __init enable_timeouts(void)
+ 		 */
+ 		mmr_image |= (1L << SOFTACK_MSHIFT);
+ 		if (is_uv2_hub()) {
+-			mmr_image |= (1L << UV2_LEG_SHFT);
++			mmr_image &= ~(1L << UV2_LEG_SHFT);
+ 			mmr_image |= (1L << UV2_EXT_SHFT);
+ 		}
+ 		write_mmr_misc_control(pnode, mmr_image);
+@@ -1142,7 +1363,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
+ 		seq_printf(file,
+ 			"all one mult none retry canc nocan reset rcan ");
+ 		seq_printf(file,
+-			"disable enable\n");
++			"disable enable wars warshw warwaits\n");
+ 	}
+ 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
+ 		stat = &per_cpu(ptcstats, cpu);
+@@ -1173,8 +1394,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
+ 			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
+ 			   stat->d_nocanceled, stat->d_resets,
+ 			   stat->d_rcanceled);
+-		seq_printf(file, "%ld %ld\n",
+-			stat->s_bau_disabled, stat->s_bau_reenabled);
++		seq_printf(file, "%ld %ld %ld %ld %ld\n",
++			stat->s_bau_disabled, stat->s_bau_reenabled,
++			stat->s_uv2_wars, stat->s_uv2_wars_hw,
++			stat->s_uv2_war_waits);
+ 	}
+ 	return 0;
+ }
+@@ -1432,12 +1655,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
+ {
+ 	int i;
+ 	int cpu;
++	int uv1 = 0;
+ 	unsigned long gpa;
+ 	unsigned long m;
+ 	unsigned long n;
+ 	size_t dsize;
+ 	struct bau_desc *bau_desc;
+ 	struct bau_desc *bd2;
++	struct uv1_bau_msg_header *uv1_hdr;
++	struct uv2_bau_msg_header *uv2_hdr;
+ 	struct bau_control *bcp;
+ 
+ 	/*
+@@ -1451,6 +1677,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
+ 	gpa = uv_gpa(bau_desc);
+ 	n = uv_gpa_to_gnode(gpa);
+ 	m = uv_gpa_to_offset(gpa);
++	if (is_uv1_hub())
++		uv1 = 1;
+ 
+ 	/* the 14-bit pnode */
+ 	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
+@@ -1461,21 +1689,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
+ 	 */
+ 	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
+ 		memset(bd2, 0, sizeof(struct bau_desc));
+-		bd2->header.swack_flag =	1;
+-		/*
+-		 * The base_dest_nasid set in the message header is the nasid
+-		 * of the first uvhub in the partition. The bit map will
+-		 * indicate destination pnode numbers relative to that base.
+-		 * They may not be consecutive if nasid striding is being used.
+-		 */
+-		bd2->header.base_dest_nasid =	UV_PNODE_TO_NASID(base_pnode);
+-		bd2->header.dest_subnodeid =	UV_LB_SUBNODEID;
+-		bd2->header.command =		UV_NET_ENDPOINT_INTD;
+-		bd2->header.int_both =		1;
+-		/*
+-		 * all others need to be set to zero:
+-		 *   fairness chaining multilevel count replied_to
+-		 */
++		if (uv1) {
++			uv1_hdr = &bd2->header.uv1_hdr;
++			uv1_hdr->swack_flag =	1;
++			/*
++			 * The base_dest_nasid set in the message header
++			 * is the nasid of the first uvhub in the partition.
++			 * The bit map will indicate destination pnode numbers
++			 * relative to that base. They may not be consecutive
++			 * if nasid striding is being used.
++			 */
++			uv1_hdr->base_dest_nasid =
++						UV_PNODE_TO_NASID(base_pnode);
++			uv1_hdr->dest_subnodeid =	UV_LB_SUBNODEID;
++			uv1_hdr->command =		UV_NET_ENDPOINT_INTD;
++			uv1_hdr->int_both =		1;
++			/*
++			 * all others need to be set to zero:
++			 *   fairness chaining multilevel count replied_to
++			 */
++		} else {
++			uv2_hdr = &bd2->header.uv2_hdr;
++			uv2_hdr->swack_flag =	1;
++			uv2_hdr->base_dest_nasid =
++						UV_PNODE_TO_NASID(base_pnode);
++			uv2_hdr->dest_subnodeid =	UV_LB_SUBNODEID;
++			uv2_hdr->command =		UV_NET_ENDPOINT_INTD;
++		}
+ 	}
+ 	for_each_present_cpu(cpu) {
+ 		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
+@@ -1531,6 +1771,7 @@ static void pq_init(int node, int pnode)
+ 	write_mmr_payload_first(pnode, pn_first);
+ 	write_mmr_payload_tail(pnode, first);
+ 	write_mmr_payload_last(pnode, last);
++	write_gmmr_sw_ack(pnode, 0xffffUL);
+ 
+ 	/* in effect, all msg_type's are set to MSG_NOOP */
+ 	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
+@@ -1584,14 +1825,14 @@ static int calculate_destination_timeout(void)
+ 		ts_ns = base * mult1 * mult2;
+ 		ret = ts_ns / 1000;
+ 	} else {
+-		/* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
+-		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
++		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
++		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+ 		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ 		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+-			mult1 = 80;
++			base = 80;
+ 		else
+-			mult1 = 10;
+-		base = mmr_image & UV2_ACK_MASK;
++			base = 10;
++		mult1 = mmr_image & UV2_ACK_MASK;
+ 		ret = mult1 * base;
+ 	}
+ 	return ret;
+@@ -1618,6 +1859,7 @@ static void __init init_per_cpu_tunables(void)
+ 		bcp->cong_response_us		= congested_respns_us;
+ 		bcp->cong_reps			= congested_reps;
+ 		bcp->cong_period		= congested_period;
++		bcp->clocks_per_100_usec =	usec_2_cycles(100);
+ 	}
+ }
+ 
+@@ -1728,8 +1970,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
+ 		bcp->cpus_in_socket = sdp->num_cpus;
+ 		bcp->socket_master = *smasterp;
+ 		bcp->uvhub = bdp->uvhub;
++		if (is_uv1_hub())
++			bcp->uvhub_version = 1;
++		else if (is_uv2_hub())
++			bcp->uvhub_version = 2;
++		else {
++			printk(KERN_EMERG "uvhub version not 1 or 2\n");
++			return 1;
++		}
+ 		bcp->uvhub_master = *hmasterp;
+ 		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
++		bcp->using_desc = bcp->uvhub_cpu;
+ 		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
+ 			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
+ 				bcp->uvhub_cpu);
+@@ -1845,6 +2096,8 @@ static int __init uv_bau_init(void)
+ 			uv_base_pnode = uv_blade_to_pnode(uvhub);
+ 	}
+ 
++	enable_timeouts();
++
+ 	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
+ 		nobau = 1;
+ 		return 0;
+@@ -1855,7 +2108,6 @@ static int __init uv_bau_init(void)
+ 		if (uv_blade_nr_possible_cpus(uvhub))
+ 			init_uvhub(uvhub, vector, uv_base_pnode);
+ 
+-	enable_timeouts();
+ 	alloc_intr_gate(vector, uv_bau_message_intr1);
+ 
+ 	for_each_possible_blade(uvhub) {
+@@ -1867,7 +2119,8 @@ static int __init uv_bau_init(void)
+ 			val = 1L << 63;
+ 			write_gmmr_activation(pnode, val);
+ 			mmr = 1; /* should be 1 to broadcast to both sockets */
+-			write_mmr_data_broadcast(pnode, mmr);
++			if (!is_uv1_hub())
++				write_mmr_data_broadcast(pnode, mmr);
+ 		}
+ 	}
+ 
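Taken together, the tlb_uv.c changes implement the UV2 busy-bug workaround: uv2_wait_completion() now notices a descriptor stuck busy for more than ~100 us (using the new clocks_per_100_usec field), and handle_uv2_busy() migrates the send to a spare descriptor (slots 32-63, tracked in inuse_map) before returning the new FLUSH_RETRY_BUSYBUG status. The send loop then treats that status like the other retryable ones; condensed from the hunks above:

    do {
    	bau_desc = bcp->descriptor_base +
    			ITEMS_PER_DESC * bcp->using_desc;
    	/* (re)build the header, write_mmr_activation(), ... */
    	completion_stat = wait_completion(bau_desc, bcp, ++try);
    	/* wait_completion() may have switched bcp->using_desc */
    } while (completion_stat == FLUSH_RETRY_PLUGGED ||
    	 completion_stat == FLUSH_RETRY_BUSYBUG ||
    	 completion_stat == FLUSH_RETRY_TIMEOUT);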
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index fbdf0d8..688be8a 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -24,6 +24,7 @@
+ #include <linux/capability.h>
+ #include <linux/completion.h>
+ #include <linux/cdrom.h>
++#include <linux/ratelimit.h>
+ #include <linux/slab.h>
+ #include <linux/times.h>
+ #include <asm/uaccess.h>
+@@ -690,6 +691,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
+ }
+ EXPORT_SYMBOL(scsi_cmd_ioctl);
+ 
++int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
++{
++	if (bd && bd == bd->bd_contains)
++		return 0;
++
++	/* Actually none of these is particularly useful on a partition,
++	 * but they are safe.
++	 */
++	switch (cmd) {
++	case SCSI_IOCTL_GET_IDLUN:
++	case SCSI_IOCTL_GET_BUS_NUMBER:
++	case SCSI_IOCTL_GET_PCI:
++	case SCSI_IOCTL_PROBE_HOST:
++	case SG_GET_VERSION_NUM:
++	case SG_SET_TIMEOUT:
++	case SG_GET_TIMEOUT:
++	case SG_GET_RESERVED_SIZE:
++	case SG_SET_RESERVED_SIZE:
++	case SG_EMULATED_HOST:
++		return 0;
++	case CDROM_GET_CAPABILITY:
++		/* Keep this until we remove the printk below.  udev sends it
++		 * and we do not want to spam dmesg about it.   CD-ROMs do
++		 * not have partitions, so we get here only for disks.
++		 */
++		return -ENOTTY;
++	default:
++		break;
++	}
++
++	/* In particular, rule out all resets and host-specific ioctls.  */
++	printk_ratelimited(KERN_WARNING
++			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
++
++	return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
++}
++EXPORT_SYMBOL(scsi_verify_blk_ioctl);
++
++int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
++		       unsigned int cmd, void __user *arg)
++{
++	int ret;
++
++	ret = scsi_verify_blk_ioctl(bd, cmd);
++	if (ret < 0)
++		return ret;
++
++	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
++}
++EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
++
+ static int __init blk_scsi_ioctl_init(void)
+ {
+ 	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
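The new scsi_cmd_blk_ioctl() wrapper lets block drivers reject SCSI ioctls aimed at a partition (rather than the whole device) before forwarding to scsi_cmd_ioctl(); the cciss, ub, virtio_blk and cdrom hunks below all convert to it. A minimal caller sketch following the same pattern (driver name hypothetical):

    static int mydisk_ioctl(struct block_device *bdev, fmode_t mode,
    			unsigned int cmd, unsigned long arg)
    {
    	/* refuses e.g. resets sent to a partition unless CAP_SYS_RAWIO */
    	return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)arg);
    }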
+diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
+index 8c7b997..42163d8 100644
+--- a/drivers/acpi/acpica/dsargs.c
++++ b/drivers/acpi/acpica/dsargs.c
+@@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+ 	status = acpi_ds_execute_arguments(node, node->parent,
+ 					   extra_desc->extra.aml_length,
+ 					   extra_desc->extra.aml_start);
++	if (ACPI_FAILURE(status)) {
++		return_ACPI_STATUS(status);
++	}
++
++	/* Validate the region address/length via the host OS */
++
++	status = acpi_os_validate_address(obj_desc->region.space_id,
++					  obj_desc->region.address,
++					  (acpi_size) obj_desc->region.length,
++					  acpi_ut_get_node_name(node));
++
++	if (ACPI_FAILURE(status)) {
++		/*
++		 * Invalid address/length. We will emit an error message and mark
++		 * the region as invalid, so that it will cause an additional error if
++		 * it is ever used. Then return AE_OK.
++		 */
++		ACPI_EXCEPTION((AE_INFO, status,
++				"During address validation of OpRegion [%4.4s]",
++				node->name.ascii));
++		obj_desc->common.flags |= AOPOBJ_INVALID;
++		status = AE_OK;
++	}
++
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
+index 3b5c318..e56f3be 100644
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
+ static int node_to_pxm_map[MAX_NUMNODES]
+ 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ 
++unsigned char acpi_srat_revision __initdata;
++
+ int pxm_to_node(int pxm)
+ {
+ 	if (pxm < 0)
+@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
+ 
+ static int __init acpi_parse_srat(struct acpi_table_header *table)
+ {
++	struct acpi_table_srat *srat;
+ 	if (!table)
+ 		return -EINVAL;
+ 
++	srat = (struct acpi_table_srat *)table;
++	acpi_srat_revision = srat->header.revision;
++
+ 	/* Real work done in acpi_table_parse_srat below. */
+ 
+ 	return 0;
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index 3a0428e..c850de4 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ 	apic_id = map_mat_entry(handle, type, acpi_id);
+ 	if (apic_id == -1)
+ 		apic_id = map_madt_entry(type, acpi_id);
+-	if (apic_id == -1)
+-		return apic_id;
++	if (apic_id == -1) {
++		/*
++		 * On UP processor, there is no _MAT or MADT table.
++		 * So above apic_id is always set to -1.
++		 *
++		 * BIOS may define multiple CPU handles even for UP processor.
++		 * For example,
++		 *
++		 * Scope (_PR)
++                 * {
++		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
++		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
++		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
++		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
++		 * }
++		 *
++		 * Ignores apic_id and always return 0 for CPU0's handle.
++		 * Return -1 for other CPU's handle.
++		 */
++		if (acpi_id == 0)
++			return acpi_id;
++		else
++			return apic_id;
++	}
+ 
+ #ifdef CONFIG_SMP
+ 	for_each_possible_cpu(i) {
+diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
+index 990f5a8..48e06be 100644
+--- a/drivers/bcma/host_pci.c
++++ b/drivers/bcma/host_pci.c
+@@ -227,11 +227,14 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
+ #ifdef CONFIG_PM
+ static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+ {
++	struct bcma_bus *bus = pci_get_drvdata(dev);
++
+ 	/* Host specific */
+ 	pci_save_state(dev);
+ 	pci_disable_device(dev);
+ 	pci_set_power_state(dev, pci_choose_state(dev, state));
+ 
++	bus->mapped_core = NULL;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index 587cce5..b0f553b 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ 	case CCISS_BIG_PASSTHRU:
+ 		return cciss_bigpassthru(h, argp);
+ 
+-	/* scsi_cmd_ioctl handles these, below, though some are not */
++	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
+ 	/* very meaningful for cciss.  SG_IO is the main one people want. */
+ 
+ 	case SG_GET_VERSION_NUM:
+@@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ 	case SG_EMULATED_HOST:
+ 	case SG_IO:
+ 	case SCSI_IOCTL_SEND_COMMAND:
+-		return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
++		return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ 
+-	/* scsi_cmd_ioctl would normally handle these, below, but */
++	/* scsi_cmd_blk_ioctl would normally handle these, below, but */
+ 	/* they aren't a good fit for cciss, as CD-ROMs are */
+ 	/* not supported, and we don't have any bus/target/lun */
+ 	/* which we present to the kernel. */
+diff --git a/drivers/block/ub.c b/drivers/block/ub.c
+index 0e376d4..7333b9e 100644
+--- a/drivers/block/ub.c
++++ b/drivers/block/ub.c
+@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
+ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
+     unsigned int cmd, unsigned long arg)
+ {
+-	struct gendisk *disk = bdev->bd_disk;
+ 	void __user *usermem = (void __user *) arg;
+ 	int ret;
+ 
+ 	mutex_lock(&ub_mutex);
+-	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
++	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
+ 	mutex_unlock(&ub_mutex);
+ 
+ 	return ret;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 4d0b70a..e46f2f7 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -243,8 +243,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ 	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ 		return -ENOTTY;
+ 
+-	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
+-			      (void __user *)data);
++	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
++				  (void __user *)data);
+ }
+ 
+ /* We provide getgeo only to please some old bootloader/partitioning tools */
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index f997c27..cedb231 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2747,12 +2747,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ {
+ 	void __user *argp = (void __user *)arg;
+ 	int ret;
+-	struct gendisk *disk = bdev->bd_disk;
+ 
+ 	/*
+ 	 * Try the generic SCSI command ioctl's first.
+ 	 */
+-	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
++	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ 	if (ret != -ENOTTY)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index bfc08f6..31b0d1a 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -2177,6 +2177,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
+ void r100_bm_disable(struct radeon_device *rdev)
+ {
+ 	u32 tmp;
++	u16 tmp16;
+ 
+ 	/* disable bus mastering */
+ 	tmp = RREG32(R_000030_BUS_CNTL);
+@@ -2187,8 +2188,8 @@ void r100_bm_disable(struct radeon_device *rdev)
+ 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ 	tmp = RREG32(RADEON_BUS_CNTL);
+ 	mdelay(1);
+-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+-	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
++	pci_read_config_word(rdev->pdev, 0x4, &tmp16);
++	pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
+ 	mdelay(1);
+ }
+ 
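The radeon changes at config offset 0x4 are type-correctness repairs: pci_read_config_word() stores 16 bits, so aiming it at a u32 through a cast fills only half the variable (and the wrong half on big-endian), leaving stale bytes in the value written back. Offset 0x4 is the PCI command register and the 0xFFFB mask clears bit 2, bus-master enable; with the symbolic names, the same operation reads (a sketch, not part of the patch):

    u16 cmd;

    pci_read_config_word(rdev->pdev, PCI_COMMAND, &cmd);
    pci_write_config_word(rdev->pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MASTER);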
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index f5ac7e7..c45d921 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
+ 	frame[0xD] = (right_bar >> 8);
+ 
+ 	r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
++	/* Our header values (type, version, length) should be alright, Intel
++	 * is using the same. Checksum function also seems to be OK, it works
++	 * fine for audio infoframe. However calculated value is always lower
++	 * by 2 in comparison to fglrx. It breaks displaying anything in case
++	 * of TVs that strictly check the checksum. Hack it manually here to
++	 * workaround this issue. */
++	frame[0x0] += 2;
+ 
+ 	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c4d00a1..9b39145 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
+ 	if (radeon_no_wb == 1)
+ 		rdev->wb.enabled = false;
+ 	else {
+-		/* often unreliable on AGP */
+ 		if (rdev->flags & RADEON_IS_AGP) {
++			/* often unreliable on AGP */
++			rdev->wb.enabled = false;
++		} else if (rdev->family < CHIP_R300) {
++			/* often unreliable on pre-r300 */
+ 			rdev->wb.enabled = false;
+ 		} else {
+ 			rdev->wb.enabled = true;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index b1053d6..c259e21 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
+ 
+ void rs600_bm_disable(struct radeon_device *rdev)
+ {
+-	u32 tmp;
++	u16 tmp;
+ 
+ 	/* disable bus mastering */
+-	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
++	pci_read_config_word(rdev->pdev, 0x4, &tmp);
+ 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ 	mdelay(1);
+ }
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 22a4a05..d21f6d0 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -335,6 +335,7 @@ config HID_MULTITOUCH
+ 	  Say Y here if you have one of the following devices:
+ 	  - 3M PCT touch screens
+ 	  - ActionStar dual touch panels
++	  - Atmel panels
+ 	  - Cando dual touch panels
+ 	  - Chunghwa panels
+ 	  - CVTouch panels
+@@ -355,6 +356,7 @@ config HID_MULTITOUCH
+ 	  - Touch International Panels
+ 	  - Unitec Panels
+ 	  - XAT optical touch panels
++	  - Xiroku optical touch panels
+ 
+ 	  If unsure, say N.
+ 
+@@ -620,6 +622,7 @@ config HID_WIIMOTE
+ 	depends on BT_HIDP
+ 	depends on LEDS_CLASS
+ 	select POWER_SUPPLY
++	select INPUT_FF_MEMLESS
+ 	---help---
+ 	Support for the Nintendo Wii Remote bluetooth device.
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index af35384..bb656d8 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -362,7 +362,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+ 
+ 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
+ 		parser->global.report_size = item_udata(item);
+-		if (parser->global.report_size > 32) {
++		if (parser->global.report_size > 96) {
+ 			dbg_hid("invalid report_size %d\n",
+ 					parser->global.report_size);
+ 			return -1;
+@@ -1404,11 +1404,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+@@ -1423,6 +1425,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+  	{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
+@@ -1549,6 +1552,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 4a441a6..00cabb3 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -21,6 +21,7 @@
+ #define USB_VENDOR_ID_3M		0x0596
+ #define USB_DEVICE_ID_3M1968		0x0500
+ #define USB_DEVICE_ID_3M2256		0x0502
++#define USB_DEVICE_ID_3M3266		0x0506
+ 
+ #define USB_VENDOR_ID_A4TECH		0x09da
+ #define USB_DEVICE_ID_A4TECH_WCP32PU	0x0006
+@@ -145,6 +146,9 @@
+ #define USB_DEVICE_ID_ATEN_4PORTKVM	0x2205
+ #define USB_DEVICE_ID_ATEN_4PORTKVMC	0x2208
+ 
++#define USB_VENDOR_ID_ATMEL		0x03eb
++#define USB_DEVICE_ID_ATMEL_MULTITOUCH	0x211c
++
+ #define USB_VENDOR_ID_AVERMEDIA		0x07ca
+ #define USB_DEVICE_ID_AVER_FM_MR800	0xb800
+ 
+@@ -230,11 +234,14 @@
+ 
+ #define USB_VENDOR_ID_DWAV		0x0eef
+ #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH	0x480d
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1	0x720c
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2	0x72a1
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3	0x480e
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4	0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D	0x480d
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E	0x480e
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C	0x720c
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B	0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1	0x72a1
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA	0x72fa
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302	0x7302
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001	0xa001
+ 
+ #define USB_VENDOR_ID_ELECOM		0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+@@ -356,6 +363,9 @@
+ #define USB_VENDOR_ID_HANVON		0x20b3
+ #define USB_DEVICE_ID_HANVON_MULTITOUCH	0x0a18
+ 
++#define USB_VENDOR_ID_HANVON_ALT	0x22ed
++#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH	0x1010
++
+ #define USB_VENDOR_ID_HAPP		0x078b
+ #define USB_DEVICE_ID_UGCI_DRIVING	0x0010
+ #define USB_DEVICE_ID_UGCI_FLYING	0x0020
+@@ -707,6 +717,17 @@
+ #define USB_VENDOR_ID_XAT	0x2505
+ #define USB_DEVICE_ID_XAT_CSR	0x0220
+ 
++#define USB_VENDOR_ID_XIROKU		0x1477
++#define USB_DEVICE_ID_XIROKU_SPX	0x1006
++#define USB_DEVICE_ID_XIROKU_MPX	0x1007
++#define USB_DEVICE_ID_XIROKU_CSR	0x100e
++#define USB_DEVICE_ID_XIROKU_SPX1	0x1021
++#define USB_DEVICE_ID_XIROKU_CSR1	0x1022
++#define USB_DEVICE_ID_XIROKU_MPX1	0x1023
++#define USB_DEVICE_ID_XIROKU_SPX2	0x1024
++#define USB_DEVICE_ID_XIROKU_CSR2	0x1025
++#define USB_DEVICE_ID_XIROKU_MPX2	0x1026
++
+ #define USB_VENDOR_ID_YEALINK		0x6993
+ #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K	0xb001
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index f1c909f..995fc4c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -609,12 +609,20 @@ static const struct hid_device_id mt_devices[] = {
+ 	{ .driver_data = MT_CLS_3M,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_3M,
+ 			USB_DEVICE_ID_3M2256) },
++	{ .driver_data = MT_CLS_3M,
++		HID_USB_DEVICE(USB_VENDOR_ID_3M,
++			USB_DEVICE_ID_3M3266) },
+ 
+ 	/* ActionStar panels */
+ 	{ .driver_data = MT_CLS_DEFAULT,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
+ 			USB_DEVICE_ID_ACTIONSTAR_1011) },
+ 
++	/* Atmel panels */
++	{ .driver_data = MT_CLS_SERIAL,
++		HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
++			USB_DEVICE_ID_ATMEL_MULTITOUCH) },
++
+ 	/* Cando panels */
+ 	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+@@ -645,23 +653,32 @@ static const struct hid_device_id mt_devices[] = {
+ 			USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ 
+ 	/* eGalax devices (resistive) */
+-	{  .driver_data = MT_CLS_EGALAX,
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+-	{  .driver_data = MT_CLS_EGALAX,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+ 
+ 	/* eGalax devices (capacitive) */
+-	{  .driver_data = MT_CLS_EGALAX,
++	{ .driver_data = MT_CLS_EGALAX,
++		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+-	{  .driver_data = MT_CLS_EGALAX,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+-	{  .driver_data = MT_CLS_EGALAX,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
++	{ .driver_data = MT_CLS_EGALAX,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+-			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
++	{ .driver_data = MT_CLS_EGALAX,
++		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
++	{ .driver_data = MT_CLS_EGALAX,
++		HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ 
+ 	/* Elo TouchSystems IntelliTouch Plus panel */
+ 	{ .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
+@@ -678,6 +695,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
+ 			USB_DEVICE_ID_GOODTOUCH_000f) },
+ 
++	/* Hanvon panels */
++	{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
++		HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
++			USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
++
+ 	/* Ideacom panel */
+ 	{ .driver_data = MT_CLS_SERIAL,
+ 		HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
+@@ -758,6 +780,35 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_USB_DEVICE(USB_VENDOR_ID_XAT,
+ 			USB_DEVICE_ID_XAT_CSR) },
+ 
++	/* Xiroku */
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_SPX) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_MPX) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_CSR) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_SPX1) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_MPX1) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_CSR1) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_SPX2) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_MPX2) },
++	{ .driver_data = MT_CLS_DEFAULT,
++		HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++			USB_DEVICE_ID_XIROKU_CSR2) },
++
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, mt_devices);
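
The hid-multitouch hunks above work entirely through the mt_devices table: each entry binds a USB vendor/product pair to a driver_data class that selects the per-panel quirks at probe time, and MODULE_DEVICE_TABLE(hid, ...) exports the table so userspace can autoload the module on hotplug. A minimal sketch of the pattern, using hypothetical IDs rather than any entry from the patch:

    #include <linux/hid.h>
    #include <linux/module.h>

    /* Hypothetical IDs, for illustration only. */
    #define USB_VENDOR_ID_EXAMPLE		0x1234
    #define USB_DEVICE_ID_EXAMPLE_MT	0x0001
    #define MT_CLS_EXAMPLE_DEFAULT		1

    static const struct hid_device_id example_mt_devices[] = {
    	{ .driver_data = MT_CLS_EXAMPLE_DEFAULT,
    		HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE,
    			USB_DEVICE_ID_EXAMPLE_MT) },
    	{ }	/* zeroed sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(hid, example_mt_devices);
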
+diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
+index b6807db..5b667e5 100644
+--- a/drivers/i2c/busses/i2c-ali1535.c
++++ b/drivers/i2c/busses/i2c-ali1535.c
+@@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
+    defined to make the transition easier. */
+ static int __devinit ali1535_setup(struct pci_dev *dev)
+ {
+-	int retval = -ENODEV;
++	int retval;
+ 	unsigned char temp;
+ 
+ 	/* Check the following things:
+@@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	if (ali1535_smba == 0) {
+ 		dev_warn(&dev->dev,
+ 			"ALI1535_smb region uninitialized - upgrade BIOS?\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 
+@@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 			    ali1535_driver.name)) {
+ 		dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
+ 			ali1535_smba);
++		retval = -EBUSY;
+ 		goto exit;
+ 	}
+ 
+@@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	pci_read_config_byte(dev, SMBCFG, &temp);
+ 	if ((temp & ALI1535_SMBIO_EN) == 0) {
+ 		dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
++		retval = -ENODEV;
+ 		goto exit_free;
+ 	}
+ 
+@@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	pci_read_config_byte(dev, SMBHSTCFG, &temp);
+ 	if ((temp & 1) == 0) {
+ 		dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
++		retval = -ENODEV;
+ 		goto exit_free;
+ 	}
+ 
+@@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ 	dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
+ 	dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
+ 
+-	retval = 0;
+-exit:
+-	return retval;
++	return 0;
+ 
+ exit_free:
+ 	release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
++exit:
+ 	return retval;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 18936ac..730215e 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -243,7 +243,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
+ 	if (pch_clk > PCH_MAX_CLK)
+ 		pch_clk = 62500;
+ 
+-	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
++	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
+ 	/* Set transfer speed in I2CBC */
+ 	iowrite32(pch_i2cbc, p + PCH_I2CBC);
+ 
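The i2c-eg20t hunk is a pure operator-precedence fix: `/` and `*` have equal precedence and associate left to right, so the old expression divided by pch_i2c_speed and then multiplied by 8, yielding a divisor 64 times too large. A standalone sketch with illustrative numbers (not the driver's actual clock defaults):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int pch_clk = 50000;	/* illustrative values only */
    	unsigned int speed = 100;

    	/* Old: parses as ((pch_clk + speed*4) / speed) * 8. */
    	unsigned int wrong = (pch_clk + speed * 4) / speed * 8;
    	/* New: parentheses force division by (speed * 8). */
    	unsigned int right = (pch_clk + speed * 4) / (speed * 8);

    	printf("%u vs %u\n", wrong, right);	/* prints "4032 vs 63" */
    	return 0;
    }
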
+diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
+index ff1e127..4853b52 100644
+--- a/drivers/i2c/busses/i2c-nforce2.c
++++ b/drivers/i2c/busses/i2c-nforce2.c
+@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
+ 	error = acpi_check_region(smbus->base, smbus->size,
+ 				  nforce2_driver.name);
+ 	if (error)
+-		return -1;
++		return error;
+ 
+ 	if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
+ 		dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index fa23faa..257c1a5 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -235,7 +235,7 @@ static const u8 reg_map_ip_v2[] = {
+ 	[OMAP_I2C_BUF_REG] = 0x94,
+ 	[OMAP_I2C_CNT_REG] = 0x98,
+ 	[OMAP_I2C_DATA_REG] = 0x9c,
+-	[OMAP_I2C_SYSC_REG] = 0x20,
++	[OMAP_I2C_SYSC_REG] = 0x10,
+ 	[OMAP_I2C_CON_REG] = 0xa4,
+ 	[OMAP_I2C_OA_REG] = 0xa8,
+ 	[OMAP_I2C_SA_REG] = 0xac,
+diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
+index 4375866..6d60284 100644
+--- a/drivers/i2c/busses/i2c-sis5595.c
++++ b/drivers/i2c/busses/i2c-sis5595.c
+@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+ 	u16 a;
+ 	u8 val;
+ 	int *i;
+-	int retval = -ENODEV;
++	int retval;
+ 
+ 	/* Look for imposters */
+ 	for (i = blacklist; *i != 0; i++) {
+@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+ 
+ error:
+ 	release_region(sis5595_base + SMB_INDEX, 2);
+-	return retval;
++	return -ENODEV;
+ }
+ 
+ static int sis5595_transaction(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
+index e6f539e..b617fd0 100644
+--- a/drivers/i2c/busses/i2c-sis630.c
++++ b/drivers/i2c/busses/i2c-sis630.c
+@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ {
+ 	unsigned char b;
+ 	struct pci_dev *dummy = NULL;
+-	int retval = -ENODEV, i;
++	int retval, i;
+ 
+ 	/* check for supported SiS devices */
+ 	for (i=0; supported[i] > 0 ; i++) {
+@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ 	*/
+ 	if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
+ 		dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 	/* if ACPI already enabled , do nothing */
+ 	if (!(b & 0x80) &&
+ 	    pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
+ 		dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 
+ 	/* Determine the ACPI base address */
+ 	if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
+ 		dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
++		retval = -ENODEV;
+ 		goto exit;
+ 	}
+ 
+@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ 			    sis630_driver.name)) {
+ 		dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
+ 			"in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
++		retval = -EBUSY;
+ 		goto exit;
+ 	}
+ 
+diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
+index 0b012f1..58261d4 100644
+--- a/drivers/i2c/busses/i2c-viapro.c
++++ b/drivers/i2c/busses/i2c-viapro.c
+@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
+ 				 const struct pci_device_id *id)
+ {
+ 	unsigned char temp;
+-	int error = -ENODEV;
++	int error;
+ 
+ 	/* Determine the address of the SMBus areas */
+ 	if (force_addr) {
+@@ -390,6 +390,7 @@ found:
+ 			dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
+ 				"controller not enabled! - upgrade BIOS or "
+ 				"use force=1\n");
++			error = -ENODEV;
+ 			goto release_region;
+ 		}
+ 	}
+@@ -422,9 +423,11 @@ found:
+ 		 "SMBus Via Pro adapter at %04x", vt596_smba);
+ 
+ 	vt596_pdev = pci_dev_get(pdev);
+-	if (i2c_add_adapter(&vt596_adapter)) {
++	error = i2c_add_adapter(&vt596_adapter);
++	if (error) {
+ 		pci_dev_put(vt596_pdev);
+ 		vt596_pdev = NULL;
++		goto release_region;
+ 	}
+ 
+ 	/* Always return failure here.  This is to allow other drivers to bind
+diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
+index d267b7a..a22ca84 100644
+--- a/drivers/ide/ide-floppy_ioctl.c
++++ b/drivers/ide/ide-floppy_ioctl.c
+@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ 	 * and CDROM_SEND_PACKET (legacy) ioctls
+ 	 */
+ 	if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+-		err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
+-				mode, cmd, argp);
++		err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ 
+ 	if (err == -ENOTTY)
+ 		err = generic_ide_ioctl(drive, bdev, cmd, arg);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 5d2f8e1..5b39216 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -348,7 +348,8 @@ static int intel_idle_probe(void)
+ 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ 
+ 	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+-		!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
++	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
++	    !mwait_substates)
+ 			return -ENODEV;
+ 
+ 	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+@@ -394,7 +395,7 @@ static int intel_idle_probe(void)
+ 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
+ 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ 	else {
+-		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
++		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+ 		register_cpu_notifier(&setup_broadcast_notifier);
+ 	}
+ 
+@@ -471,7 +472,7 @@ static int intel_idle_cpuidle_driver_init(void)
+ 	}
+ 
+ 	if (auto_demotion_disable_flags)
+-		smp_call_function(auto_demotion_disable, NULL, 1);
++		on_each_cpu(auto_demotion_disable, NULL, 1);
+ 
+ 	return 0;
+ }
+@@ -568,7 +569,7 @@ static void __exit intel_idle_exit(void)
+ 	cpuidle_unregister_driver(&intel_idle_driver);
+ 
+ 	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+-		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
++		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
+ 		unregister_cpu_notifier(&setup_broadcast_notifier);
+ 	}
+ 
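All three intel_idle changes swap smp_call_function() for on_each_cpu(). The two look interchangeable, but smp_call_function() runs the callback on every CPU except the calling one, so the boot CPU's broadcast-timer and auto-demotion setup was silently skipped; on_each_cpu() additionally invokes the callback locally, with interrupts disabled. A sketch of the contrast (the two calls are shown side by side only to compare their coverage):

    #include <linux/smp.h>

    static void per_cpu_setup(void *info)
    {
    	/* per-CPU MSR writes etc.; must be safe with IRQs off,
    	 * since on_each_cpu() runs it locally that way */
    }

    static void configure_cpus(void *arg)
    {
    	smp_call_function(per_cpu_setup, arg, 1); /* all CPUs but this one */
    	on_each_cpu(per_cpu_setup, arg, 1);       /* all CPUs incl. this one */
    }
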
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index f84c080..9fb18c1 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
+ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+ {
+ 	struct flakey_c *fc = ti->private;
++	struct dm_dev *dev = fc->dev;
++	int r = 0;
+ 
+-	return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
++	/*
++	 * Only pass ioctls through if the device sizes match exactly.
++	 */
++	if (fc->start ||
++	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
++
++	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ }
+ 
+ static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 3921e3b..9728839 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
+ 			unsigned long arg)
+ {
+ 	struct linear_c *lc = (struct linear_c *) ti->private;
+-	return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
++	struct dm_dev *dev = lc->dev;
++	int r = 0;
++
++	/*
++	 * Only pass ioctls through if the device sizes match exactly.
++	 */
++	if (lc->start ||
++	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
++
++	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ }
+ 
+ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 5e0090e..801d92d 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
+ 
+ 	spin_unlock_irqrestore(&m->lock, flags);
+ 
++	/*
++	 * Only pass ioctls through if the device sizes match exactly.
++	 */
++	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
++		r = scsi_verify_blk_ioctl(NULL, cmd);
++
+ 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ }
+ 
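dm-flakey, dm-linear and dm-multipath all gain the same guard: ioctls are forwarded to the underlying device unfiltered only when the target maps the entire device from sector 0; for any partial mapping, scsi_verify_blk_ioctl() must approve the command first, so an ioctl can no longer act on sectors outside the mapped range. A condensed sketch of the shared shape (field names follow the hunks above; this is not a complete dm target):

    static int sketch_tgt_ioctl(struct dm_target *ti, struct dm_dev *dev,
    			    sector_t start, unsigned int cmd,
    			    unsigned long arg)
    {
    	int r = 0;

    	/* Forward unchecked only if the target covers the whole device. */
    	if (start ||
    	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
    		r = scsi_verify_blk_ioctl(NULL, cmd);

    	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
    }
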
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index ede2461..7d9e071 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -525,8 +525,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ 		if (test_bit(WriteMostly, &rdev->flags)) {
+ 			/* Don't balance among write-mostly, just
+ 			 * use the first as a last resort */
+-			if (best_disk < 0)
++			if (best_disk < 0) {
++				if (is_badblock(rdev, this_sector, sectors,
++						&first_bad, &bad_sectors)) {
++					if (first_bad < this_sector)
++						/* Cannot use this */
++						continue;
++					best_good_sectors = first_bad - this_sector;
++				} else
++					best_good_sectors = sectors;
+ 				best_disk = disk;
++			}
+ 			continue;
+ 		}
+ 		/* This is a reasonable device to use.  It might
+diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
+index bcb45be..f0482b2 100644
+--- a/drivers/media/video/cx23885/cx23885-dvb.c
++++ b/drivers/media/video/cx23885/cx23885-dvb.c
+@@ -940,6 +940,11 @@ static int dvb_register(struct cx23885_tsport *port)
+ 
+ 			fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
+ 					&dev->i2c_bus[1].i2c_adap, &cfg);
++			if (!fe) {
++				printk(KERN_ERR "%s/2: xc4000 attach failed\n",
++				       dev->name);
++				goto frontend_detach;
++			}
+ 		}
+ 		break;
+ 	case CX23885_BOARD_TBS_6920:
+diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
+index 0d719fa..3929d93 100644
+--- a/drivers/media/video/cx88/cx88-cards.c
++++ b/drivers/media/video/cx88/cx88-cards.c
+@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
+ 		.name           = "Pinnacle Hybrid PCTV",
+ 		.tuner_type     = TUNER_XC2028,
+ 		.tuner_addr     = 0x61,
+-		.radio_type     = TUNER_XC2028,
+-		.radio_addr     = 0x61,
++		.radio_type     = UNSET,
++		.radio_addr     = ADDR_UNSET,
+ 		.input          = { {
+ 			.type   = CX88_VMUX_TELEVISION,
+ 			.vmux   = 0,
+@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
+ 		.name           = "Leadtek TV2000 XP Global",
+ 		.tuner_type     = TUNER_XC2028,
+ 		.tuner_addr     = 0x61,
+-		.radio_type     = TUNER_XC2028,
+-		.radio_addr     = 0x61,
++		.radio_type     = UNSET,
++		.radio_addr     = ADDR_UNSET,
+ 		.input          = { {
+ 			.type   = CX88_VMUX_TELEVISION,
+ 			.vmux   = 0,
+@@ -2043,8 +2043,8 @@ static const struct cx88_board cx88_boards[] = {
+ 		.name           = "Terratec Cinergy HT PCI MKII",
+ 		.tuner_type     = TUNER_XC2028,
+ 		.tuner_addr     = 0x61,
+-		.radio_type     = TUNER_XC2028,
+-		.radio_addr     = 0x61,
++		.radio_type     = UNSET,
++		.radio_addr     = ADDR_UNSET,
+ 		.input          = { {
+ 			.type   = CX88_VMUX_TELEVISION,
+ 			.vmux   = 0,
+@@ -2082,9 +2082,9 @@ static const struct cx88_board cx88_boards[] = {
+ 	[CX88_BOARD_WINFAST_DTV1800H] = {
+ 		.name           = "Leadtek WinFast DTV1800 Hybrid",
+ 		.tuner_type     = TUNER_XC2028,
+-		.radio_type     = TUNER_XC2028,
++		.radio_type     = UNSET,
+ 		.tuner_addr     = 0x61,
+-		.radio_addr     = 0x61,
++		.radio_addr     = ADDR_UNSET,
+ 		/*
+ 		 * GPIO setting
+ 		 *
+@@ -2123,9 +2123,9 @@ static const struct cx88_board cx88_boards[] = {
+ 	[CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
+ 		.name		= "Leadtek WinFast DTV1800 H (XC4000)",
+ 		.tuner_type	= TUNER_XC4000,
+-		.radio_type	= TUNER_XC4000,
++		.radio_type	= UNSET,
+ 		.tuner_addr	= 0x61,
+-		.radio_addr	= 0x61,
++		.radio_addr	= ADDR_UNSET,
+ 		/*
+ 		 * GPIO setting
+ 		 *
+@@ -2164,9 +2164,9 @@ static const struct cx88_board cx88_boards[] = {
+ 	[CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
+ 		.name		= "Leadtek WinFast DTV2000 H PLUS",
+ 		.tuner_type	= TUNER_XC4000,
+-		.radio_type	= TUNER_XC4000,
++		.radio_type	= UNSET,
+ 		.tuner_addr	= 0x61,
+-		.radio_addr	= 0x61,
++		.radio_addr	= ADDR_UNSET,
+ 		/*
+ 		 * GPIO
+ 		 *   2: 1: mute audio
+diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
+index dadf11f..cf7788f 100644
+--- a/drivers/media/video/uvc/uvc_v4l2.c
++++ b/drivers/media/video/uvc/uvc_v4l2.c
+@@ -58,6 +58,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ 		break;
+ 
+ 	case V4L2_CTRL_TYPE_MENU:
++		/* Prevent excessive memory consumption, as well as integer
++		 * overflows.
++		 */
++		if (xmap->menu_count == 0 ||
++		    xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
++			ret = -EINVAL;
++			goto done;
++		}
++
+ 		size = xmap->menu_count * sizeof(*map->menu_info);
+ 		map->menu_info = kmalloc(size, GFP_KERNEL);
+ 		if (map->menu_info == NULL) {
+diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
+index 4c1392e..bc446ba 100644
+--- a/drivers/media/video/uvc/uvcvideo.h
++++ b/drivers/media/video/uvc/uvcvideo.h
+@@ -113,6 +113,7 @@
+ 
+ /* Maximum allowed number of control mappings per device */
+ #define UVC_MAX_CONTROL_MAPPINGS	1024
++#define UVC_MAX_CONTROL_MENU_ENTRIES	32
+ 
+ /* Devices quirks */
+ #define UVC_QUIRK_STATUS_INTERVAL	0x00000001
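
Both the uvc_v4l2 hunk above and the v4l2-ioctl hunk below bound a user-controlled count before multiplying it by an element size: without the cap, `menu_count * sizeof(*map->menu_info)` can wrap, leaving a short allocation that a later copy overruns. A sketch of the guarded allocation, with a hypothetical element type:

    #include <linux/slab.h>
    #include <linux/types.h>

    #define MAX_ENTRIES 32	/* mirrors UVC_MAX_CONTROL_MENU_ENTRIES */

    struct menu_entry {	/* hypothetical element type */
    	u32 value;
    	char name[32];
    };

    static struct menu_entry *alloc_entries(unsigned int count)
    {
    	if (count == 0 || count > MAX_ENTRIES)	/* bound before multiply */
    		return NULL;
    	return kmalloc(count * sizeof(struct menu_entry), GFP_KERNEL);
    }
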
+diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
+index e1da8fc..639abee 100644
+--- a/drivers/media/video/v4l2-ioctl.c
++++ b/drivers/media/video/v4l2-ioctl.c
+@@ -2226,6 +2226,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ 		struct v4l2_ext_controls *ctrls = parg;
+ 
+ 		if (ctrls->count != 0) {
++			if (ctrls->count > V4L2_CID_MAX_CTRLS) {
++				ret = -EINVAL;
++				break;
++			}
+ 			*user_ptr = (void __user *)ctrls->controls;
+ 			*kernel_ptr = (void *)&ctrls->controls;
+ 			*array_size = sizeof(struct v4l2_ext_control)
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index d240427..fb7c27f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1048,7 +1048,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ 			 *
+ 			 * WARNING: eMMC rules are NOT the same as SD DDR
+ 			 */
+-			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
++			if (ddr == MMC_1_2V_DDR_MODE) {
+ 				err = mmc_set_signal_voltage(host,
+ 					MMC_SIGNAL_VOLTAGE_120, 0);
+ 				if (err)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 19ed580..6ce32a7 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1364,8 +1364,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ 		if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+ 		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ 		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
+-		    (ios->timing == MMC_TIMING_UHS_SDR25) ||
+-		    (ios->timing == MMC_TIMING_UHS_SDR12))
++		    (ios->timing == MMC_TIMING_UHS_SDR25))
+ 			ctrl |= SDHCI_CTRL_HISPD;
+ 
+ 		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+@@ -2336,9 +2335,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
+ 	/* Disable tuning since we are suspending */
+ 	if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
+ 	    host->tuning_mode == SDHCI_TUNING_MODE_1) {
++		del_timer_sync(&host->tuning_timer);
+ 		host->flags &= ~SDHCI_NEEDS_RETUNING;
+-		mod_timer(&host->tuning_timer, jiffies +
+-			host->tuning_count * HZ);
+ 	}
+ 
+ 	ret = mmc_suspend_host(host->mmc);
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index ed8b5e7..424ca5f 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ 	mutex_lock(&dev->lock);
+ 
+-	if (dev->open++)
++	if (dev->open)
+ 		goto unlock;
+ 
+ 	kref_get(&dev->ref);
+@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 		goto error_release;
+ 
+ unlock:
++	dev->open++;
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
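The mtd_blkdevs fix is about where the open count is bumped: the old `if (dev->open++)` incremented before the first-open setup could fail, so an aborted open left the count permanently raised. Moving the increment under the `unlock:` label means only opens that reached the success path are counted. The general shape, with a hypothetical type and helper standing in for the device-specific work:

    static int sketch_open(struct sketch_dev *dev)	/* hypothetical type */
    {
    	int ret = 0;

    	mutex_lock(&dev->lock);
    	if (dev->open)			/* already open: just count it */
    		goto unlock;

    	ret = first_open_work(dev);	/* hypothetical helper */
    	if (ret)
    		goto error;		/* count untouched on failure */
    unlock:
    	dev->open++;			/* only successful opens counted */
    error:
    	mutex_unlock(&dev->lock);
    	return ret;
    }
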
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index 1e2fa62..f3cdce9 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
+ 	size_t retlen;
+ 
+ 	for (page = 0; page < cxt->oops_pages; page++) {
++		if (mtd->block_isbad &&
++		    mtd->block_isbad(mtd, page * record_size))
++			continue;
+ 		/* Assume the page is used */
+ 		mark_page_used(cxt, page);
+ 		ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
+ 
+ 	/* oops_page_used is a bit field */
+ 	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
+-			BITS_PER_LONG));
++			BITS_PER_LONG) * sizeof(unsigned long));
+ 	if (!cxt->oops_page_used) {
+ 		printk(KERN_ERR "mtdoops: could not allocate page array\n");
+ 		return;
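The second mtdoops hunk fixes a units error: DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG) counts longs, but vmalloc() takes bytes, so the bitmap came out sizeof(unsigned long) times too small. For example, 4096 pages on a 64-bit kernel need 64 longs = 512 bytes, while the old code asked for only 64 bytes. The corrected sizing as a sketch:

    #include <linux/kernel.h>
    #include <linux/bitops.h>
    #include <linux/vmalloc.h>

    /* Size a page-used bitmap in bytes, not in longs. */
    static unsigned long *alloc_page_bitmap(unsigned int npages)
    {
    	size_t nlongs = DIV_ROUND_UP(npages, BITS_PER_LONG);

    	return vmalloc(nlongs * sizeof(unsigned long));
    }
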
+diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
+index 52ffd91..811642f 100644
+--- a/drivers/mtd/tests/mtd_stresstest.c
++++ b/drivers/mtd/tests/mtd_stresstest.c
+@@ -284,6 +284,12 @@ static int __init mtd_stresstest_init(void)
+ 	       (unsigned long long)mtd->size, mtd->erasesize,
+ 	       pgsize, ebcnt, pgcnt, mtd->oobsize);
+ 
++	if (ebcnt < 2) {
++		printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
++		err = -ENOSPC;
++		goto out_put_mtd;
++	}
++
+ 	/* Read or write up 2 eraseblocks at a time */
+ 	bufsize = mtd->erasesize * 2;
+ 
+@@ -322,6 +328,7 @@ out:
+ 	kfree(bbt);
+ 	vfree(writebuf);
+ 	vfree(readbuf);
++out_put_mtd:
+ 	put_mtd_device(mtd);
+ 	if (err)
+ 		printk(PRINT_PREF "error %d occurred\n", err);
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 3320a50..ad76592 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
+ 	if (req->alignment != 1 && n)
+ 		goto bad;
+ 
++	if (!req->name[0] || !req->name_len)
++		goto bad;
++
+ 	if (req->name_len > UBI_VOL_NAME_MAX) {
+ 		err = -ENAMETOOLONG;
+ 		goto bad;
+diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
+index 64fbb00..ead2cd1 100644
+--- a/drivers/mtd/ubi/debug.h
++++ b/drivers/mtd/ubi/debug.h
+@@ -43,7 +43,10 @@
+ 	pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
+ 
+ /* Just a debugging messages not related to any specific UBI subsystem */
+-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
++#define dbg_msg(fmt, ...)                                    \
++	printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
++	       current->pid, __func__, ##__VA_ARGS__)
++
+ /* General debugging messages */
+ #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+ /* Messages from the eraseblock association sub-system */
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index fb7f19b..cd26da8 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ 	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ 	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ 	 * LEB is already locked, we just do not move it and return
+-	 * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
++	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
++	 * we do not know the reasons of the contention - it may be just a
++	 * normal I/O on this LEB, so we want to re-try.
+ 	 */
+ 	err = leb_write_trylock(ubi, vol_id, lnum);
+ 	if (err) {
+ 		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+-		return MOVE_CANCEL_RACE;
++		return MOVE_RETRY;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index dc64c76..d51d75d 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -120,6 +120,7 @@ enum {
+  *                     PEB
+  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+  *                       target PEB
++ * MOVE_RETRY: retry scrubbing the PEB
+  */
+ enum {
+ 	MOVE_CANCEL_RACE = 1,
+@@ -127,6 +128,7 @@ enum {
+ 	MOVE_TARGET_RD_ERR,
+ 	MOVE_TARGET_WR_ERR,
+ 	MOVE_CANCEL_BITFLIPS,
++	MOVE_RETRY,
+ };
+ 
+ /**
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 9ad18da..890754c 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+ 		       int copy, void *vtbl)
+ {
+ 	int err, tries = 0;
+-	static struct ubi_vid_hdr *vid_hdr;
++	struct ubi_vid_hdr *vid_hdr;
+ 	struct ubi_scan_leb *new_seb;
+ 
+ 	ubi_msg("create volume table (copy #%d)", copy + 1);
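Dropping `static` from vid_hdr in create_vtbl() matters because a static local is one object shared across all invocations, not per-call storage: two concurrent calls would overwrite each other's pointer. The function allocates and frees the header itself, so automatic storage is the right lifetime. A minimal userspace illustration of the hazard:

    #include <stdlib.h>

    struct hdr { int id; };

    static void buggy(void)
    {
    	static struct hdr *p;	/* one slot shared by every caller */

    	p = malloc(sizeof(*p));
    	/* a concurrent call can reassign p here, leaking this one */
    	free(p);
    }

    static void fixed(void)
    {
    	struct hdr *p = malloc(sizeof(*p));	/* per-call storage */

    	free(p);
    }
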
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 42c684c..0696e36 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 			protect = 1;
+ 			goto out_not_moved;
+ 		}
+-
++		if (err == MOVE_RETRY) {
++			scrubbing = 1;
++			goto out_not_moved;
++		}
+ 		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ 		    err == MOVE_TARGET_RD_ERR) {
+ 			/*
+@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ 
+ 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
+ 	kfree(wl_wrk);
+-	kmem_cache_free(ubi_wl_entry_slab, e);
+ 
+ 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ 	    err == -EBUSY) {
+@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ 			goto out_ro;
+ 		}
+ 		return err;
+-	} else if (err != -EIO) {
++	}
++
++	kmem_cache_free(ubi_wl_entry_slab, e);
++	if (err != -EIO)
+ 		/*
+ 		 * If this is not %-EIO, we have no idea what to do. Scheduling
+ 		 * this physical eraseblock for erasure again would cause
+ 		 * errors again and again. Well, lets switch to R/O mode.
+ 		 */
+ 		goto out_ro;
+-	}
+ 
+ 	/* It is %-EIO, the PEB went bad */
+ 
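The erase_worker() rework is an ordering fix: the old code freed the wear-leveling entry `e` unconditionally and then, for transient errors (-EINTR, -ENOMEM, -EAGAIN, -EBUSY), re-queued the same work that still referenced it, a use-after-free on retry. The fix frees the entry only on paths that will not run again. The rule as a sketch, with hypothetical types and helpers:

    static int erase_once(struct work_item *w)	/* hypothetical */
    {
    	int err = do_erase(w->entry);

    	if (err == -EAGAIN || err == -EBUSY) {
    		requeue(w);		/* w->entry is used again: keep it */
    		return err;
    	}
    	free_entry(w->entry);		/* no further users: safe to free */
    	return err;
    }
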
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index dd2625a..f5e063a 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -974,6 +974,7 @@ static int ax88772_link_reset(struct usbnet *dev)
+ 
+ static int ax88772_reset(struct usbnet *dev)
+ {
++	struct asix_data *data = (struct asix_data *)&dev->data;
+ 	int ret, embd_phy;
+ 	u16 rx_ctl;
+ 
+@@ -1051,6 +1052,13 @@ static int ax88772_reset(struct usbnet *dev)
+ 		goto out;
+ 	}
+ 
++	/* Rewrite MAC address */
++	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
++	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
++							data->mac_addr);
++	if (ret < 0)
++		goto out;
++
+ 	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ 	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ 	if (ret < 0)
+@@ -1316,6 +1324,13 @@ static int ax88178_reset(struct usbnet *dev)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/* Rewrite MAC address */
++	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
++	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
++							data->mac_addr);
++	if (ret < 0)
++		return ret;
++
+ 	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+index ccde784..f5ae3c6 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+@@ -526,10 +526,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ 			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ 		else if (rxsp->status11 & AR_MichaelErr)
+ 			rxs->rs_status |= ATH9K_RXERR_MIC;
+-		if (rxsp->status11 & AR_KeyMiss)
+-			rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+ 	}
+ 
++	if (rxsp->status11 & AR_KeyMiss)
++		rxs->rs_status |= ATH9K_RXERR_KEYMISS;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index 9953881..8ddef3e 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -402,6 +402,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
+ 	ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ 	return true;
+ }
++EXPORT_SYMBOL(ath9k_hw_getnf);
+ 
+ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+ 				  struct ath9k_channel *chan)
+diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
+index ecdb6fd..bbcb777 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.c
++++ b/drivers/net/wireless/ath/ath9k/mac.c
+@@ -621,10 +621,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
+ 			rs->rs_status |= ATH9K_RXERR_DECRYPT;
+ 		else if (ads.ds_rxstatus8 & AR_MichaelErr)
+ 			rs->rs_status |= ATH9K_RXERR_MIC;
+-		if (ads.ds_rxstatus8 & AR_KeyMiss)
+-			rs->rs_status |= ATH9K_RXERR_KEYMISS;
+ 	}
+ 
++	if (ads.ds_rxstatus8 & AR_KeyMiss)
++		rs->rs_status |= ATH9K_RXERR_KEYMISS;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a9c5ae7..f76a814 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1667,7 +1667,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ 
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ 		struct ieee80211_channel *curchan = hw->conf.channel;
+-		struct ath9k_channel old_chan;
+ 		int pos = curchan->hw_value;
+ 		int old_pos = -1;
+ 		unsigned long flags;
+@@ -1693,11 +1692,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ 		 * Preserve the current channel values, before updating
+ 		 * the same channel
+ 		 */
+-		if (old_pos == pos) {
+-			memcpy(&old_chan, &sc->sc_ah->channels[pos],
+-				sizeof(struct ath9k_channel));
+-			ah->curchan = &old_chan;
+-		}
++		if (ah->curchan && (old_pos == pos))
++			ath9k_hw_getnf(ah, ah->curchan);
+ 
+ 		ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ 					  curchan, conf->channel_type);
+diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+index b282d86..05f2ad1 100644
+--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+@@ -2656,14 +2656,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+ 		IWL_WARN(priv, "Invalid scan band\n");
+ 		return -EIO;
+ 	}
+-
+ 	/*
+-	 * If active scaning is requested but a certain channel
+-	 * is marked passive, we can do active scanning if we
+-	 * detect transmissions.
++	 * If active scanning is requested but a certain channel is marked

++	 * passive, we can do active scanning if we detect transmissions. For
++	 * passive only scanning disable switching to active on any channel.
+ 	 */
+ 	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+-					IWL_GOOD_CRC_TH_DISABLED;
++					IWL_GOOD_CRC_TH_NEVER;
+ 
+ 	len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
+ 					vif->addr, priv->scan_request->ie,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+index 1a52ed2..6465983 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+@@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+ 	case IEEE80211_SMPS_STATIC:
+ 	case IEEE80211_SMPS_DYNAMIC:
+ 		return IWL_NUM_IDLE_CHAINS_SINGLE;
++	case IEEE80211_SMPS_AUTOMATIC:
+ 	case IEEE80211_SMPS_OFF:
+ 		return active_cnt;
+ 	default:
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+index 5c7c17c..d552fa3 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+@@ -559,6 +559,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+ 
+ 	mutex_lock(&priv->shrd->mutex);
+ 
++	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
++		goto out;
++
+ 	if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
+ 		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ 		goto out;
+diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
+index da48c8a..837b460 100644
+--- a/drivers/net/wireless/rt2x00/rt2800pci.c
++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
+@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
+ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ 				 enum dev_state state)
+ {
+-	int mask = (state == STATE_RADIO_IRQ_ON);
+ 	u32 reg;
+ 	unsigned long flags;
+ 
+@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ 	}
+ 
+ 	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+-	rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
+-	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
++	reg = 0;
++	if (state == STATE_RADIO_IRQ_ON) {
++		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
++		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
++	}
+ 	rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ 	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+ 
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+index 6f91a14..3fda6b1 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
+ 		/* Allocate skb buffer to contain firmware */
+ 		/* info and tx descriptor info. */
+ 		skb = dev_alloc_skb(frag_length);
++		if (!skb)
++			return false;
+ 		skb_reserve(skb, extra_descoffset);
+ 		seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
+ 					extra_descoffset));
+@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
+ 
+ 	len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
+ 	skb = dev_alloc_skb(len);
++	if (!skb)
++		return false;
+ 	cb_desc = (struct rtl_tcb_desc *)(skb->cb);
+ 	cb_desc->queue_index = TXCMD_QUEUE;
+ 	cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 0e6d04d..e3efb43 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -870,5 +870,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
+ 
+ void pci_msi_init_pci_dev(struct pci_dev *dev)
+ {
++	int pos;
+ 	INIT_LIST_HEAD(&dev->msi_list);
++
++	/* Disable the msi hardware to avoid screaming interrupts
++	 * during boot.  This is the power on reset default so
++	 * usually this should be a noop.
++	 */
++	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++	if (pos)
++		msi_set_enable(dev, pos, 0);
++	msix_set_enable(dev, 0);
+ }
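
The pci_msi_init_pci_dev() addition clears any MSI/MSI-X enable bits left set by firmware or by a previous kernel (e.g. across kexec), since a device signalling message interrupts before vectors are set up produces exactly the screaming interrupts the comment describes. msi_set_enable() and msix_set_enable() are drivers/pci/msi.c internals; roughly, the MSI half amounts to a capability lookup plus a control-register update, as in this sketch:

    #include <linux/pci.h>

    static void sketch_disable_msi(struct pci_dev *dev)
    {
    	int pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
    	u16 ctrl;

    	if (!pos)		/* device has no MSI capability */
    		return;
    	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
    	ctrl &= ~PCI_MSI_FLAGS_ENABLE;
    	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, ctrl);
    }
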
+diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
+index dfbd5a6..258fef2 100644
+--- a/drivers/pnp/quirks.c
++++ b/drivers/pnp/quirks.c
+@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ 	}
+ }
+ 
++#ifdef CONFIG_AMD_NB
++
++#include <asm/amd_nb.h>
++
++static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
++{
++	resource_size_t start, end;
++	struct pnp_resource *pnp_res;
++	struct resource *res;
++	struct resource mmconfig_res, *mmconfig;
++
++	mmconfig = amd_get_mmconfig_range(&mmconfig_res);
++	if (!mmconfig)
++		return;
++
++	list_for_each_entry(pnp_res, &dev->resources, list) {
++		res = &pnp_res->res;
++		if (res->end < mmconfig->start || res->start > mmconfig->end ||
++		    (res->start == mmconfig->start && res->end == mmconfig->end))
++			continue;
++
++		dev_info(&dev->dev, FW_BUG
++			 "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
++			 res, mmconfig);
++		if (mmconfig->start < res->start) {
++			start = mmconfig->start;
++			end = res->start - 1;
++			pnp_add_mem_resource(dev, start, end, 0);
++		}
++		if (mmconfig->end > res->end) {
++			start = res->end + 1;
++			end = mmconfig->end;
++			pnp_add_mem_resource(dev, start, end, 0);
++		}
++		break;
++	}
++}
++#endif
++
+ /*
+  *  PnP Quirks
+  *  Cards or devices that need some tweaking due to incomplete resource info
+@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
+ 	/* PnP resources that might overlap PCI BARs */
+ 	{"PNP0c01", quirk_system_pci_resources},
+ 	{"PNP0c02", quirk_system_pci_resources},
++#ifdef CONFIG_AMD_NB
++	{"PNP0c01", quirk_amd_mmconfig_area},
++#endif
+ 	{""}
+ };
+ 
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 8e28625..8a1c031 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 		alarm->time.tm_hour = now.tm_hour;
+ 
+ 	/* For simplicity, only support date rollover for now */
+-	if (alarm->time.tm_mday == -1) {
++	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
+ 		alarm->time.tm_mday = now.tm_mday;
+ 		missing = day;
+ 	}
+-	if (alarm->time.tm_mon == -1) {
++	if ((unsigned)alarm->time.tm_mon >= 12) {
+ 		alarm->time.tm_mon = now.tm_mon;
+ 		if (missing == none)
+ 			missing = month;
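The RTC change widens the "field not provided" tests from the literal -1 to full range checks. The `(unsigned)alarm->time.tm_mon >= 12` form is the usual single-comparison idiom: casting to unsigned maps every negative int to a huge value, so one compare rejects both ends of the range. A tiny demonstration:

    #include <assert.h>

    static int month_valid(int m)
    {
    	return (unsigned)m < 12;	/* covers m < 0 and m > 11 alike */
    }

    int main(void)
    {
    	assert(month_valid(0) && month_valid(11));
    	assert(!month_valid(-1) && !month_valid(12));
    	return 0;
    }
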
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index beda04a..0794c72 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -65,6 +65,8 @@ static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
+ 
+ #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+ 
++#define MAX_HBA_QUEUE_DEPTH	30000
++#define MAX_CHAIN_DEPTH		100000
+ static int max_queue_depth = -1;
+ module_param(max_queue_depth, int, 0);
+ MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+@@ -2311,8 +2313,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
+ 		}
+ 		if (ioc->chain_dma_pool)
+ 			pci_pool_destroy(ioc->chain_dma_pool);
+-	}
+-	if (ioc->chain_lookup) {
+ 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ 		ioc->chain_lookup = NULL;
+ 	}
+@@ -2330,9 +2330,7 @@ static int
+ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ {
+ 	struct mpt2sas_facts *facts;
+-	u32 queue_size, queue_diff;
+ 	u16 max_sge_elements;
+-	u16 num_of_reply_frames;
+ 	u16 chains_needed_per_io;
+ 	u32 sz, total_sz, reply_post_free_sz;
+ 	u32 retry_sz;
+@@ -2359,7 +2357,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ 		max_request_credit = (max_queue_depth < facts->RequestCredit)
+ 		    ? max_queue_depth : facts->RequestCredit;
+ 	else
+-		max_request_credit = facts->RequestCredit;
++		max_request_credit = min_t(u16, facts->RequestCredit,
++		    MAX_HBA_QUEUE_DEPTH);
+ 
+ 	ioc->hba_queue_depth = max_request_credit;
+ 	ioc->hi_priority_depth = facts->HighPriorityCredit;
+@@ -2400,50 +2399,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ 	}
+ 	ioc->chains_needed_per_io = chains_needed_per_io;
+ 
+-	/* reply free queue sizing - taking into account for events */
+-	num_of_reply_frames = ioc->hba_queue_depth + 32;
+-
+-	/* number of replies frames can't be a multiple of 16 */
+-	/* decrease number of reply frames by 1 */
+-	if (!(num_of_reply_frames % 16))
+-		num_of_reply_frames--;
+-
+-	/* calculate number of reply free queue entries
+-	 *  (must be multiple of 16)
+-	 */
+-
+-	/* (we know reply_free_queue_depth is not a multiple of 16) */
+-	queue_size = num_of_reply_frames;
+-	queue_size += 16 - (queue_size % 16);
+-	ioc->reply_free_queue_depth = queue_size;
+-
+-	/* reply descriptor post queue sizing */
+-	/* this size should be the number of request frames + number of reply
+-	 * frames
+-	 */
+-
+-	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
+-	/* round up to 16 byte boundary */
+-	if (queue_size % 16)
+-		queue_size += 16 - (queue_size % 16);
+-
+-	/* check against IOC maximum reply post queue depth */
+-	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
+-		queue_diff = queue_size -
+-		    facts->MaxReplyDescriptorPostQueueDepth;
++	/* reply free queue sizing - taking into account for 64 FW events */
++	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ 
+-		/* round queue_diff up to multiple of 16 */
+-		if (queue_diff % 16)
+-			queue_diff += 16 - (queue_diff % 16);
+-
+-		/* adjust hba_queue_depth, reply_free_queue_depth,
+-		 * and queue_size
+-		 */
+-		ioc->hba_queue_depth -= (queue_diff / 2);
+-		ioc->reply_free_queue_depth -= (queue_diff / 2);
+-		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
++	/* align the reply post queue on the next 16 count boundary */
++	if (!ioc->reply_free_queue_depth % 16)
++		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
++	else
++		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
++				32 - (ioc->reply_free_queue_depth % 16);
++	if (ioc->reply_post_queue_depth >
++	    facts->MaxReplyDescriptorPostQueueDepth) {
++		ioc->reply_post_queue_depth = min_t(u16,
++		    (facts->MaxReplyDescriptorPostQueueDepth -
++		    (facts->MaxReplyDescriptorPostQueueDepth % 16)),
++		    (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
++		ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
++		ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
+ 	}
+-	ioc->reply_post_queue_depth = queue_size;
++
+ 
+ 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
+ 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+@@ -2529,15 +2503,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
+ 	    "depth(%d)\n", ioc->name, ioc->request,
+ 	    ioc->scsiio_depth));
+ 
+-	/* loop till the allocation succeeds */
+-	do {
+-		sz = ioc->chain_depth * sizeof(struct chain_tracker);
+-		ioc->chain_pages = get_order(sz);
+-		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+-		    GFP_KERNEL, ioc->chain_pages);
+-		if (ioc->chain_lookup == NULL)
+-			ioc->chain_depth -= 100;
+-	} while (ioc->chain_lookup == NULL);
++	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
++	sz = ioc->chain_depth * sizeof(struct chain_tracker);
++	ioc->chain_pages = get_order(sz);
++
++	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
++	    GFP_KERNEL, ioc->chain_pages);
+ 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ 	    ioc->request_sz, 16, 0);
+ 	if (!ioc->chain_dma_pool) {
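The reworked mpt2sas sizing clamps the queue depths and then aligns the reply post queue up to a 16-entry boundary. One caution for readers of the new code: `!ioc->reply_free_queue_depth % 16` parses as `(!depth) % 16` because logical-not binds tighter than `%`, so it only tests whether the depth is zero; the conventional spelling of "already a multiple of 16" is `!(depth % 16)`. The alignment step alone, as a sketch (the hunk also adds fixed headroom above the free-queue depth, omitted here):

    /* Round v up to the next multiple of 16 (returns v when aligned). */
    static unsigned int round_up_16(unsigned int v)
    {
    	return (v % 16) ? v + 16 - (v % 16) : v;
    }
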
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index d570573..9bc6fb2 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -1007,8 +1007,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+ 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ 	if (list_empty(&ioc->free_chain_list)) {
+ 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+-		printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
+-		    ioc->name);
++		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
++			"available\n", ioc->name));
+ 		return NULL;
+ 	}
+ 	chain_req = list_entry(ioc->free_chain_list.next,
+@@ -6714,6 +6714,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ 			} else
+ 				sas_target_priv_data = NULL;
+ 			raid_device->responding = 1;
++			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ 			starget_printk(KERN_INFO, raid_device->starget,
+ 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ 			    (unsigned long long)raid_device->wwid);
+@@ -6724,16 +6725,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ 			 */
+ 			_scsih_init_warpdrive_properties(ioc, raid_device);
+ 			if (raid_device->handle == handle)
+-				goto out;
++				return;
+ 			printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
+ 			    raid_device->handle);
+ 			raid_device->handle = handle;
+ 			if (sas_target_priv_data)
+ 				sas_target_priv_data->handle = handle;
+-			goto out;
++			return;
+ 		}
+ 	}
+- out:
++
+ 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index fa3a591..4b63c73 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1074,6 +1074,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ 	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
+ 				    "cmd=0x%x\n", disk->disk_name, cmd));
+ 
++	error = scsi_verify_blk_ioctl(bdev, cmd);
++	if (error < 0)
++		return error;
++
+ 	/*
+ 	 * If we are in the middle of error recovery, don't let anyone
+ 	 * else try and use this device.  Also, if error recovery fails, it
+@@ -1096,7 +1100,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ 			error = scsi_ioctl(sdp, cmd, p);
+ 			break;
+ 		default:
+-			error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
++			error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
+ 			if (error != -ENOTTY)
+ 				break;
+ 			error = scsi_ioctl(sdp, cmd, p);
+@@ -1266,6 +1270,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ 			   unsigned int cmd, unsigned long arg)
+ {
+ 	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
++	int ret;
++
++	ret = scsi_verify_blk_ioctl(bdev, cmd);
++	if (ret < 0)
++		return -ENOIOCTLCMD;
+ 
+ 	/*
+ 	 * If we are in the middle of error recovery, don't let anyone
+@@ -1277,8 +1286,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ 		return -ENODEV;
+ 	       
+ 	if (sdev->host->hostt->compat_ioctl) {
+-		int ret;
+-
+ 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ 
+ 		return ret;
+diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
+index b4543f5..36d1ed7 100644
+--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
++++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
+@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+ 	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+ 	unsigned long flags;
+ 
++	/* if slave_alloc returned before allocating a sym_lcb, return */
++	if (!lp)
++		return;
++
+ 	spin_lock_irqsave(np->s.host->host_lock, flags);
+ 
+ 	if (lp->busy_itlq || lp->busy_itl) {
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 831468b..2e8c1be 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -94,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ 	buf[2] = dev->transport->get_device_rev(dev);
+ 
+ 	/*
++	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
++	 *
++	 * SPC4 says:
++	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
++	 *   standard INQUIRY data is in the format defined in this
++	 *   standard. Response data format values less than 2h are
++	 *   obsolete. Response data format values greater than 2h are
++	 *   reserved.
++	 */
++	buf[3] = 2;
++
++	/*
+ 	 * Enable SCCS and TPGS fields for Emulated ALUA
+ 	 */
+ 	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0257658..e87d0eb 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -4353,6 +4353,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_NON_EXISTENT_LUN:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* LOGICAL UNIT NOT SUPPORTED */
+@@ -4362,6 +4363,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_SECTOR_COUNT_TOO_MANY:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* INVALID COMMAND OPERATION CODE */
+@@ -4370,6 +4372,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_UNKNOWN_MODE_PAGE:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* INVALID FIELD IN CDB */
+@@ -4378,6 +4381,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_CHECK_CONDITION_ABORT_CMD:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* BUS DEVICE RESET FUNCTION OCCURRED */
+@@ -4387,6 +4391,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_INCORRECT_AMOUNT_OF_DATA:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* WRITE ERROR */
+@@ -4397,6 +4402,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_INVALID_CDB_FIELD:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* INVALID FIELD IN CDB */
+@@ -4405,6 +4411,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_INVALID_PARAMETER_LIST:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* INVALID FIELD IN PARAMETER LIST */
+@@ -4413,6 +4420,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* WRITE ERROR */
+@@ -4423,6 +4431,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_SERVICE_CRC_ERROR:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* PROTOCOL SERVICE CRC ERROR */
+@@ -4433,6 +4442,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_SNACK_REJECTED:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ABORTED COMMAND */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ 		/* READ ERROR */
+@@ -4443,6 +4453,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_WRITE_PROTECTED:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* DATA PROTECT */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+ 		/* WRITE PROTECTED */
+@@ -4451,6 +4462,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* UNIT ATTENTION */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+@@ -4460,6 +4472,7 @@ int transport_send_check_condition_and_sense(
+ 	case TCM_CHECK_CONDITION_NOT_READY:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* Not Ready */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ 		transport_get_sense_codes(cmd, &asc, &ascq);
+@@ -4470,6 +4483,7 @@ int transport_send_check_condition_and_sense(
+ 	default:
+ 		/* CURRENT ERROR */
+ 		buffer[offset] = 0x70;
++		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ 		/* ILLEGAL REQUEST */
+ 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* LOGICAL UNIT COMMUNICATION FAILURE */
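Every CHECK CONDITION case now sets byte 7 of the sense buffer (SPC_ADD_SENSE_LEN_OFFSET) to 10. In SPC fixed-format sense data, byte 7 is the ADDITIONAL SENSE LENGTH, the number of bytes that follow it, so 8 header bytes plus 10 gives the standard 18-byte block, and initiators that honor the length field now actually read the ASC/ASCQ at offsets 12 and 13. The layout, sketched with example values:

    /* SPC fixed-format sense data, 18 bytes total (example values). */
    static const unsigned char sense_example[18] = {
    	[0]  = 0x70,	/* response code: current error, fixed format */
    	[2]  = 0x05,	/* sense key: ILLEGAL REQUEST */
    	[7]  = 10,	/* additional sense length: bytes 8..17 follow */
    	[12] = 0x20,	/* ASC: INVALID COMMAND OPERATION CODE */
    	[13] = 0x00,	/* ASCQ */
    };
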
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index ede860f..a580b17 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -801,6 +801,12 @@ static int process_msg(void)
+ 		goto out;
+ 	}
+ 
++	if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
++		kfree(msg);
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
+ 	if (body == NULL) {
+ 		kfree(msg);
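The new check is the usual validate-before-allocate pattern: msg->hdr.len arrives over a ring shared with another domain, so it must be bounded by the protocol maximum before being fed to kmalloc(len + 1). A hedged userspace sketch of the same idea; the 4096 limit matches the XENSTORE_PAYLOAD_MAX constant added to xs_wire.h further down in this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define XENSTORE_PAYLOAD_MAX 4096   /* from the xs_wire.h hunk below */

/* Return a NUL-terminated copy of an untrusted payload, or NULL. */
static char *copy_payload(const char *src, size_t len)
{
        char *body;

        if (len > XENSTORE_PAYLOAD_MAX)   /* reject before allocating */
                return NULL;
        body = malloc(len + 1);           /* cannot overflow: len <= 4096 */
        if (!body)
                return NULL;
        memcpy(body, src, len);
        body[len] = '\0';
        return body;
}

int main(void)
{
        char *p = copy_payload("hello", 5);
        printf("%s\n", p ? p : "(rejected)");
        free(p);
        return 0;
}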
+diff --git a/fs/aio.c b/fs/aio.c
+index 78c514c..969beb0 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -476,14 +476,21 @@ static void kiocb_batch_init(struct kiocb_batch *batch, long total)
+ 	batch->count = total;
+ }
+ 
+-static void kiocb_batch_free(struct kiocb_batch *batch)
++static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
+ {
+ 	struct kiocb *req, *n;
+ 
++	if (list_empty(&batch->head))
++		return;
++
++	spin_lock_irq(&ctx->ctx_lock);
+ 	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
+ 		list_del(&req->ki_batch);
++		list_del(&req->ki_list);
+ 		kmem_cache_free(kiocb_cachep, req);
++		ctx->reqs_active--;
+ 	}
++	spin_unlock_irq(&ctx->ctx_lock);
+ }
+ 
+ /*
+@@ -1742,7 +1749,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
+ 	}
+ 	blk_finish_plug(&plug);
+ 
+-	kiocb_batch_free(&batch);
++	kiocb_batch_free(ctx, &batch);
+ 	put_ioctx(ctx);
+ 	return i ? i : ret;
+ }
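The point of passing the context in: leftover batch requests were already linked into the ioctx and counted in reqs_active, so discarding them has to fix up that shared state under the context lock. A toy model of the pattern, with a pthread mutex in place of ctx_lock; the names and structures below are stand-ins, not the aio code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req { struct req *next; };

struct ctx {
        pthread_mutex_t lock;
        int reqs_active;
};

static void batch_free(struct ctx *ctx, struct req **batch)
{
        if (!*batch)
                return;                 /* common case: nothing left over */
        pthread_mutex_lock(&ctx->lock);
        while (*batch) {
                struct req *r = *batch;
                *batch = r->next;
                ctx->reqs_active--;     /* undo the earlier accounting */
                free(r);
        }
        pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
        struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 2 };
        struct req *b = calloc(1, sizeof(*b));

        b->next = calloc(1, sizeof(*b));
        batch_free(&c, &b);
        printf("reqs_active = %d\n", c.reqs_active);   /* 0 */
        return 0;
}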
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index f3670cf..63e4be4 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2914,18 +2914,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+ 
+ /*
+- * Windows only supports a max of 60k reads. Default to that when posix
+- * extensions aren't in force.
++ * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
++ * those values when posix extensions aren't in force. In actuality here, we
++ * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
++ * to be ok with the extra byte even though Windows doesn't send writes that
++ * are that large.
++ *
++ * Citation:
++ *
++ * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
+  */
+ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
++#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
+ 
+ static unsigned int
+ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ {
+ 	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+-	unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
+-				CIFS_DEFAULT_IOSIZE;
++	unsigned int wsize;
++
++	/* start with specified wsize, or default */
++	if (pvolume_info->wsize)
++		wsize = pvolume_info->wsize;
++	else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
++		wsize = CIFS_DEFAULT_IOSIZE;
++	else
++		wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
+ 
+ 	/* can server support 24-bit write sizes? (via UNIX extensions) */
+ 	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
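In words: an explicit wsize mount option always wins; with the UNIX extensions' large-write capability the client can use the 1 MiB default; otherwise it falls back to a Windows-safe 64 KiB. A compact sketch of that decision; the capability bit value below is a placeholder, not the real CIFS_UNIX_LARGE_WRITE_CAP.

#include <stdio.h>

#define CIFS_DEFAULT_IOSIZE           (1024 * 1024)
#define CIFS_DEFAULT_NON_POSIX_WSIZE  65536
#define CIFS_UNIX_LARGE_WRITE_CAP     0x80  /* placeholder bit for the sketch */

static unsigned int negotiate_wsize(unsigned int user_wsize,
                                    int unix_ext, unsigned long long unix_cap)
{
        if (user_wsize)                                  /* mount option wins */
                return user_wsize;
        if (unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
                return CIFS_DEFAULT_IOSIZE;              /* POSIX extensions */
        return CIFS_DEFAULT_NON_POSIX_WSIZE;             /* Windows-safe cap */
}

int main(void)
{
        printf("%u\n", negotiate_wsize(0, 0, 0));        /* 65536 */
        printf("%u\n", negotiate_wsize(0, 1, CIFS_UNIX_LARGE_WRITE_CAP));
        return 0;
}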
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 89509b5..f7908ae 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -242,6 +242,7 @@ static void dentry_lru_add(struct dentry *dentry)
+ static void __dentry_lru_del(struct dentry *dentry)
+ {
+ 	list_del_init(&dentry->d_lru);
++	dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+ 	dentry->d_sb->s_nr_dentry_unused--;
+ 	dentry_stat.nr_unused--;
+ }
+@@ -275,15 +276,15 @@ static void dentry_lru_prune(struct dentry *dentry)
+ 	}
+ }
+ 
+-static void dentry_lru_move_tail(struct dentry *dentry)
++static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
+ {
+ 	spin_lock(&dcache_lru_lock);
+ 	if (list_empty(&dentry->d_lru)) {
+-		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
++		list_add_tail(&dentry->d_lru, list);
+ 		dentry->d_sb->s_nr_dentry_unused++;
+ 		dentry_stat.nr_unused++;
+ 	} else {
+-		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
++		list_move_tail(&dentry->d_lru, list);
+ 	}
+ 	spin_unlock(&dcache_lru_lock);
+ }
+@@ -769,14 +770,18 @@ static void shrink_dentry_list(struct list_head *list)
+ }
+ 
+ /**
+- * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
+- * @sb:		superblock to shrink dentry LRU.
+- * @count:	number of entries to prune
+- * @flags:	flags to control the dentry processing
++ * prune_dcache_sb - shrink the dcache
++ * @sb: superblock
++ * @count: number of entries to try to free
++ *
++ * Attempt to shrink the superblock dcache LRU by @count entries. This is
++ * done when we need more memory and is called from the superblock shrinker
++ * function.
+  *
+- * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
++ * This function may fail to free any resources if all the dentries are in
++ * use.
+  */
+-static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
++void prune_dcache_sb(struct super_block *sb, int count)
+ {
+ 	struct dentry *dentry;
+ 	LIST_HEAD(referenced);
+@@ -795,18 +800,13 @@ relock:
+ 			goto relock;
+ 		}
+ 
+-		/*
+-		 * If we are honouring the DCACHE_REFERENCED flag and the
+-		 * dentry has this flag set, don't free it.  Clear the flag
+-		 * and put it back on the LRU.
+-		 */
+-		if (flags & DCACHE_REFERENCED &&
+-				dentry->d_flags & DCACHE_REFERENCED) {
++		if (dentry->d_flags & DCACHE_REFERENCED) {
+ 			dentry->d_flags &= ~DCACHE_REFERENCED;
+ 			list_move(&dentry->d_lru, &referenced);
+ 			spin_unlock(&dentry->d_lock);
+ 		} else {
+ 			list_move_tail(&dentry->d_lru, &tmp);
++			dentry->d_flags |= DCACHE_SHRINK_LIST;
+ 			spin_unlock(&dentry->d_lock);
+ 			if (!--count)
+ 				break;
+@@ -821,23 +821,6 @@ relock:
+ }
+ 
+ /**
+- * prune_dcache_sb - shrink the dcache
+- * @sb: superblock
+- * @nr_to_scan: number of entries to try to free
+- *
+- * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
+- * done when we need more memory an called from the superblock shrinker
+- * function.
+- *
+- * This function may fail to free any resources if all the dentries are in
+- * use.
+- */
+-void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
+-{
+-	__shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
+-}
+-
+-/**
+  * shrink_dcache_sb - shrink dcache for a superblock
+  * @sb: superblock
+  *
+@@ -1091,7 +1074,7 @@ EXPORT_SYMBOL(have_submounts);
+  * drop the lock and return early due to latency
+  * constraints.
+  */
+-static int select_parent(struct dentry * parent)
++static int select_parent(struct dentry *parent, struct list_head *dispose)
+ {
+ 	struct dentry *this_parent;
+ 	struct list_head *next;
+@@ -1113,17 +1096,21 @@ resume:
+ 
+ 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ 
+-		/* 
+-		 * move only zero ref count dentries to the end 
+-		 * of the unused list for prune_dcache
++		/*
++		 * move only zero ref count dentries to the dispose list.
++		 *
++		 * Those which are presently on the shrink list, being processed
++		 * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
++		 * loop in shrink_dcache_parent() might not make any progress
++		 * and loop forever.
+ 		 */
+-		if (!dentry->d_count) {
+-			dentry_lru_move_tail(dentry);
+-			found++;
+-		} else {
++		if (dentry->d_count) {
+ 			dentry_lru_del(dentry);
++		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
++			dentry_lru_move_list(dentry, dispose);
++			dentry->d_flags |= DCACHE_SHRINK_LIST;
++			found++;
+ 		}
+-
+ 		/*
+ 		 * We can return to the caller if we have found some (this
+ 		 * ensures forward progress). We'll be coming back to find
+@@ -1180,14 +1167,13 @@ rename_retry:
+  *
+  * Prune the dcache to remove unused children of the parent dentry.
+  */
+- 
+ void shrink_dcache_parent(struct dentry * parent)
+ {
+-	struct super_block *sb = parent->d_sb;
++	LIST_HEAD(dispose);
+ 	int found;
+ 
+-	while ((found = select_parent(parent)) != 0)
+-		__shrink_dcache_sb(sb, found, 0);
++	while ((found = select_parent(parent, &dispose)) != 0)
++		shrink_dentry_list(&dispose);
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+ 
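The DCACHE_SHRINK_LIST flag is what guarantees forward progress here: a dentry already claimed by one disposal pass is skipped by later select_parent() scans, so shrink_dcache_parent() can no longer re-select the same entries forever. A toy model of the claim-and-skip idea, using a flat array instead of the dentry tree; everything below is a stand-in.

#include <stdio.h>

#define ON_SHRINK_LIST 0x1

struct node { int flags; int refcount; };

/* Claim only unreferenced, not-yet-claimed nodes for disposal. */
static int select_for_dispose(struct node *nodes, int n)
{
        int i, found = 0;

        for (i = 0; i < n; i++) {
                if (nodes[i].refcount)
                        continue;              /* still in use */
                if (nodes[i].flags & ON_SHRINK_LIST)
                        continue;              /* already claimed elsewhere */
                nodes[i].flags |= ON_SHRINK_LIST;
                found++;
        }
        return found;
}

int main(void)
{
        struct node nodes[3] = { {0, 0}, {0, 1}, {ON_SHRINK_LIST, 0} };

        /* Only the first node qualifies; the claimed one is skipped. */
        printf("claimed %d of 3\n", select_for_dispose(nodes, 3));
        return 0;
}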
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index a567968..ab25f57 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -182,19 +182,22 @@ setversion_out:
+ 		if (err)
+ 			return err;
+ 
+-		if (get_user(n_blocks_count, (__u32 __user *)arg))
+-			return -EFAULT;
++		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
++			err = -EFAULT;
++			goto group_extend_out;
++		}
+ 
+ 		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ 			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ 			ext4_msg(sb, KERN_ERR,
+ 				 "Online resizing not supported with bigalloc");
+-			return -EOPNOTSUPP;
++			err = -EOPNOTSUPP;
++			goto group_extend_out;
+ 		}
+ 
+ 		err = mnt_want_write(filp->f_path.mnt);
+ 		if (err)
+-			return err;
++			goto group_extend_out;
+ 
+ 		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
+ 		if (EXT4_SB(sb)->s_journal) {
+@@ -204,9 +207,10 @@ setversion_out:
+ 		}
+ 		if (err == 0)
+ 			err = err2;
++
+ 		mnt_drop_write(filp->f_path.mnt);
++group_extend_out:
+ 		ext4_resize_end(sb);
+-
+ 		return err;
+ 	}
+ 
+@@ -267,19 +271,22 @@ mext_out:
+ 			return err;
+ 
+ 		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
+-				sizeof(input)))
+-			return -EFAULT;
++				sizeof(input))) {
++			err = -EFAULT;
++			goto group_add_out;
++		}
+ 
+ 		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ 			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ 			ext4_msg(sb, KERN_ERR,
+ 				 "Online resizing not supported with bigalloc");
+-			return -EOPNOTSUPP;
++			err = -EOPNOTSUPP;
++			goto group_add_out;
+ 		}
+ 
+ 		err = mnt_want_write(filp->f_path.mnt);
+ 		if (err)
+-			return err;
++			goto group_add_out;
+ 
+ 		err = ext4_group_add(sb, &input);
+ 		if (EXT4_SB(sb)->s_journal) {
+@@ -289,9 +296,10 @@ mext_out:
+ 		}
+ 		if (err == 0)
+ 			err = err2;
++
+ 		mnt_drop_write(filp->f_path.mnt);
++group_add_out:
+ 		ext4_resize_end(sb);
+-
+ 		return err;
+ 	}
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3e1329e..9281dbe 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2006,17 +2006,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ 	struct ext4_group_desc *gdp = NULL;
+ 	ext4_group_t flex_group_count;
+ 	ext4_group_t flex_group;
+-	int groups_per_flex = 0;
++	unsigned int groups_per_flex = 0;
+ 	size_t size;
+ 	int i;
+ 
+ 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
+-	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+-
+-	if (groups_per_flex < 2) {
++	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
+ 		sbi->s_log_groups_per_flex = 0;
+ 		return 1;
+ 	}
++	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+ 
+ 	/* We allocate both existing and potentially added groups */
+ 	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
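The underlying hazard in the ext4 hunk: s_log_groups_per_flex comes straight from the on-disk superblock, and `1 << n` is undefined for n >= 31 on a 32-bit signed int, so the value must be range-checked before shifting. A minimal sketch of the validated conversion:

#include <stdio.h>

/* Treat an on-disk log2 field as untrusted: validate before shifting. */
static unsigned int groups_per_flex(int log_groups)
{
        if (log_groups < 1 || log_groups > 31)
                return 0;               /* feature disabled / invalid */
        return 1U << log_groups;        /* safe: exponent now in [1,31] */
}

int main(void)
{
        printf("%u\n", groups_per_flex(4));    /* 16 */
        printf("%u\n", groups_per_flex(200));  /* 0: rejected */
        return 0;
}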
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 281ae95..3db6b82 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -146,14 +146,19 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ {
+ 	struct bio *bio;
+ 
++	npg = min(npg, BIO_MAX_PAGES);
+ 	bio = bio_alloc(GFP_NOIO, npg);
+-	if (!bio)
+-		return NULL;
++	if (!bio && (current->flags & PF_MEMALLOC)) {
++		while (!bio && (npg /= 2))
++			bio = bio_alloc(GFP_NOIO, npg);
++	}
+ 
+-	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+-	bio->bi_bdev = be->be_mdev;
+-	bio->bi_end_io = end_io;
+-	bio->bi_private = par;
++	if (bio) {
++		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++		bio->bi_bdev = be->be_mdev;
++		bio->bi_end_io = end_io;
++		bio->bi_private = par;
++	}
+ 	return bio;
+ }
+ 
+@@ -779,16 +784,13 @@ bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
+ static void free_blk_mountid(struct block_mount_id *mid)
+ {
+ 	if (mid) {
+-		struct pnfs_block_dev *dev;
+-		spin_lock(&mid->bm_lock);
+-		while (!list_empty(&mid->bm_devlist)) {
+-			dev = list_first_entry(&mid->bm_devlist,
+-					       struct pnfs_block_dev,
+-					       bm_node);
++		struct pnfs_block_dev *dev, *tmp;
++
++		/* No need to take bm_lock as we are last user freeing bm_devlist */
++		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
+ 			list_del(&dev->bm_node);
+ 			bl_free_block_dev(dev);
+ 		}
+-		spin_unlock(&mid->bm_lock);
+ 		kfree(mid);
+ 	}
+ }
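The bl_alloc_init_bio() change is a shrink-and-retry allocation: if a large request fails while the task is in memory-reclaim context, halve it and try again rather than giving up outright. A userspace sketch with malloc() standing in for bio_alloc():

#include <stdio.h>
#include <stdlib.h>

/* Try a big allocation first; on failure, retry with half the size
 * until it succeeds or the size reaches zero. */
static void *alloc_scaled(size_t npg, size_t *got)
{
        void *p = malloc(npg * 4096);

        while (!p && (npg /= 2))
                p = malloc(npg * 4096);
        *got = npg;
        return p;
}

int main(void)
{
        size_t got;
        void *p = alloc_scaled(256, &got);

        printf("got %zu pages\n", p ? got : 0);
        free(p);
        return 0;
}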
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index 19fa7b0..c69682a 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
+ }
+ 
+ /* Ensure that future operations on given range of tree will not malloc */
+-static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
++static int _preload_range(struct pnfs_inval_markings *marks,
++		u64 offset, u64 length)
+ {
+ 	u64 start, end, s;
+ 	int count, i, used = 0, status = -ENOMEM;
+ 	struct pnfs_inval_tracking **storage;
++	struct my_tree  *tree = &marks->im_tree;
+ 
+ 	dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
+ 	start = normalize(offset, tree->mtt_step_size);
+@@ -161,12 +163,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
+ 			goto out_cleanup;
+ 	}
+ 
+-	/* Now need lock - HOW??? */
+-
++	spin_lock(&marks->im_lock);
+ 	for (s = start; s < end; s += tree->mtt_step_size)
+ 		used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
++	spin_unlock(&marks->im_lock);
+ 
+-	/* Unlock - HOW??? */
+ 	status = 0;
+ 
+  out_cleanup:
+@@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
+ 
+ 	start = normalize(offset, marks->im_block_size);
+ 	end = normalize_up(offset + length, marks->im_block_size);
+-	if (_preload_range(&marks->im_tree, start, end - start))
++	if (_preload_range(marks, start, end - start))
+ 		goto outerr;
+ 
+ 	spin_lock(&marks->im_lock);
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index 43926ad..54cea8a 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
+ 	dprintk("%s enter. slotid %d seqid %d\n",
+ 		__func__, args->csa_slotid, args->csa_sequenceid);
+ 
+-	if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
++	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
+ 		return htonl(NFS4ERR_BADSLOT);
+ 
+ 	slot = tbl->slots + args->csa_slotid;
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 606ef0f..c43a452 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ 			datasync);
+ 
+ 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+-	if (ret)
+-		return ret;
+ 	mutex_lock(&inode->i_mutex);
+ 
+ 	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
+ 	have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ 	status = nfs_commit_inode(inode, FLUSH_SYNC);
++	if (status >= 0 && ret < 0)
++		status = ret;
+ 	have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ 	if (have_error)
+ 		ret = xchg(&ctx->error, 0);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d9f4d78..055d702 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3430,19 +3430,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
+  */
+ #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+ 
+-static void buf_to_pages(const void *buf, size_t buflen,
+-		struct page **pages, unsigned int *pgbase)
+-{
+-	const void *p = buf;
+-
+-	*pgbase = offset_in_page(buf);
+-	p -= *pgbase;
+-	while (p < buf + buflen) {
+-		*(pages++) = virt_to_page(p);
+-		p += PAGE_CACHE_SIZE;
+-	}
+-}
+-
+ static int buf_to_pages_noslab(const void *buf, size_t buflen,
+ 		struct page **pages, unsigned int *pgbase)
+ {
+@@ -3539,9 +3526,19 @@ out:
+ 	nfs4_set_cached_acl(inode, acl);
+ }
+ 
++/*
++ * The getxattr API returns the required buffer length when called with a
++ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
++ * the required buf.  On a NULL buf, we send a page of data to the server
++ * guessing that the ACL request can be serviced by a page. If so, we cache
++ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
++ * the cache. If not, we throw away the page and cache only the required
++ * length. The next getxattr call will then produce another round trip to
++ * the server, this time with the input buf of the required size.
++ */
+ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+ {
+-	struct page *pages[NFS4ACL_MAXPAGES];
++	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ 	struct nfs_getaclargs args = {
+ 		.fh = NFS_FH(inode),
+ 		.acl_pages = pages,
+@@ -3556,41 +3553,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ 		.rpc_argp = &args,
+ 		.rpc_resp = &res,
+ 	};
+-	struct page *localpage = NULL;
+-	int ret;
++	int ret = -ENOMEM, npages, i, acl_len = 0;
+ 
+-	if (buflen < PAGE_SIZE) {
+-		/* As long as we're doing a round trip to the server anyway,
+-		 * let's be prepared for a page of acl data. */
+-		localpage = alloc_page(GFP_KERNEL);
+-		resp_buf = page_address(localpage);
+-		if (localpage == NULL)
+-			return -ENOMEM;
+-		args.acl_pages[0] = localpage;
+-		args.acl_pgbase = 0;
+-		args.acl_len = PAGE_SIZE;
+-	} else {
+-		resp_buf = buf;
+-		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
++	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	/* As long as we're doing a round trip to the server anyway,
++	 * let's be prepared for a page of acl data. */
++	if (npages == 0)
++		npages = 1;
++
++	for (i = 0; i < npages; i++) {
++		pages[i] = alloc_page(GFP_KERNEL);
++		if (!pages[i])
++			goto out_free;
++	}
++	if (npages > 1) {
++		/* for decoding across pages */
++		args.acl_scratch = alloc_page(GFP_KERNEL);
++		if (!args.acl_scratch)
++			goto out_free;
+ 	}
+-	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
++	args.acl_len = npages * PAGE_SIZE;
++	args.acl_pgbase = 0;
++	/* Let decode_getfacl know not to fail if the ACL data is larger than
++	 * the page we send as a guess */
++	if (buf == NULL)
++		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
++	resp_buf = page_address(pages[0]);
++
++	dprintk("%s  buf %p buflen %ld npages %d args.acl_len %ld\n",
++		__func__, buf, buflen, npages, args.acl_len);
++	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
++			     &msg, &args.seq_args, &res.seq_res, 0);
+ 	if (ret)
+ 		goto out_free;
+-	if (res.acl_len > args.acl_len)
+-		nfs4_write_cached_acl(inode, NULL, res.acl_len);
++
++	acl_len = res.acl_len - res.acl_data_offset;
++	if (acl_len > args.acl_len)
++		nfs4_write_cached_acl(inode, NULL, acl_len);
+ 	else
+-		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
++		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
++				      acl_len);
+ 	if (buf) {
+ 		ret = -ERANGE;
+-		if (res.acl_len > buflen)
++		if (acl_len > buflen)
+ 			goto out_free;
+-		if (localpage)
+-			memcpy(buf, resp_buf, res.acl_len);
++		_copy_from_pages(buf, pages, res.acl_data_offset,
++				res.acl_len);
+ 	}
+-	ret = res.acl_len;
++	ret = acl_len;
+ out_free:
+-	if (localpage)
+-		__free_page(localpage);
++	for (i = 0; i < npages; i++)
++		if (pages[i])
++			__free_page(pages[i]);
++	if (args.acl_scratch)
++		__free_page(args.acl_scratch);
+ 	return ret;
+ }
+ 
+@@ -3621,6 +3637,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+ 		nfs_zap_acl_cache(inode);
+ 	ret = nfs4_read_cached_acl(inode, buf, buflen);
+ 	if (ret != -ENOENT)
++		/* -ENOENT is returned if there is no ACL or if there is an ACL
++		 * but no cached acl data, just the acl length */
+ 		return ret;
+ 	return nfs4_get_acl_uncached(inode, buf, buflen);
+ }
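The long comment above describes the standard getxattr(2) probing convention: a NULL buffer asks only for the required length; a real buffer gets the data if it fits, or -ERANGE if it does not. A self-contained sketch of that calling convention, with a toy backing store rather than the NFS code:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t get_acl(void *buf, size_t buflen)
{
        static const char acl[] = "A::OWNER@:rwatTnNcCy";
        size_t acl_len = sizeof(acl);

        if (buf == NULL)
                return (ssize_t)acl_len;      /* length-only request */
        if (acl_len > buflen)
                return -ERANGE;               /* caller's buffer too small */
        memcpy(buf, acl, acl_len);
        return (ssize_t)acl_len;
}

int main(void)
{
        ssize_t need = get_acl(NULL, 0);      /* first call: probe size */
        char buf[64];

        printf("need %zd, got %zd\n", need, get_acl(buf, sizeof(buf)));
        return 0;
}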
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index e6161b2..dcaf693 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ 	encode_compound_hdr(xdr, req, &hdr);
+ 	encode_sequence(xdr, &args->seq_args, &hdr);
+ 	encode_putfh(xdr, args->fh, &hdr);
+-	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
++	replen = hdr.replen + op_decode_hdr_maxsz + 1;
+ 	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+ 
+ 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+ 		args->acl_pages, args->acl_pgbase, args->acl_len);
++	xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
++
+ 	encode_nops(&hdr);
+ }
+ 
+@@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
+ }
+ 
+ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+-		size_t *acl_len)
++			 struct nfs_getaclres *res)
+ {
+-	__be32 *savep;
++	__be32 *savep, *bm_p;
+ 	uint32_t attrlen,
+ 		 bitmap[3] = {0};
+ 	struct kvec *iov = req->rq_rcv_buf.head;
+ 	int status;
+ 
+-	*acl_len = 0;
++	res->acl_len = 0;
+ 	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ 		goto out;
++	bm_p = xdr->p;
+ 	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ 		goto out;
+ 	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 		size_t hdrlen;
+ 		u32 recvd;
+ 
++		/* The bitmap (xdr len + bitmaps) and the attr xdr len words
++		 * are stored with the acl data to handle the problem of
++		 * variable-length bitmaps. */
++		xdr->p = bm_p;
++		res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++		res->acl_data_offset <<= 2;
++
+ 		/* We ignore &savep and don't do consistency checks on
+ 		 * the attr length.  Let userspace figure it out.... */
+ 		hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
++		attrlen += res->acl_data_offset;
+ 		recvd = req->rq_rcv_buf.len - hdrlen;
+ 		if (attrlen > recvd) {
+-			dprintk("NFS: server cheating in getattr"
+-					" acl reply: attrlen %u > recvd %u\n",
++			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
++				/* getxattr interface called with a NULL buf */
++				res->acl_len = attrlen;
++				goto out;
++			}
++			dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+ 					attrlen, recvd);
+ 			return -EINVAL;
+ 		}
+ 		xdr_read_pages(xdr, attrlen);
+-		*acl_len = attrlen;
++		res->acl_len = attrlen;
+ 	} else
+ 		status = -EOPNOTSUPP;
+ 
+@@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ 	status = decode_putfh(xdr);
+ 	if (status)
+ 		goto out;
+-	status = decode_getacl(xdr, rqstp, &res->acl_len);
++	status = decode_getacl(xdr, rqstp, res);
+ 
+ out:
+ 	return status;
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index c807ab9..55d0128 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
+ static struct pnfs_layoutdriver_type objlayout_type = {
+ 	.id = LAYOUT_OSD2_OBJECTS,
+ 	.name = "LAYOUT_OSD2_OBJECTS",
+-	.flags                   = PNFS_LAYOUTRET_ON_SETATTR,
++	.flags                   = PNFS_LAYOUTRET_ON_SETATTR |
++				   PNFS_LAYOUTRET_ON_ERROR,
+ 
+ 	.alloc_layout_hdr        = objlayout_alloc_layout_hdr,
+ 	.free_layout_hdr         = objlayout_free_layout_hdr,
+diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
+index 72074e3..b3c2903 100644
+--- a/fs/nfs/objlayout/objlayout.c
++++ b/fs/nfs/objlayout/objlayout.c
+@@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
+ 	oir->status = rdata->task.tk_status = status;
+ 	if (status >= 0)
+ 		rdata->res.count = status;
++	else
++		rdata->pnfs_error = status;
+ 	objlayout_iodone(oir);
+ 	/* must not use oir after this point */
+ 
+@@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
+ 	if (status >= 0) {
+ 		wdata->res.count = status;
+ 		wdata->verf.committed = oir->committed;
++	} else {
++		wdata->pnfs_error = status;
+ 	}
+ 	objlayout_iodone(oir);
+ 	/* must not use oir after this point */
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 8e672a2..f881a63 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1178,6 +1178,15 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
+ 		put_lseg(data->lseg);
+ 		data->lseg = NULL;
+ 		dprintk("pnfs write error = %d\n", data->pnfs_error);
++		if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
++						PNFS_LAYOUTRET_ON_ERROR) {
++			/* Don't lo_commit on error; the server will need to
++			 * perform a file recovery.
++			 */
++			clear_bit(NFS_INO_LAYOUTCOMMIT,
++				  &NFS_I(data->inode)->flags);
++			pnfs_return_layout(data->inode);
++		}
+ 	}
+ 	data->mds_ops->rpc_release(data);
+ }
+@@ -1267,6 +1276,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+ 	put_lseg(data->lseg);
+ 	data->lseg = NULL;
+ 	dprintk("pnfs write error = %d\n", data->pnfs_error);
++	if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
++						PNFS_LAYOUTRET_ON_ERROR)
++		pnfs_return_layout(data->inode);
+ 
+ 	nfs_pageio_init_read_mds(&pgio, data->inode);
+ 
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 1509530..53d593a 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -68,6 +68,7 @@ enum {
+ enum layoutdriver_policy_flags {
+ 	/* Should the pNFS client commit and return the layout upon a setattr */
+ 	PNFS_LAYOUTRET_ON_SETATTR	= 1 << 0,
++	PNFS_LAYOUTRET_ON_ERROR		= 1 << 1,
+ };
+ 
+ struct nfs4_deviceid_node;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 1347774..3ada13c 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -909,10 +909,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
+ 		data->auth_flavor_len	= 1;
+ 		data->version		= version;
+ 		data->minorversion	= 0;
++		security_init_mnt_opts(&data->lsm_opts);
+ 	}
+ 	return data;
+ }
+ 
++static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
++{
++	if (data) {
++		kfree(data->client_address);
++		kfree(data->mount_server.hostname);
++		kfree(data->nfs_server.export_path);
++		kfree(data->nfs_server.hostname);
++		kfree(data->fscache_uniq);
++		security_free_mnt_opts(&data->lsm_opts);
++		kfree(data);
++	}
++}
++
+ /*
+  * Sanity-check a server address provided by the mount command.
+  *
+@@ -2220,9 +2234,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ 	data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
+ 	mntfh = nfs_alloc_fhandle();
+ 	if (data == NULL || mntfh == NULL)
+-		goto out_free_fh;
+-
+-	security_init_mnt_opts(&data->lsm_opts);
++		goto out;
+ 
+ 	/* Validate the mount data */
+ 	error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
+@@ -2234,8 +2246,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ #ifdef CONFIG_NFS_V4
+ 	if (data->version == 4) {
+ 		mntroot = nfs4_try_mount(flags, dev_name, data);
+-		kfree(data->client_address);
+-		kfree(data->nfs_server.export_path);
+ 		goto out;
+ 	}
+ #endif	/* CONFIG_NFS_V4 */
+@@ -2290,13 +2300,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ 	s->s_flags |= MS_ACTIVE;
+ 
+ out:
+-	kfree(data->nfs_server.hostname);
+-	kfree(data->mount_server.hostname);
+-	kfree(data->fscache_uniq);
+-	security_free_mnt_opts(&data->lsm_opts);
+-out_free_fh:
++	nfs_free_parsed_mount_data(data);
+ 	nfs_free_fhandle(mntfh);
+-	kfree(data);
+ 	return mntroot;
+ 
+ out_err_nosb:
+@@ -2623,9 +2628,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+ 
+ 	mntfh = nfs_alloc_fhandle();
+ 	if (data == NULL || mntfh == NULL)
+-		goto out_free_fh;
+-
+-	security_init_mnt_opts(&data->lsm_opts);
++		goto out;
+ 
+ 	/* Get a volume representation */
+ 	server = nfs4_create_server(data, mntfh);
+@@ -2677,13 +2680,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+ 
+ 	s->s_flags |= MS_ACTIVE;
+ 
+-	security_free_mnt_opts(&data->lsm_opts);
+ 	nfs_free_fhandle(mntfh);
+ 	return mntroot;
+ 
+ out:
+-	security_free_mnt_opts(&data->lsm_opts);
+-out_free_fh:
+ 	nfs_free_fhandle(mntfh);
+ 	return ERR_PTR(error);
+ 
+@@ -2838,7 +2838,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ 
+ 	data = nfs_alloc_parsed_mount_data(4);
+ 	if (data == NULL)
+-		goto out_free_data;
++		goto out;
+ 
+ 	/* Validate the mount data */
+ 	error = nfs4_validate_mount_data(raw_data, data, dev_name);
+@@ -2852,12 +2852,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ 		error = PTR_ERR(res);
+ 
+ out:
+-	kfree(data->client_address);
+-	kfree(data->nfs_server.export_path);
+-	kfree(data->nfs_server.hostname);
+-	kfree(data->fscache_uniq);
+-out_free_data:
+-	kfree(data);
++	nfs_free_parsed_mount_data(data);
+ 	dprintk("<-- nfs4_mount() = %d%s\n", error,
+ 			error != 0 ? " [error]" : "");
+ 	return res;
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 62f3b90..5f312ab 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ 	struct svc_expkey key;
+ 	struct svc_expkey *ek = NULL;
+ 
+-	if (mesg[mlen-1] != '\n')
++	if (mlen < 1 || mesg[mlen-1] != '\n')
+ 		return -EINVAL;
+ 	mesg[mlen-1] = 0;
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 47e94e3..5abced7 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3809,16 +3809,29 @@ nevermind:
+ 		deny->ld_type = NFS4_WRITE_LT;
+ }
+ 
++static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
++{
++	struct nfs4_ol_stateid *lst;
++
++	if (!same_owner_str(&lo->lo_owner, owner, clid))
++		return false;
++	lst = list_first_entry(&lo->lo_owner.so_stateids,
++			       struct nfs4_ol_stateid, st_perstateowner);
++	return lst->st_file->fi_inode == inode;
++}
++
+ static struct nfs4_lockowner *
+ find_lockowner_str(struct inode *inode, clientid_t *clid,
+ 		struct xdr_netobj *owner)
+ {
+ 	unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
++	struct nfs4_lockowner *lo;
+ 	struct nfs4_stateowner *op;
+ 
+ 	list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
+-		if (same_owner_str(op, owner, clid))
+-			return lockowner(op);
++		lo = lockowner(op);
++		if (same_lockowner_ino(lo, inode, clid, owner))
++			return lo;
+ 	}
+ 	return NULL;
+ }
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index e14587d..f104d56 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+ 
+ 	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+ 
+-	/* 1 from caller and 1 for being on i_list/g_list */
+-	BUG_ON(atomic_read(&mark->refcnt) < 2);
+-
+ 	spin_lock(&group->mark_lock);
+ 
+ 	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+@@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+ 		iput(inode);
+ 
+ 	/*
++	 * We don't necessarily have a ref on mark from caller so the above iput
++	 * may have already destroyed it.  Don't touch from now on.
++	 */
++
++	/*
+ 	 * it's possible that this group tried to destroy itself, but this
+ 	 * this mark was simultaneously being freed by inode.  If that's the
+ 	 * case, we finish freeing the group here.
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 851ba3d..1fc1dca 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
+ 	return result;
+ }
+ 
+-static struct mm_struct *__check_mem_permission(struct task_struct *task)
+-{
+-	struct mm_struct *mm;
+-
+-	mm = get_task_mm(task);
+-	if (!mm)
+-		return ERR_PTR(-EINVAL);
+-
+-	/*
+-	 * A task can always look at itself, in case it chooses
+-	 * to use system calls instead of load instructions.
+-	 */
+-	if (task == current)
+-		return mm;
+-
+-	/*
+-	 * If current is actively ptrace'ing, and would also be
+-	 * permitted to freshly attach with ptrace now, permit it.
+-	 */
+-	if (task_is_stopped_or_traced(task)) {
+-		int match;
+-		rcu_read_lock();
+-		match = (ptrace_parent(task) == current);
+-		rcu_read_unlock();
+-		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
+-			return mm;
+-	}
+-
+-	/*
+-	 * No one else is allowed.
+-	 */
+-	mmput(mm);
+-	return ERR_PTR(-EPERM);
+-}
+-
+-/*
+- * If current may access user memory in @task return a reference to the
+- * corresponding mm, otherwise ERR_PTR.
+- */
+-static struct mm_struct *check_mem_permission(struct task_struct *task)
+-{
+-	struct mm_struct *mm;
+-	int err;
+-
+-	/*
+-	 * Avoid racing if task exec's as we might get a new mm but validate
+-	 * against old credentials.
+-	 */
+-	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+-	if (err)
+-		return ERR_PTR(err);
+-
+-	mm = __check_mem_permission(task);
+-	mutex_unlock(&task->signal->cred_guard_mutex);
+-
+-	return mm;
+-}
+-
+-struct mm_struct *mm_for_maps(struct task_struct *task)
++static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+ {
+ 	struct mm_struct *mm;
+ 	int err;
+@@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+ 
+ 	mm = get_task_mm(task);
+ 	if (mm && mm != current->mm &&
+-			!ptrace_may_access(task, PTRACE_MODE_READ)) {
++			!ptrace_may_access(task, mode)) {
+ 		mmput(mm);
+ 		mm = ERR_PTR(-EACCES);
+ 	}
+@@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+ 	return mm;
+ }
+ 
++struct mm_struct *mm_for_maps(struct task_struct *task)
++{
++	return mm_access(task, PTRACE_MODE_READ);
++}
++
+ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+ {
+ 	int res = 0;
+@@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
+ 
+ static int mem_open(struct inode* inode, struct file* file)
+ {
+-	file->private_data = (void*)((long)current->self_exec_id);
++	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
++	struct mm_struct *mm;
++
++	if (!task)
++		return -ESRCH;
++
++	mm = mm_access(task, PTRACE_MODE_ATTACH);
++	put_task_struct(task);
++
++	if (IS_ERR(mm))
++		return PTR_ERR(mm);
++
+ 	/* OK to pass negative loff_t, we can catch out-of-range */
+ 	file->f_mode |= FMODE_UNSIGNED_OFFSET;
++	file->private_data = mm;
++
+ 	return 0;
+ }
+ 
+ static ssize_t mem_read(struct file * file, char __user * buf,
+ 			size_t count, loff_t *ppos)
+ {
+-	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
++	int ret;
+ 	char *page;
+ 	unsigned long src = *ppos;
+-	int ret = -ESRCH;
+-	struct mm_struct *mm;
++	struct mm_struct *mm = file->private_data;
+ 
+-	if (!task)
+-		goto out_no_task;
++	if (!mm)
++		return 0;
+ 
+-	ret = -ENOMEM;
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+ 	if (!page)
+-		goto out;
+-
+-	mm = check_mem_permission(task);
+-	ret = PTR_ERR(mm);
+-	if (IS_ERR(mm))
+-		goto out_free;
+-
+-	ret = -EIO;
+- 
+-	if (file->private_data != (void*)((long)current->self_exec_id))
+-		goto out_put;
++		return -ENOMEM;
+ 
+ 	ret = 0;
+  
+@@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
+ 	}
+ 	*ppos = src;
+ 
+-out_put:
+-	mmput(mm);
+-out_free:
+ 	free_page((unsigned long) page);
+-out:
+-	put_task_struct(task);
+-out_no_task:
+ 	return ret;
+ }
+ 
+@@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ {
+ 	int copied;
+ 	char *page;
+-	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ 	unsigned long dst = *ppos;
+-	struct mm_struct *mm;
++	struct mm_struct *mm = file->private_data;
+ 
+-	copied = -ESRCH;
+-	if (!task)
+-		goto out_no_task;
++	if (!mm)
++		return 0;
+ 
+-	copied = -ENOMEM;
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+ 	if (!page)
+-		goto out_task;
+-
+-	mm = check_mem_permission(task);
+-	copied = PTR_ERR(mm);
+-	if (IS_ERR(mm))
+-		goto out_free;
+-
+-	copied = -EIO;
+-	if (file->private_data != (void *)((long)current->self_exec_id))
+-		goto out_mm;
++		return -ENOMEM;
+ 
+ 	copied = 0;
+ 	while (count > 0) {
+@@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ 	}
+ 	*ppos = dst;
+ 
+-out_mm:
+-	mmput(mm);
+-out_free:
+ 	free_page((unsigned long) page);
+-out_task:
+-	put_task_struct(task);
+-out_no_task:
+ 	return copied;
+ }
+ 
+@@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
+ 	return file->f_pos;
+ }
+ 
++static int mem_release(struct inode *inode, struct file *file)
++{
++	struct mm_struct *mm = file->private_data;
++
++	mmput(mm);
++	return 0;
++}
++
+ static const struct file_operations proc_mem_operations = {
+ 	.llseek		= mem_lseek,
+ 	.read		= mem_read,
+ 	.write		= mem_write,
+ 	.open		= mem_open,
++	.release	= mem_release,
+ };
+ 
+ static ssize_t environ_read(struct file *file, char __user *buf,
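The /proc refactor ties the mm's lifetime to the open file: the permission check and mm lookup happen once in mem_open(), the result is stashed in file->private_data, and mem_release() drops it, which also closes the old self_exec_id race. A toy model of the open/stash/release pattern; all types and helpers here are stand-ins.

#include <stdio.h>
#include <stdlib.h>

struct mm { int refs; };
struct file { struct mm *private_data; };

static struct mm *mm_access(int allowed)   /* stand-in permission check */
{
        struct mm *mm;

        if (!allowed)
                return NULL;
        mm = calloc(1, sizeof(*mm));
        mm->refs = 1;
        return mm;
}

static int mem_open(struct file *f, int allowed)
{
        f->private_data = mm_access(allowed);   /* check once, at open */
        return f->private_data ? 0 : -1;
}

static void mem_release(struct file *f)
{
        free(f->private_data);                  /* drop the pinned mm */
}

int main(void)
{
        struct file f;

        if (mem_open(&f, 1) == 0) {
                puts("reads now use f.private_data; no per-read checks");
                mem_release(&f);
        }
        return 0;
}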
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index e418c5a..7dcd2a2 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+ 		if (!page)
+ 			continue;
+ 
++		if (PageReserved(page))
++			continue;
++
+ 		/* Clear accessed and referenced bits. */
+ 		ptep_test_and_clear_young(vma, addr, pte);
+ 		ClearPageReferenced(page);
+diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
+index 766b1d4..29166ec 100644
+--- a/fs/proc/uptime.c
++++ b/fs/proc/uptime.c
+@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
+ {
+ 	struct timespec uptime;
+ 	struct timespec idle;
++	cputime64_t idletime;
++	u64 nsec;
++	u32 rem;
+ 	int i;
+-	cputime_t idletime = cputime_zero;
+ 
++	idletime = 0;
+ 	for_each_possible_cpu(i)
+ 		idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+ 
+ 	do_posix_clock_monotonic_gettime(&uptime);
+ 	monotonic_to_bootbased(&uptime);
+-	cputime_to_timespec(idletime, &idle);
++	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
++	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
++	idle.tv_nsec = rem;
+ 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
+ 			(unsigned long) uptime.tv_sec,
+ 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
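The uptime fix sidesteps an overflow in the old cputime conversion by doing the whole computation in 64-bit nanoseconds and splitting with a divide-with-remainder. A sketch of that conversion; HZ = 100 is an assumption for the example, not a claim about any particular config.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define HZ        100
#define TICK_NSEC (1000000000ULL / HZ)

/* Split a 64-bit jiffies count into whole seconds and nanoseconds,
 * mirroring the div_u64_rem() conversion above. */
static void jiffies_to_ts(uint64_t jiffies, uint64_t *sec, uint32_t *nsec)
{
        uint64_t ns = jiffies * TICK_NSEC;

        *sec  = ns / 1000000000ULL;
        *nsec = (uint32_t)(ns % 1000000000ULL);
}

int main(void)
{
        uint64_t sec;
        uint32_t nsec;

        jiffies_to_ts(123456789ULL, &sec, &nsec);   /* ~14 days of ticks */
        printf("%" PRIu64 ".%02u\n", sec, nsec / 10000000);
        return 0;
}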
+diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
+index 8d9c468..c9d2941 100644
+--- a/fs/ubifs/debug.h
++++ b/fs/ubifs/debug.h
+@@ -175,22 +175,23 @@ const char *dbg_key_str1(const struct ubifs_info *c,
+ 			 const union ubifs_key *key);
+ 
+ /*
+- * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
+- * macros.
++ * TODO: these macros are now broken because there is no locking around them
++ * and we use a global buffer for the key string. This means that in case of
++ * concurrent execution we will end up with incorrect and messy key strings.
+  */
+ #define DBGKEY(key) dbg_key_str0(c, (key))
+ #define DBGKEY1(key) dbg_key_str1(c, (key))
+ 
+ extern spinlock_t dbg_lock;
+ 
+-#define ubifs_dbg_msg(type, fmt, ...) do {                        \
+-	spin_lock(&dbg_lock);                                     \
+-	pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
+-	spin_unlock(&dbg_lock);                                   \
+-} while (0)
++#define ubifs_dbg_msg(type, fmt, ...) \
++	pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
+ 
+ /* Just a debugging messages not related to any specific UBIFS subsystem */
+-#define dbg_msg(fmt, ...)   ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
++#define dbg_msg(fmt, ...)                                                     \
++	printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid,  \
++	       __func__, ##__VA_ARGS__)
++
+ /* General messages */
+ #define dbg_gen(fmt, ...)   ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
+ /* Additional journal messages */
+diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
+index 8a24f0c..286a051 100644
+--- a/fs/xfs/xfs_discard.c
++++ b/fs/xfs/xfs_discard.c
+@@ -68,7 +68,7 @@ xfs_trim_extents(
+ 	 * Look up the longest btree in the AGF and start with it.
+ 	 */
+ 	error = xfs_alloc_lookup_le(cur, 0,
+-				    XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
++			    be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
+ 	if (error)
+ 		goto out_del_cursor;
+ 
+@@ -84,7 +84,7 @@ xfs_trim_extents(
+ 		if (error)
+ 			goto out_del_cursor;
+ 		XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+-		ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
++		ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
+ 
+ 		/*
+ 		 * Too small?  Give up.
+diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
+index 1739726..451823c 100644
+--- a/include/acpi/acpi_numa.h
++++ b/include/acpi/acpi_numa.h
+@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
+ extern int node_to_pxm(int);
+ extern void __acpi_map_pxm_to_node(int, int);
+ extern int acpi_map_pxm_to_node(int);
++extern unsigned char acpi_srat_revision;
+ 
+ #endif				/* CONFIG_ACPI_NUMA */
+ #endif				/* __ACP_NUMA_H */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 94acd81..0ed1eb0 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -675,6 +675,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
+ 				     struct request *rq);
+ extern void blk_delay_queue(struct request_queue *, unsigned long);
+ extern void blk_recount_segments(struct request_queue *, struct bio *);
++extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
++extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
++			      unsigned int, void __user *);
+ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ 			  unsigned int, void __user *);
+ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index 5c4abce..b936763 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -5,6 +5,7 @@
+ #include <linux/kexec.h>
+ #include <linux/device.h>
+ #include <linux/proc_fs.h>
++#include <linux/elf.h>
+ 
+ #define ELFCORE_ADDR_MAX	(-1ULL)
+ #define ELFCORE_ADDR_ERR	(-2ULL)
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index ed9f74f..4eb8c80 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -203,6 +203,7 @@ struct dentry_operations {
+ 
+ #define DCACHE_CANT_MOUNT	0x0100
+ #define DCACHE_GENOCIDE		0x0200
++#define DCACHE_SHRINK_LIST	0x0400
+ 
+ #define DCACHE_NFSFS_RENAMED	0x1000
+      /* this dentry has been "silly renamed" and has to be deleted on the last
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index b87068a..81572af 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -119,6 +119,8 @@ struct zone_reclaim_stat*
+ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ 					struct task_struct *p);
++extern void mem_cgroup_replace_page_cache(struct page *oldpage,
++					struct page *newpage);
+ 
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+ extern int do_swap_account;
+@@ -366,6 +368,10 @@ static inline
+ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+ {
+ }
++static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
++				struct page *newpage)
++{
++}
+ #endif /* CONFIG_CGROUP_MEM_CONT */
+ 
+ #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 2a7c533..6c898af 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -602,11 +602,16 @@ struct nfs_getaclargs {
+ 	size_t				acl_len;
+ 	unsigned int			acl_pgbase;
+ 	struct page **			acl_pages;
++	struct page *			acl_scratch;
+ 	struct nfs4_sequence_args 	seq_args;
+ };
+ 
++/* getxattr ACL interface flags */
++#define NFS4_ACL_LEN_REQUEST	0x0001	/* zero length getxattr buffer */
+ struct nfs_getaclres {
+ 	size_t				acl_len;
++	size_t				acl_data_offset;
++	int				acl_flags;
+ 	struct nfs4_sequence_res	seq_res;
+ };
+ 
+diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
+index b5d9657..411c412 100644
+--- a/include/linux/pci_regs.h
++++ b/include/linux/pci_regs.h
+@@ -392,7 +392,7 @@
+ #define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
+ #define  PCI_EXP_TYPE_PCI_BRIDGE 0x7	/* PCI/PCI-X Bridge */
+ #define  PCI_EXP_TYPE_RC_END	0x9	/* Root Complex Integrated Endpoint */
+-#define  PCI_EXP_TYPE_RC_EC	0x10	/* Root Complex Event Collector */
++#define  PCI_EXP_TYPE_RC_EC	0xa	/* Root Complex Event Collector */
+ #define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
+ #define PCI_EXP_FLAGS_IRQ	0x3e00	/* Interrupt message number */
+ #define PCI_EXP_DEVCAP		4	/* Device capabilities */
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 9291ac3..6f10c9c 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
+ 					loff_t size, unsigned long flags);
+ extern int shmem_zero_setup(struct vm_area_struct *);
+ extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
++extern void shmem_unlock_mapping(struct address_space *mapping);
+ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ 					pgoff_t index, gfp_t gfp_mask);
+ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
+index 85c50b4..c84e974 100644
+--- a/include/linux/sunrpc/svcsock.h
++++ b/include/linux/sunrpc/svcsock.h
+@@ -34,7 +34,7 @@ struct svc_sock {
+ /*
+  * Function prototypes.
+  */
+-void		svc_close_all(struct list_head *);
++void		svc_close_all(struct svc_serv *);
+ int		svc_recv(struct svc_rqst *, long);
+ int		svc_send(struct svc_rqst *);
+ void		svc_drop(struct svc_rqst *);
+diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
+index a20970e..af70af3 100644
+--- a/include/linux/sunrpc/xdr.h
++++ b/include/linux/sunrpc/xdr.h
+@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+ 			     struct xdr_array2_desc *desc);
+ extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+ 			     struct xdr_array2_desc *desc);
++extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
++			     size_t len);
+ 
+ /*
+  * Provide some simple tools for XDR buffer overflow-checking etc.
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 1e22e12..67b3fa3 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -272,7 +272,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
+ #endif
+ 
+ extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+-extern void scan_mapping_unevictable_pages(struct address_space *);
++extern void check_move_unevictable_pages(struct page **, int nr_pages);
+ 
+ extern unsigned long scan_unevictable_pages;
+ extern int scan_unevictable_handler(struct ctl_table *, int,
+diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
+index 4b752d5..45a7698 100644
+--- a/include/linux/videodev2.h
++++ b/include/linux/videodev2.h
+@@ -1131,6 +1131,7 @@ struct v4l2_querymenu {
+ #define V4L2_CTRL_FLAG_NEXT_CTRL	0x80000000
+ 
+ /*  User-class control IDs defined by V4L2 */
++#define V4L2_CID_MAX_CTRLS		1024
+ #define V4L2_CID_BASE			(V4L2_CTRL_CLASS_USER | 0x900)
+ #define V4L2_CID_USER_BASE 		V4L2_CID_BASE
+ /*  IDs reserved for driver specific controls */
+diff --git a/include/media/tuner.h b/include/media/tuner.h
+index 89c290b..29e1920 100644
+--- a/include/media/tuner.h
++++ b/include/media/tuner.h
+@@ -127,7 +127,6 @@
+ #define TUNER_PHILIPS_FMD1216MEX_MK3	78
+ #define TUNER_PHILIPS_FM1216MK5		79
+ #define TUNER_PHILIPS_FQ1216LME_MK3	80	/* Active loopthrough, no FM */
+-#define TUNER_XC4000			81	/* Xceive Silicon Tuner */
+ 
+ #define TUNER_PARTSNIC_PTI_5NF05	81
+ #define TUNER_PHILIPS_CU1216L           82
+@@ -136,6 +135,8 @@
+ #define TUNER_PHILIPS_FQ1236_MK5	85	/* NTSC, TDA9885, no FM radio */
+ #define TUNER_TENA_TNF_5337		86
+ 
++#define TUNER_XC4000			87	/* Xceive Silicon Tuner */
++
+ /* tv card specific */
+ #define TDA9887_PRESENT 		(1<<0)
+ #define TDA9887_PORT1_INACTIVE 		(1<<1)
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 6873c7d..a79886c 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -34,6 +34,7 @@
+ #define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
+ /* Used by transport_send_check_condition_and_sense() */
+ #define SPC_SENSE_KEY_OFFSET			2
++#define SPC_ADD_SENSE_LEN_OFFSET		7
+ #define SPC_ASC_KEY_OFFSET			12
+ #define SPC_ASCQ_KEY_OFFSET			13
+ #define TRANSPORT_IQN_LEN			224
+diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
+index f6f07aa..7cdfca2 100644
+--- a/include/xen/interface/io/xs_wire.h
++++ b/include/xen/interface/io/xs_wire.h
+@@ -87,4 +87,7 @@ struct xenstore_domain_interface {
+     XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ };
+ 
++/* Violating this is very bad.  See docs/misc/xenstore.txt. */
++#define XENSTORE_PAYLOAD_MAX 4096
++
+ #endif /* _XS_WIRE_H */
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 0f6e1d9..db6e5ee 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -398,15 +398,42 @@ out:
+ }
+  
+ #ifdef CONFIG_ROOT_NFS
++
++#define NFSROOT_TIMEOUT_MIN	5
++#define NFSROOT_TIMEOUT_MAX	30
++#define NFSROOT_RETRY_MAX	5
++
+ static int __init mount_nfs_root(void)
+ {
+ 	char *root_dev, *root_data;
++	unsigned int timeout;
++	int try, err;
+ 
+-	if (nfs_root_data(&root_dev, &root_data) != 0)
+-		return 0;
+-	if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
++	err = nfs_root_data(&root_dev, &root_data);
++	if (err != 0)
+ 		return 0;
+-	return 1;
++
++	/*
++	 * The server or network may not be ready, so try several
++	 * times.  Stop after a few tries in case the client wants
++	 * to fall back to other boot methods.
++	 */
++	timeout = NFSROOT_TIMEOUT_MIN;
++	for (try = 1; ; try++) {
++		err = do_mount_root(root_dev, "nfs",
++					root_mountflags, root_data);
++		if (err == 0)
++			return 1;
++		if (try > NFSROOT_RETRY_MAX)
++			break;
++
++		/* Wait, in case the server refused us immediately */
++		ssleep(timeout);
++		timeout <<= 1;
++		if (timeout > NFSROOT_TIMEOUT_MAX)
++			timeout = NFSROOT_TIMEOUT_MAX;
++	}
++	return 0;
+ }
+ #endif
+ 
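mount_nfs_root() now retries with a doubling, capped delay, since the NFS server or the network may simply not be up yet at mount time, while still giving up after a bounded number of tries so other boot methods get a chance. A standalone sketch of the same backoff loop; try_op() is a stand-in that succeeds on the third attempt.

#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_MIN 5
#define TIMEOUT_MAX 30
#define RETRY_MAX   5

static int try_op(int attempt) { return attempt >= 3; }

static int retry_with_backoff(void)
{
        unsigned int timeout = TIMEOUT_MIN;
        int try;

        for (try = 1; ; try++) {
                if (try_op(try))
                        return 1;                /* mounted */
                if (try > RETRY_MAX)
                        return 0;                /* give up, fall back */
                sleep(timeout);                  /* server may not be up yet */
                timeout <<= 1;                   /* double, but cap the wait */
                if (timeout > TIMEOUT_MAX)
                        timeout = TIMEOUT_MAX;
        }
}

int main(void)
{
        printf("%s\n", retry_with_backoff() ? "mounted" : "gave up");
        return 0;
}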
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 02ecf2c..b76be5b 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ 	case SHM_LOCK:
+ 	case SHM_UNLOCK:
+ 	{
+-		struct file *uninitialized_var(shm_file);
+-
+-		lru_add_drain_all();  /* drain pagevecs to lru lists */
++		struct file *shm_file;
+ 
+ 		shp = shm_lock_check(ns, shmid);
+ 		if (IS_ERR(shp)) {
+@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ 		err = security_shm_shmctl(shp, cmd);
+ 		if (err)
+ 			goto out_unlock;
+-		
+-		if(cmd==SHM_LOCK) {
++
++		shm_file = shp->shm_file;
++		if (is_file_hugepages(shm_file))
++			goto out_unlock;
++
++		if (cmd == SHM_LOCK) {
+ 			struct user_struct *user = current_user();
+-			if (!is_file_hugepages(shp->shm_file)) {
+-				err = shmem_lock(shp->shm_file, 1, user);
+-				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
+-					shp->shm_perm.mode |= SHM_LOCKED;
+-					shp->mlock_user = user;
+-				}
++			err = shmem_lock(shm_file, 1, user);
++			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
++				shp->shm_perm.mode |= SHM_LOCKED;
++				shp->mlock_user = user;
+ 			}
+-		} else if (!is_file_hugepages(shp->shm_file)) {
+-			shmem_lock(shp->shm_file, 0, shp->mlock_user);
+-			shp->shm_perm.mode &= ~SHM_LOCKED;
+-			shp->mlock_user = NULL;
++			goto out_unlock;
+ 		}
++
++		/* SHM_UNLOCK */
++		if (!(shp->shm_perm.mode & SHM_LOCKED))
++			goto out_unlock;
++		shmem_lock(shm_file, 0, shp->mlock_user);
++		shp->shm_perm.mode &= ~SHM_LOCKED;
++		shp->mlock_user = NULL;
++		get_file(shm_file);
+ 		shm_unlock(shp);
++		shmem_unlock_mapping(shm_file->f_mapping);
++		fput(shm_file);
+ 		goto out;
+ 	}
+ 	case IPC_RMID:
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index e5d8464..52fd049 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
+ 		/* Early boot.  kretprobe_table_locks not yet initialized. */
+ 		return;
+ 
++	INIT_HLIST_HEAD(&empty_rp);
+ 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
+ 	head = &kretprobe_inst_table[hash];
+ 	kretprobe_table_lock(hash, &flags);
+@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
+ 			recycle_rp_inst(ri, &empty_rp);
+ 	}
+ 	kretprobe_table_unlock(hash, &flags);
+-	INIT_HLIST_HEAD(&empty_rp);
+ 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ 		hlist_del(&ri->hlist);
+ 		kfree(ri);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index b1e8943..25b4f4d 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -948,7 +948,7 @@ struct ftrace_func_probe {
+ };
+ 
+ enum {
+-	FTRACE_ENABLE_CALLS		= (1 << 0),
++	FTRACE_UPDATE_CALLS		= (1 << 0),
+ 	FTRACE_DISABLE_CALLS		= (1 << 1),
+ 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
+ 	FTRACE_START_FUNC_RET		= (1 << 3),
+@@ -1519,7 +1519,7 @@ int ftrace_text_reserved(void *start, void *end)
+ 
+ 
+ static int
+-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
++__ftrace_replace_code(struct dyn_ftrace *rec, int update)
+ {
+ 	unsigned long ftrace_addr;
+ 	unsigned long flag = 0UL;
+@@ -1527,17 +1527,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+ 	ftrace_addr = (unsigned long)FTRACE_ADDR;
+ 
+ 	/*
+-	 * If we are enabling tracing:
++	 * If we are updating calls:
+ 	 *
+ 	 *   If the record has a ref count, then we need to enable it
+ 	 *   because someone is using it.
+ 	 *
+ 	 *   Otherwise we make sure its disabled.
+ 	 *
+-	 * If we are disabling tracing, then disable all records that
++	 * If we are disabling calls, then disable all records that
+ 	 * are enabled.
+ 	 */
+-	if (enable && (rec->flags & ~FTRACE_FL_MASK))
++	if (update && (rec->flags & ~FTRACE_FL_MASK))
+ 		flag = FTRACE_FL_ENABLED;
+ 
+ 	/* If the state of this record hasn't changed, then do nothing */
+@@ -1553,7 +1553,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+ 	return ftrace_make_nop(NULL, rec, ftrace_addr);
+ }
+ 
+-static void ftrace_replace_code(int enable)
++static void ftrace_replace_code(int update)
+ {
+ 	struct dyn_ftrace *rec;
+ 	struct ftrace_page *pg;
+@@ -1567,7 +1567,7 @@ static void ftrace_replace_code(int enable)
+ 		if (rec->flags & FTRACE_FL_FREE)
+ 			continue;
+ 
+-		failed = __ftrace_replace_code(rec, enable);
++		failed = __ftrace_replace_code(rec, update);
+ 		if (failed) {
+ 			ftrace_bug(failed, rec->ip);
+ 			/* Stop processing */
+@@ -1623,7 +1623,7 @@ static int __ftrace_modify_code(void *data)
+ 	 */
+ 	function_trace_stop++;
+ 
+-	if (*command & FTRACE_ENABLE_CALLS)
++	if (*command & FTRACE_UPDATE_CALLS)
+ 		ftrace_replace_code(1);
+ 	else if (*command & FTRACE_DISABLE_CALLS)
+ 		ftrace_replace_code(0);
+@@ -1691,7 +1691,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ 		return -ENODEV;
+ 
+ 	ftrace_start_up++;
+-	command |= FTRACE_ENABLE_CALLS;
++	command |= FTRACE_UPDATE_CALLS;
+ 
+ 	/* ops marked global share the filter hashes */
+ 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+@@ -1743,8 +1743,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 	if (ops != &global_ops || !global_start_up)
+ 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ 
+-	if (!ftrace_start_up)
+-		command |= FTRACE_DISABLE_CALLS;
++	command |= FTRACE_UPDATE_CALLS;
+ 
+ 	if (saved_ftrace_func != ftrace_trace_function) {
+ 		saved_ftrace_func = ftrace_trace_function;
+@@ -1766,7 +1765,7 @@ static void ftrace_startup_sysctl(void)
+ 	saved_ftrace_func = NULL;
+ 	/* ftrace_start_up is true if we want ftrace running */
+ 	if (ftrace_start_up)
+-		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ }
+ 
+ static void ftrace_shutdown_sysctl(void)
+@@ -2919,7 +2918,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ 	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+ 	    && ftrace_enabled)
+-		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ 
+ 	mutex_unlock(&ftrace_lock);
+ 
+@@ -3107,7 +3106,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
+ 				       orig_hash, iter->hash);
+ 		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+ 		    && ftrace_enabled)
+-			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ 
+ 		mutex_unlock(&ftrace_lock);
+ 	}
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index db110b8..f1539de 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
+ 	int ret = 0;
+ 
+ 	/*
+-	 * We skip modules that tain the kernel, especially those with different
+-	 * module header (for forced load), to make sure we don't cause a crash.
++	 * We skip modules that taint the kernel, especially those with different
++	 * module headers (for forced load), to make sure we don't cause a crash.
++	 * Staging and out-of-tree GPL modules are fine.
+ 	 */
+-	if (mod->taints)
++	if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+ 		return 0;
+ 	mutex_lock(&tracepoints_mutex);
+ 	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
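
The new test only bails out on taints other than out-of-tree and staging modules. A self-contained sketch of the same mask logic; the TAINT_* bit positions below are illustrative assumptions, the real values live in include/linux/kernel.h.

    #include <stdbool.h>
    #include <stdio.h>

    #define TAINT_CRAP        10   /* staging driver loaded (assumed value) */
    #define TAINT_OOT_MODULE  12   /* out-of-tree module loaded (assumed value) */

    /* Tolerate staging and out-of-tree GPL modules; skip a module for
     * any other taint (forced load, mismatched headers, ...). */
    static bool skip_module(unsigned int taints)
    {
        return taints & ~((1u << TAINT_OOT_MODULE) | (1u << TAINT_CRAP));
    }

    int main(void)
    {
        printf("%d\n", skip_module(0));                      /* 0: clean */
        printf("%d\n", skip_module(1u << TAINT_OOT_MODULE)); /* 0: tolerated */
        printf("%d\n", skip_module(1u << 0));                /* 1: other taint */
        return 0;
    }
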
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 5f0a3c9..90286a4 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
+ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ {
+ 	int error;
+-	struct mem_cgroup *memcg = NULL;
+ 
+ 	VM_BUG_ON(!PageLocked(old));
+ 	VM_BUG_ON(!PageLocked(new));
+ 	VM_BUG_ON(new->mapping);
+ 
+-	/*
+-	 * This is not page migration, but prepare_migration and
+-	 * end_migration does enough work for charge replacement.
+-	 *
+-	 * In the longer term we probably want a specialized function
+-	 * for moving the charge from old to new in a more efficient
+-	 * manner.
+-	 */
+-	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+-	if (error)
+-		return error;
+-
+ 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ 	if (!error) {
+ 		struct address_space *mapping = old->mapping;
+@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ 		if (PageSwapBacked(new))
+ 			__inc_zone_page_state(new, NR_SHMEM);
+ 		spin_unlock_irq(&mapping->tree_lock);
++		/* mem_cgroup codes must not be called under tree_lock */
++		mem_cgroup_replace_page_cache(old, new);
+ 		radix_tree_preload_end();
+ 		if (freepage)
+ 			freepage(old);
+ 		page_cache_release(old);
+-		mem_cgroup_end_migration(memcg, old, new, true);
+-	} else {
+-		mem_cgroup_end_migration(memcg, old, new, false);
+ 	}
+ 
+ 	return error;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index b63f5f7..f538e9b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3366,6 +3366,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
+ 	cgroup_release_and_wakeup_rmdir(&memcg->css);
+ }
+ 
++/*
++ * At replace page cache, newpage is not under any memcg but it's on
++ * LRU. So, this function doesn't touch res_counter but handles LRU
++ * in correct way. Both pages are locked so we cannot race with uncharge.
++ */
++void mem_cgroup_replace_page_cache(struct page *oldpage,
++				  struct page *newpage)
++{
++	struct mem_cgroup *memcg;
++	struct page_cgroup *pc;
++	struct zone *zone;
++	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
++	unsigned long flags;
++
++	if (mem_cgroup_disabled())
++		return;
++
++	pc = lookup_page_cgroup(oldpage);
++	/* fix accounting on old pages */
++	lock_page_cgroup(pc);
++	memcg = pc->mem_cgroup;
++	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
++	ClearPageCgroupUsed(pc);
++	unlock_page_cgroup(pc);
++
++	if (PageSwapBacked(oldpage))
++		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
++
++	zone = page_zone(newpage);
++	pc = lookup_page_cgroup(newpage);
++	/*
++	 * Even if newpage->mapping was NULL before starting replacement,
++	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
++	 * LRU while we overwrite pc->mem_cgroup.
++	 */
++	spin_lock_irqsave(&zone->lru_lock, flags);
++	if (PageLRU(newpage))
++		del_page_from_lru_list(zone, newpage, page_lru(newpage));
++	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
++	if (PageLRU(newpage))
++		add_page_to_lru_list(zone, newpage, page_lru(newpage));
++	spin_unlock_irqrestore(&zone->lru_lock, flags);
++}
++
+ #ifdef CONFIG_DEBUG_VM
+ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+ {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2b8ba3a..485be89 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5608,6 +5608,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
+ bool is_pageblock_removable_nolock(struct page *page)
+ {
+ 	struct zone *zone = page_zone(page);
++	unsigned long pfn = page_to_pfn(page);
++
++	/*
++	 * We have to be careful here because we are iterating over memory
++	 * sections which are not zone aware so we might end up outside of
++	 * the zone but still within the section.
++	 */
++	if (!zone || zone->zone_start_pfn > pfn ||
++			zone->zone_start_pfn + zone->spanned_pages <= pfn)
++		return false;
++
+ 	return __count_immobile_pages(zone, page, 0);
+ }
+ 
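
The guard added above matters because memory sections are not zone aware, so a valid struct page can still lie outside its zone's span. The range test reduces to a half-open interval check (a standalone model, with zone_start_pfn and spanned_pages mirrored as plain parameters):

    #include <stdbool.h>
    #include <stdio.h>

    /* A pfn belongs to the zone only if it falls inside
     * [zone_start_pfn, zone_start_pfn + spanned_pages). */
    static bool pfn_in_zone_span(unsigned long pfn,
                                 unsigned long zone_start_pfn,
                                 unsigned long spanned_pages)
    {
        return pfn >= zone_start_pfn &&
               pfn < zone_start_pfn + spanned_pages;
    }

    int main(void)
    {
        printf("%d\n", pfn_in_zone_span(0x1000, 0x1000, 0x100)); /* 1: first pfn */
        printf("%d\n", pfn_in_zone_span(0x1100, 0x1000, 0x100)); /* 0: one past end */
        return 0;
    }
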
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d672250..6c253f7 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
+ /*
+  * Pagevec may contain swap entries, so shuffle up pages before releasing.
+  */
+-static void shmem_pagevec_release(struct pagevec *pvec)
++static void shmem_deswap_pagevec(struct pagevec *pvec)
+ {
+ 	int i, j;
+ 
+@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
+ 			pvec->pages[j++] = page;
+ 	}
+ 	pvec->nr = j;
+-	pagevec_release(pvec);
++}
++
++/*
++ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
++ */
++void shmem_unlock_mapping(struct address_space *mapping)
++{
++	struct pagevec pvec;
++	pgoff_t indices[PAGEVEC_SIZE];
++	pgoff_t index = 0;
++
++	pagevec_init(&pvec, 0);
++	/*
++	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
++	 */
++	while (!mapping_unevictable(mapping)) {
++		/*
++		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
++		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
++		 */
++		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
++					PAGEVEC_SIZE, pvec.pages, indices);
++		if (!pvec.nr)
++			break;
++		index = indices[pvec.nr - 1] + 1;
++		shmem_deswap_pagevec(&pvec);
++		check_move_unevictable_pages(pvec.pages, pvec.nr);
++		pagevec_release(&pvec);
++		cond_resched();
++	}
+ }
+ 
+ /*
+@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ 			}
+ 			unlock_page(page);
+ 		}
+-		shmem_pagevec_release(&pvec);
++		shmem_deswap_pagevec(&pvec);
++		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		cond_resched();
+ 		index++;
+@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ 			continue;
+ 		}
+ 		if (index == start && indices[0] > end) {
+-			shmem_pagevec_release(&pvec);
++			shmem_deswap_pagevec(&pvec);
++			pagevec_release(&pvec);
+ 			break;
+ 		}
+ 		mem_cgroup_uncharge_start();
+@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ 			}
+ 			unlock_page(page);
+ 		}
+-		shmem_pagevec_release(&pvec);
++		shmem_deswap_pagevec(&pvec);
++		pagevec_release(&pvec);
+ 		mem_cgroup_uncharge_end();
+ 		index++;
+ 	}
+@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
+ 		user_shm_unlock(inode->i_size, user);
+ 		info->flags &= ~VM_LOCKED;
+ 		mapping_clear_unevictable(file->f_mapping);
+-		/*
+-		 * Ensure that a racing putback_lru_page() can see
+-		 * the pages of this mapping are evictable when we
+-		 * skip them due to !PageLRU during the scan.
+-		 */
+-		smp_mb__after_clear_bit();
+-		scan_mapping_unevictable_pages(file->f_mapping);
+ 	}
+ 	retval = 0;
+ 
+@@ -2446,6 +2471,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
+ 	return 0;
+ }
+ 
++void shmem_unlock_mapping(struct address_space *mapping)
++{
++}
++
+ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ {
+ 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
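
shmem_unlock_mapping() walks the mapping in pagevec-sized batches and resumes each pass one past the last index the previous lookup returned, so a run of swap entries cannot terminate the walk early. The loop shape, reduced to a standalone sketch (find_get_batch and the 100-entry mapping are invented stand-ins):

    #include <stdio.h>

    #define BATCH 14   /* stands in for PAGEVEC_SIZE */

    /* Hypothetical lookup: fill idx[] with up to 'max' ascending indices
     * starting at 'start'; the fake mapping holds indices 0..99. */
    static int find_get_batch(unsigned long start, unsigned long *idx, int max)
    {
        int n = 0;

        while (n < max && start + n < 100) {
            idx[n] = start + n;
            n++;
        }
        return n;
    }

    int main(void)
    {
        unsigned long idx[BATCH], index = 0;
        int n, batches = 0;

        /* Same shape as shmem_unlock_mapping(): process one batch,
         * then resume one past the last index it returned. */
        while ((n = find_get_batch(index, idx, BATCH)) != 0) {
            /* ... check_move_unevictable_pages() runs here ... */
            index = idx[n - 1] + 1;
            batches++;
        }
        printf("%d batches\n", batches);   /* 8 batches for 100 entries */
        return 0;
    }
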
+diff --git a/mm/slub.c b/mm/slub.c
+index ed3334d..1a919f0 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2166,6 +2166,11 @@ redo:
+ 		goto new_slab;
+ 	}
+ 
++	/* must check again c->freelist in case of cpu migration or IRQ */
++	object = c->freelist;
++	if (object)
++		goto load_freelist;
++
+ 	stat(s, ALLOC_SLOWPATH);
+ 
+ 	do {
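
The one-line slub fix re-reads c->freelist at the top of the slow path: between the fast-path miss and the point where the slow path runs with interrupts off, a migration or IRQ may have refilled the per-cpu freelist. A contrived userspace simulation of why the re-check rescues the allocation (the "IRQ" is simulated inline):

    #include <stdio.h>

    static int *freelist;         /* shared per-cpu slot, may be refilled */
    static int object = 42;

    static int *alloc_slow(void)
    {
        /* The real code runs here with IRQs off; whatever ran in the
         * window before that may have refilled the freelist, so it
         * must be checked again before taking a new slab. */
        int *obj = freelist;
        if (obj) {
            freelist = NULL;      /* consume it, as load_freelist would */
            return obj;
        }
        return NULL;              /* fall back to a new slab (omitted) */
    }

    int main(void)
    {
        freelist = NULL;
        int *obj = freelist;      /* fast path misses */
        if (!obj) {
            freelist = &object;   /* simulate an IRQ refilling the slot */
            obj = alloc_slow();
        }
        printf("%d\n", obj ? *obj : -1);   /* 42: rescued by the re-check */
        return 0;
    }
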
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index f54a05b..cb33d9c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -636,7 +636,7 @@ redo:
+ 		 * When racing with an mlock or AS_UNEVICTABLE clearing
+ 		 * (page is unlocked) make sure that if the other thread
+ 		 * does not observe our setting of PG_lru and fails
+-		 * isolation/check_move_unevictable_page,
++		 * isolation/check_move_unevictable_pages,
+ 		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
+ 		 * the page back to the evictable list.
+ 		 *
+@@ -3353,97 +3353,59 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
+ 	return 1;
+ }
+ 
++#ifdef CONFIG_SHMEM
+ /**
+- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+- * @page: page to check evictability and move to appropriate lru list
+- * @zone: zone page is in
++ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
++ * @pages:	array of pages to check
++ * @nr_pages:	number of pages to check
+  *
+- * Checks a page for evictability and moves the page to the appropriate
+- * zone lru list.
++ * Checks pages for evictability and moves them to the appropriate lru list.
+  *
+- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+- * have PageUnevictable set.
++ * This function is only used for SysV IPC SHM_UNLOCK.
+  */
+-static void check_move_unevictable_page(struct page *page, struct zone *zone)
++void check_move_unevictable_pages(struct page **pages, int nr_pages)
+ {
+-	VM_BUG_ON(PageActive(page));
+-
+-retry:
+-	ClearPageUnevictable(page);
+-	if (page_evictable(page, NULL)) {
+-		enum lru_list l = page_lru_base_type(page);
++	struct zone *zone = NULL;
++	int pgscanned = 0;
++	int pgrescued = 0;
++	int i;
+ 
+-		__dec_zone_state(zone, NR_UNEVICTABLE);
+-		list_move(&page->lru, &zone->lru[l].list);
+-		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
+-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
+-		__count_vm_event(UNEVICTABLE_PGRESCUED);
+-	} else {
+-		/*
+-		 * rotate unevictable list
+-		 */
+-		SetPageUnevictable(page);
+-		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+-		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
+-		if (page_evictable(page, NULL))
+-			goto retry;
+-	}
+-}
++	for (i = 0; i < nr_pages; i++) {
++		struct page *page = pages[i];
++		struct zone *pagezone;
+ 
+-/**
+- * scan_mapping_unevictable_pages - scan an address space for evictable pages
+- * @mapping: struct address_space to scan for evictable pages
+- *
+- * Scan all pages in mapping.  Check unevictable pages for
+- * evictability and move them to the appropriate zone lru list.
+- */
+-void scan_mapping_unevictable_pages(struct address_space *mapping)
+-{
+-	pgoff_t next = 0;
+-	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+-			 PAGE_CACHE_SHIFT;
+-	struct zone *zone;
+-	struct pagevec pvec;
++		pgscanned++;
++		pagezone = page_zone(page);
++		if (pagezone != zone) {
++			if (zone)
++				spin_unlock_irq(&zone->lru_lock);
++			zone = pagezone;
++			spin_lock_irq(&zone->lru_lock);
++		}
+ 
+-	if (mapping->nrpages == 0)
+-		return;
++		if (!PageLRU(page) || !PageUnevictable(page))
++			continue;
+ 
+-	pagevec_init(&pvec, 0);
+-	while (next < end &&
+-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+-		int i;
+-		int pg_scanned = 0;
+-
+-		zone = NULL;
+-
+-		for (i = 0; i < pagevec_count(&pvec); i++) {
+-			struct page *page = pvec.pages[i];
+-			pgoff_t page_index = page->index;
+-			struct zone *pagezone = page_zone(page);
+-
+-			pg_scanned++;
+-			if (page_index > next)
+-				next = page_index;
+-			next++;
+-
+-			if (pagezone != zone) {
+-				if (zone)
+-					spin_unlock_irq(&zone->lru_lock);
+-				zone = pagezone;
+-				spin_lock_irq(&zone->lru_lock);
+-			}
++		if (page_evictable(page, NULL)) {
++			enum lru_list lru = page_lru_base_type(page);
+ 
+-			if (PageLRU(page) && PageUnevictable(page))
+-				check_move_unevictable_page(page, zone);
++			VM_BUG_ON(PageActive(page));
++			ClearPageUnevictable(page);
++			__dec_zone_state(zone, NR_UNEVICTABLE);
++			list_move(&page->lru, &zone->lru[lru].list);
++			mem_cgroup_move_lists(page, LRU_UNEVICTABLE, lru);
++			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
++			pgrescued++;
+ 		}
+-		if (zone)
+-			spin_unlock_irq(&zone->lru_lock);
+-		pagevec_release(&pvec);
+-
+-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+ 	}
+ 
++	if (zone) {
++		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
++		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
++		spin_unlock_irq(&zone->lru_lock);
++	}
+ }
++#endif /* CONFIG_SHMEM */
+ 
+ static void warn_scan_unevictable_pages(void)
+ {
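
The rewritten helper batches the zone LRU lock: it only drops and retakes zone->lru_lock when consecutive pages belong to different zones, instead of taking it around every page as the old per-page function effectively did. The coalescing skeleton in isolation (zone and the lock calls are stand-ins):

    #include <stdio.h>

    struct zone { int id; };

    static void lock_zone(struct zone *z)   { printf("lock %d\n", z->id); }
    static void unlock_zone(struct zone *z) { printf("unlock %d\n", z->id); }

    int main(void)
    {
        struct zone z0 = { 0 }, z1 = { 1 };
        struct zone *page_zones[] = { &z0, &z0, &z0, &z1, &z1 };
        int n = sizeof(page_zones) / sizeof(page_zones[0]);
        struct zone *zone = NULL;

        /* Drop and retake the lock only when the zone changes, exactly
         * as the loop in check_move_unevictable_pages() does. */
        for (int i = 0; i < n; i++) {
            struct zone *pagezone = page_zones[i];

            if (pagezone != zone) {
                if (zone)
                    unlock_zone(zone);
                zone = pagezone;
                lock_zone(zone);
            }
            /* ... per-page rescue work under the lock ... */
        }
        if (zone)
            unlock_zone(zone);   /* two lock/unlock pairs for five pages */
        return 0;
    }
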
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index ea10a51..73495f1 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -702,6 +702,8 @@ struct tpt_led_trigger {
+  *	well be on the operating channel
+  * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
+  *	determine if we are on the operating channel or not
++ * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
++ *	gets only set in conjunction with SCAN_SW_SCANNING
+  * @SCAN_COMPLETED: Set for our scan work function when the driver reported
+  *	that the scan completed.
+  * @SCAN_ABORTED: Set for our scan work function when the driver reported
+@@ -710,6 +712,7 @@ struct tpt_led_trigger {
+ enum {
+ 	SCAN_SW_SCANNING,
+ 	SCAN_HW_SCANNING,
++	SCAN_OFF_CHANNEL,
+ 	SCAN_COMPLETED,
+ 	SCAN_ABORTED,
+ };
+@@ -1140,14 +1143,10 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_sched_scan_stopped_work(struct work_struct *work);
+ 
+ /* off-channel helpers */
+-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
+-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+-					bool tell_ap);
+-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+-				    bool offchannel_ps_enable);
++void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
++void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+ void ieee80211_offchannel_return(struct ieee80211_local *local,
+-				 bool enable_beaconing,
+-				 bool offchannel_ps_disable);
++				 bool enable_beaconing);
+ void ieee80211_hw_roc_setup(struct ieee80211_local *local);
+ 
+ /* interface handling */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index cae4435..a7536fd 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
+ 	ieee80211_configure_filter(local);
+ }
+ 
+-/*
+- * Returns true if we are logically configured to be on
+- * the operating channel AND the hardware-conf is currently
+- * configured on the operating channel.  Compares channel-type
+- * as well.
+- */
+-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+-{
+-	struct ieee80211_channel *chan, *scan_chan;
+-	enum nl80211_channel_type channel_type;
+-
+-	/* This logic needs to match logic in ieee80211_hw_config */
+-	if (local->scan_channel) {
+-		chan = local->scan_channel;
+-		/* If scanning on oper channel, use whatever channel-type
+-		 * is currently in use.
+-		 */
+-		if (chan == local->oper_channel)
+-			channel_type = local->_oper_channel_type;
+-		else
+-			channel_type = NL80211_CHAN_NO_HT;
+-	} else if (local->tmp_channel) {
+-		chan = scan_chan = local->tmp_channel;
+-		channel_type = local->tmp_channel_type;
+-	} else {
+-		chan = local->oper_channel;
+-		channel_type = local->_oper_channel_type;
+-	}
+-
+-	if (chan != local->oper_channel ||
+-	    channel_type != local->_oper_channel_type)
+-		return false;
+-
+-	/* Check current hardware-config against oper_channel. */
+-	if ((local->oper_channel != local->hw.conf.channel) ||
+-	    (local->_oper_channel_type != local->hw.conf.channel_type))
+-		return false;
+-
+-	return true;
+-}
+-
+ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+ {
+ 	struct ieee80211_channel *chan, *scan_chan;
+@@ -145,9 +104,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+ 
+ 	scan_chan = local->scan_channel;
+ 
+-	/* If this off-channel logic ever changes,  ieee80211_on_oper_channel
+-	 * may need to change as well.
+-	 */
+ 	offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ 	if (scan_chan) {
+ 		chan = scan_chan;
+@@ -158,19 +114,17 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+ 			channel_type = local->_oper_channel_type;
+ 		else
+ 			channel_type = NL80211_CHAN_NO_HT;
+-	} else if (local->tmp_channel) {
++		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
++	} else if (local->tmp_channel &&
++		   local->oper_channel != local->tmp_channel) {
+ 		chan = scan_chan = local->tmp_channel;
+ 		channel_type = local->tmp_channel_type;
++		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+ 	} else {
+ 		chan = local->oper_channel;
+ 		channel_type = local->_oper_channel_type;
+-	}
+-
+-	if (chan != local->oper_channel ||
+-	    channel_type != local->_oper_channel_type)
+-		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+-	else
+ 		local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
++	}
+ 
+ 	offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ 
+@@ -279,7 +233,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ 
+ 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ 		if (local->quiescing || !ieee80211_sdata_running(sdata) ||
+-		    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
++		    test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+ 			sdata->vif.bss_conf.enable_beacon = false;
+ 		} else {
+ 			/*
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 3d41441..1b239be 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -18,14 +18,10 @@
+ #include "driver-trace.h"
+ 
+ /*
+- * Tell our hardware to disable PS.
+- * Optionally inform AP that we will go to sleep so that it will buffer
+- * the frames while we are doing off-channel work.  This is optional
+- * because we *may* be doing work on-operating channel, and want our
+- * hardware unconditionally awake, but still let the AP send us normal frames.
++ * inform AP that we will go to sleep so that it will buffer the frames
++ * while we scan
+  */
+-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+-					   bool tell_ap)
++static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+ {
+ 	struct ieee80211_local *local = sdata->local;
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+@@ -46,8 +42,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+ 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ 	}
+ 
+-	if (tell_ap && (!local->offchannel_ps_enabled ||
+-			!(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
++	if (!(local->offchannel_ps_enabled) ||
++	    !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ 		/*
+ 		 * If power save was enabled, no need to send a nullfunc
+ 		 * frame because AP knows that we are sleeping. But if the
+@@ -82,9 +78,6 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+ 		 * we are sleeping, let's just enable power save mode in
+ 		 * hardware.
+ 		 */
+-		/* TODO:  Only set hardware if CONF_PS changed?
+-		 * TODO:  Should we set offchannel_ps_enabled to false?
+-		 */
+ 		local->hw.conf.flags |= IEEE80211_CONF_PS;
+ 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ 	} else if (local->hw.conf.dynamic_ps_timeout > 0) {
+@@ -103,61 +96,63 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+ 	ieee80211_sta_reset_conn_monitor(sdata);
+ }
+ 
+-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+-				    bool offchannel_ps_enable)
++void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+ {
+ 	struct ieee80211_sub_if_data *sdata;
+ 
+-	/*
+-	 * notify the AP about us leaving the channel and stop all
+-	 * STA interfaces.
+-	 */
+ 	mutex_lock(&local->iflist_mtx);
+ 	list_for_each_entry(sdata, &local->interfaces, list) {
+ 		if (!ieee80211_sdata_running(sdata))
+ 			continue;
+ 
+-		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+-			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+-
+-		/* Check to see if we should disable beaconing. */
++		/* disable beaconing */
+ 		if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ 		    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ 		    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+ 			ieee80211_bss_info_change_notify(
+ 				sdata, BSS_CHANGED_BEACON_ENABLED);
+ 
+-		if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
++		/*
++		 * only handle non-STA interfaces here, STA interfaces
++		 * are handled in ieee80211_offchannel_stop_station(),
++		 * e.g., from the background scan state machine.
++		 *
++		 * In addition, do not stop monitor interface to allow it to be
++		 * used from user space controlled off-channel operations.
++		 */
++		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++		    sdata->vif.type != NL80211_IFTYPE_MONITOR) {
++			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ 			netif_tx_stop_all_queues(sdata->dev);
+-			if (offchannel_ps_enable &&
+-			    (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+-			    sdata->u.mgd.associated)
+-				ieee80211_offchannel_ps_enable(sdata, true);
+ 		}
+ 	}
+ 	mutex_unlock(&local->iflist_mtx);
+ }
+ 
+-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+-					bool tell_ap)
++void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+ {
+ 	struct ieee80211_sub_if_data *sdata;
+ 
++	/*
++	 * notify the AP about us leaving the channel and stop all STA interfaces
++	 */
+ 	mutex_lock(&local->iflist_mtx);
+ 	list_for_each_entry(sdata, &local->interfaces, list) {
+ 		if (!ieee80211_sdata_running(sdata))
+ 			continue;
+ 
+-		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+-		    sdata->u.mgd.associated)
+-			ieee80211_offchannel_ps_enable(sdata, tell_ap);
++		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
++			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
++			netif_tx_stop_all_queues(sdata->dev);
++			if (sdata->u.mgd.associated)
++				ieee80211_offchannel_ps_enable(sdata);
++		}
+ 	}
+ 	mutex_unlock(&local->iflist_mtx);
+ }
+ 
+ void ieee80211_offchannel_return(struct ieee80211_local *local,
+-				 bool enable_beaconing,
+-				 bool offchannel_ps_disable)
++				 bool enable_beaconing)
+ {
+ 	struct ieee80211_sub_if_data *sdata;
+ 
+@@ -167,8 +162,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
+ 			continue;
+ 
+ 		/* Tell AP we're back */
+-		if (offchannel_ps_disable &&
+-		    sdata->vif.type == NL80211_IFTYPE_STATION) {
++		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ 			if (sdata->u.mgd.associated)
+ 				ieee80211_offchannel_ps_disable(sdata);
+ 		}
+@@ -188,7 +182,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
+ 			netif_tx_wake_all_queues(sdata->dev);
+ 		}
+ 
+-		/* Check to see if we should re-enable beaconing */
++		/* re-enable beaconing */
+ 		if (enable_beaconing &&
+ 		    (sdata->vif.type == NL80211_IFTYPE_AP ||
+ 		     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index fb123e2..5c51607 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -421,10 +421,16 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
+ 		return RX_CONTINUE;
+ 
+ 	if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+-	    test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ 	    local->sched_scanning)
+ 		return ieee80211_scan_rx(rx->sdata, skb);
+ 
++	if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
++		/* drop all the other packets during a software scan anyway */
++		if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
++			dev_kfree_skb(skb);
++		return RX_QUEUED;
++	}
++
+ 	/* scanning finished during invoking of handlers */
+ 	I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
+ 	return RX_DROP_UNUSABLE;
+@@ -2858,7 +2864,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 		local->dot11ReceivedFragmentCount++;
+ 
+ 	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+-		     test_bit(SCAN_SW_SCANNING, &local->scanning)))
++		     test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+ 		status->rx_flags |= IEEE80211_RX_IN_SCAN;
+ 
+ 	if (ieee80211_is_mgmt(fc))
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 105436d..5279300 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -213,14 +213,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+ 	if (bss)
+ 		ieee80211_rx_bss_put(sdata->local, bss);
+ 
+-	/* If we are on-operating-channel, and this packet is for the
+-	 * current channel, pass the pkt on up the stack so that
+-	 * the rest of the stack can make use of it.
+-	 */
+-	if (ieee80211_cfg_on_oper_channel(sdata->local)
+-	    && (channel == sdata->local->oper_channel))
+-		return RX_CONTINUE;
+-
+ 	dev_kfree_skb(skb);
+ 	return RX_QUEUED;
+ }
+@@ -264,8 +256,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
+ 				       bool was_hw_scan)
+ {
+ 	struct ieee80211_local *local = hw_to_local(hw);
+-	bool on_oper_chan;
+-	bool enable_beacons = false;
+ 
+ 	lockdep_assert_held(&local->mtx);
+ 
+@@ -298,25 +288,11 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
+ 	local->scanning = 0;
+ 	local->scan_channel = NULL;
+ 
+-	on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+-
+-	if (was_hw_scan || !on_oper_chan)
+-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+-	else
+-		/* Set power back to normal operating levels. */
+-		ieee80211_hw_config(local, 0);
+-
++	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ 	if (!was_hw_scan) {
+-		bool on_oper_chan2;
+ 		ieee80211_configure_filter(local);
+ 		drv_sw_scan_complete(local);
+-		on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+-		/* We should always be on-channel at this point. */
+-		WARN_ON(!on_oper_chan2);
+-		if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
+-			enable_beacons = true;
+-
+-		ieee80211_offchannel_return(local, enable_beacons, true);
++		ieee80211_offchannel_return(local, true);
+ 	}
+ 
+ 	ieee80211_recalc_idle(local);
+@@ -357,15 +333,13 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
+ 	 */
+ 	drv_sw_scan_start(local);
+ 
++	ieee80211_offchannel_stop_beaconing(local);
++
+ 	local->leave_oper_channel_time = 0;
+ 	local->next_scan_state = SCAN_DECISION;
+ 	local->scan_channel_idx = 0;
+ 
+-	/* We always want to use off-channel PS, even if we
+-	 * are not really leaving oper-channel.  Don't
+-	 * tell the AP though, as long as we are on-channel.
+-	 */
+-	ieee80211_offchannel_enable_all_ps(local, false);
++	drv_flush(local, false);
+ 
+ 	ieee80211_configure_filter(local);
+ 
+@@ -508,20 +482,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ 	}
+ 	mutex_unlock(&local->iflist_mtx);
+ 
+-	next_chan = local->scan_req->channels[local->scan_channel_idx];
+-
+-	if (ieee80211_cfg_on_oper_channel(local)) {
+-		/* We're currently on operating channel. */
+-		if (next_chan == local->oper_channel)
+-			/* We don't need to move off of operating channel. */
+-			local->next_scan_state = SCAN_SET_CHANNEL;
+-		else
+-			/*
+-			 * We do need to leave operating channel, as next
+-			 * scan is somewhere else.
+-			 */
+-			local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+-	} else {
++	if (local->scan_channel) {
+ 		/*
+ 		 * we're currently scanning a different channel, let's
+ 		 * see if we can scan another channel without interfering
+@@ -537,6 +498,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ 		 *
+ 		 * Otherwise switch back to the operating channel.
+ 		 */
++		next_chan = local->scan_req->channels[local->scan_channel_idx];
+ 
+ 		bad_latency = time_after(jiffies +
+ 				ieee80211_scan_get_channel_time(next_chan),
+@@ -554,6 +516,12 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ 			local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
+ 		else
+ 			local->next_scan_state = SCAN_SET_CHANNEL;
++	} else {
++		/*
++		 * we're on the operating channel currently, let's
++		 * leave that channel now to scan another one
++		 */
++		local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+ 	}
+ 
+ 	*next_delay = 0;
+@@ -562,10 +530,9 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
+ 						    unsigned long *next_delay)
+ {
+-	/* PS will already be in off-channel mode,
+-	 * we do that once at the beginning of scanning.
+-	 */
+-	ieee80211_offchannel_stop_vifs(local, false);
++	ieee80211_offchannel_stop_station(local);
++
++	__set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ 
+ 	/*
+ 	 * What if the nullfunc frames didn't arrive?
+@@ -588,15 +555,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
+ {
+ 	/* switch back to the operating channel */
+ 	local->scan_channel = NULL;
+-	if (!ieee80211_cfg_on_oper_channel(local))
+-		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
++	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ 
+ 	/*
+-	 * Re-enable vifs and beaconing.  Leave PS
+-	 * in off-channel state..will put that back
+-	 * on-channel at the end of scanning.
++	 * Only re-enable station mode interface now; beaconing will be
++	 * re-enabled once the full scan has been completed.
+ 	 */
+-	ieee80211_offchannel_return(local, true, false);
++	ieee80211_offchannel_return(local, false);
++
++	__clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ 
+ 	*next_delay = HZ / 5;
+ 	local->next_scan_state = SCAN_DECISION;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 1f8b120..eff1f4e 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -259,8 +259,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
+ 	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
+ 		return TX_CONTINUE;
+ 
+-	if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+-	    test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
++	if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+ 	    !ieee80211_is_probe_req(hdr->frame_control) &&
+ 	    !ieee80211_is_nullfunc(hdr->frame_control))
+ 		/*
+diff --git a/net/mac80211/work.c b/net/mac80211/work.c
+index 6c53b6d..99165ef 100644
+--- a/net/mac80211/work.c
++++ b/net/mac80211/work.c
+@@ -899,26 +899,6 @@ static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
+ 	return false;
+ }
+ 
+-static enum nl80211_channel_type
+-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
+-		  enum nl80211_channel_type oper_ct)
+-{
+-	switch (wk_ct) {
+-	case NL80211_CHAN_NO_HT:
+-		return oper_ct;
+-	case NL80211_CHAN_HT20:
+-		if (oper_ct != NL80211_CHAN_NO_HT)
+-			return oper_ct;
+-		return wk_ct;
+-	case NL80211_CHAN_HT40MINUS:
+-	case NL80211_CHAN_HT40PLUS:
+-		return wk_ct;
+-	}
+-	WARN_ON(1); /* shouldn't get here */
+-	return wk_ct;
+-}
+-
+-
+ static void ieee80211_work_timer(unsigned long data)
+ {
+ 	struct ieee80211_local *local = (void *) data;
+@@ -969,52 +949,18 @@ static void ieee80211_work_work(struct work_struct *work)
+ 		}
+ 
+ 		if (!started && !local->tmp_channel) {
+-			bool on_oper_chan;
+-			bool tmp_chan_changed = false;
+-			bool on_oper_chan2;
+-			enum nl80211_channel_type wk_ct;
+-			on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+-
+-			/* Work with existing channel type if possible. */
+-			wk_ct = wk->chan_type;
+-			if (wk->chan == local->hw.conf.channel)
+-				wk_ct = ieee80211_calc_ct(wk->chan_type,
+-						local->hw.conf.channel_type);
+-
+-			if (local->tmp_channel)
+-				if ((local->tmp_channel != wk->chan) ||
+-				    (local->tmp_channel_type != wk_ct))
+-					tmp_chan_changed = true;
+-
+-			local->tmp_channel = wk->chan;
+-			local->tmp_channel_type = wk_ct;
+ 			/*
+-			 * Leave the station vifs in awake mode if they
+-			 * happen to be on the same channel as
+-			 * the requested channel.
++			 * TODO: could optimize this by leaving the
++			 *	 station vifs in awake mode if they
++			 *	 happen to be on the same channel as
++			 *	 the requested channel
+ 			 */
+-			on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+-			if (on_oper_chan != on_oper_chan2) {
+-				if (on_oper_chan2) {
+-					/* going off oper channel, PS too */
+-					ieee80211_offchannel_stop_vifs(local,
+-								       true);
+-					ieee80211_hw_config(local, 0);
+-				} else {
+-					/* going on channel, but leave PS
+-					 * off-channel. */
+-					ieee80211_hw_config(local, 0);
+-					ieee80211_offchannel_return(local,
+-								    true,
+-								    false);
+-				}
+-			} else if (tmp_chan_changed)
+-				/* Still off-channel, but on some other
+-				 * channel, so update hardware.
+-				 * PS should already be off-channel.
+-				 */
+-				ieee80211_hw_config(local, 0);
++			ieee80211_offchannel_stop_beaconing(local);
++			ieee80211_offchannel_stop_station(local);
+ 
++			local->tmp_channel = wk->chan;
++			local->tmp_channel_type = wk->chan_type;
++			ieee80211_hw_config(local, 0);
+ 			started = true;
+ 			wk->timeout = jiffies;
+ 		}
+@@ -1100,8 +1046,7 @@ static void ieee80211_work_work(struct work_struct *work)
+ 		 * we still need to do a hardware config.  Currently,
+ 		 * we cannot be here while scanning, however.
+ 		 */
+-		if (!ieee80211_cfg_on_oper_channel(local))
+-			ieee80211_hw_config(local, 0);
++		ieee80211_hw_config(local, 0);
+ 
+ 		/* At the least, we need to disable offchannel_ps,
+ 		 * so just go ahead and run the entire offchannel
+@@ -1109,7 +1054,7 @@ static void ieee80211_work_work(struct work_struct *work)
+ 		 * beaconing if we were already on-oper-channel
+ 		 * as a future optimization.
+ 		 */
+-		ieee80211_offchannel_return(local, true, true);
++		ieee80211_offchannel_return(local, true);
+ 
+ 		/* give connection some time to breathe */
+ 		run_again(local, jiffies + HZ/2);
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index f614ce7..28a39bb 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
+ 		if (status->flag & RX_FLAG_MMIC_ERROR)
+ 			goto mic_fail;
+ 
+-		if (!(status->flag & RX_FLAG_IV_STRIPPED))
++		if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+ 			goto update_iv;
+ 
+ 		return RX_CONTINUE;
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 6e03888..d4ad50e 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
+ 
+ fail_free:
+ 	kfree(m->to_pool);
++	m->to_pool = NULL;
+ fail:
+ 	return -ENOMEM;
+ }
+@@ -287,7 +288,9 @@ svc_pool_map_put(void)
+ 	if (!--m->count) {
+ 		m->mode = SVC_POOL_DEFAULT;
+ 		kfree(m->to_pool);
++		m->to_pool = NULL;
+ 		kfree(m->pool_to);
++		m->pool_to = NULL;
+ 		m->npools = 0;
+ 	}
+ 
+@@ -527,17 +530,20 @@ svc_destroy(struct svc_serv *serv)
+ 		printk("svc_destroy: no threads for serv=%p!\n", serv);
+ 
+ 	del_timer_sync(&serv->sv_temptimer);
+-
+-	svc_close_all(&serv->sv_tempsocks);
++	/*
++	 * The set of xprts (contained in the sv_tempsocks and
++	 * sv_permsocks lists) is now constant, since it is modified
++	 * only by accepting new sockets (done by service threads in
++	 * svc_recv) or aging old ones (done by sv_temptimer), or
++	 * configuration changes (excluded by whatever locking the
++	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
++	 * safe to traverse those lists and shut everything down:
++	 */
++	svc_close_all(serv);
+ 
+ 	if (serv->sv_shutdown)
+ 		serv->sv_shutdown(serv);
+ 
+-	svc_close_all(&serv->sv_permsocks);
+-
+-	BUG_ON(!list_empty(&serv->sv_permsocks));
+-	BUG_ON(!list_empty(&serv->sv_tempsocks));
+-
+ 	cache_clean_deferred(serv);
+ 
+ 	if (svc_serv_is_pooled(serv))
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 447cd0e..9ed2cd0 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -893,14 +893,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
+ 	spin_lock_bh(&serv->sv_lock);
+ 	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+ 		list_del_init(&xprt->xpt_list);
+-	/*
+-	 * The only time we're called while xpt_ready is still on a list
+-	 * is while the list itself is about to be destroyed (in
+-	 * svc_destroy).  BUT svc_xprt_enqueue could still be attempting
+-	 * to add new entries to the sp_sockets list, so we can't leave
+-	 * a freed xprt on it.
+-	 */
+-	list_del_init(&xprt->xpt_ready);
++	BUG_ON(!list_empty(&xprt->xpt_ready));
+ 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+ 		serv->sv_tmpcnt--;
+ 	spin_unlock_bh(&serv->sv_lock);
+@@ -928,22 +921,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
+ }
+ EXPORT_SYMBOL_GPL(svc_close_xprt);
+ 
+-void svc_close_all(struct list_head *xprt_list)
++static void svc_close_list(struct list_head *xprt_list)
++{
++	struct svc_xprt *xprt;
++
++	list_for_each_entry(xprt, xprt_list, xpt_list) {
++		set_bit(XPT_CLOSE, &xprt->xpt_flags);
++		set_bit(XPT_BUSY, &xprt->xpt_flags);
++	}
++}
++
++void svc_close_all(struct svc_serv *serv)
+ {
++	struct svc_pool *pool;
+ 	struct svc_xprt *xprt;
+ 	struct svc_xprt *tmp;
++	int i;
++
++	svc_close_list(&serv->sv_tempsocks);
++	svc_close_list(&serv->sv_permsocks);
+ 
++	for (i = 0; i < serv->sv_nrpools; i++) {
++		pool = &serv->sv_pools[i];
++
++		spin_lock_bh(&pool->sp_lock);
++		while (!list_empty(&pool->sp_sockets)) {
++			xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
++			list_del_init(&xprt->xpt_ready);
++		}
++		spin_unlock_bh(&pool->sp_lock);
++	}
+ 	/*
+-	 * The server is shutting down, and no more threads are running.
+-	 * svc_xprt_enqueue() might still be running, but at worst it
+-	 * will re-add the xprt to sp_sockets, which will soon get
+-	 * freed.  So we don't bother with any more locking, and don't
+-	 * leave the close to the (nonexistent) server threads:
++	 * At this point the sp_sockets lists will stay empty, since
++	 * svc_enqueue will not add new entries without taking the
++	 * sp_lock and checking XPT_BUSY.
+ 	 */
+-	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+-		set_bit(XPT_CLOSE, &xprt->xpt_flags);
++	list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
+ 		svc_delete_xprt(xprt);
+-	}
++	list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
++		svc_delete_xprt(xprt);
++
++	BUG_ON(!list_empty(&serv->sv_permsocks));
++	BUG_ON(!list_empty(&serv->sv_tempsocks));
+ }
+ 
+ /*
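
The reworked shutdown is strictly ordered: mark every transport XPT_CLOSE and XPT_BUSY so svc_xprt_enqueue() can no longer queue it, then drain the per-pool ready lists while they can only shrink, and only then delete the transports. A reduced model of that three-phase ordering (lists are flattened to arrays and the flag values are illustrative):

    #include <stdio.h>

    #define XPT_CLOSE 0x1   /* illustrative bit values */
    #define XPT_BUSY  0x2

    struct xprt { int flags; int deleted; };

    int main(void)
    {
        struct xprt xprts[3] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
        struct xprt *ready[2] = { &xprts[0], &xprts[2] };  /* queued work */
        int n = 3, nready = 2;

        /* Phase 1: mark everything closed and busy, so nothing new can
         * be added to a ready list from here on. */
        for (int i = 0; i < n; i++)
            xprts[i].flags |= XPT_CLOSE | XPT_BUSY;

        /* Phase 2: empty the ready lists while they can no longer grow. */
        for (int i = 0; i < nready; i++)
            ready[i] = NULL;

        /* Phase 3: deletion can no longer race with the ready lists,
         * which is what lets svc_delete_xprt() BUG_ON a non-empty one. */
        for (int i = 0; i < n; i++)
            xprts[i].deleted = 1;

        for (int i = 0; i < n; i++)
            printf("xprt %d flags=%#x deleted=%d\n",
                   i, xprts[i].flags, xprts[i].deleted);
        return 0;
    }
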
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 277ebd4..593f4c6 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
+  * Copies data into an arbitrary memory location from an array of pages
+  * The copy is assumed to be non-overlapping.
+  */
+-static void
++void
+ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ {
+ 	struct page **pgfrom;
+@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ 
+ 	} while ((len -= copy) != 0);
+ }
++EXPORT_SYMBOL_GPL(_copy_from_pages);
+ 
+ /*
+  * xdr_shrink_bufhead
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index ec7afce..bccf07d 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -250,33 +250,61 @@ if ($kconfig) {
+     read_kconfig($kconfig);
+ }
+ 
++sub convert_vars {
++    my ($line, %vars) = @_;
++
++    my $process = "";
++
++    while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
++	my $start = $1;
++	my $variable = $2;
++	my $var = $3;
++
++	if (defined($vars{$var})) {
++	    $process .= $start . $vars{$var};
++	} else {
++	    $process .= $start . $variable;
++	}
++    }
++
++    $process .= $line;
++
++    return $process;
++}
++
+ # Read all Makefiles to map the configs to the objects
+ foreach my $makefile (@makefiles) {
+ 
+-    my $cont = 0;
++    my $line = "";
++    my %make_vars;
+ 
+     open(MIN,$makefile) || die "Can't open $makefile";
+     while (<MIN>) {
++	# if this line ends with a backslash, continue
++	chomp;
++	if (/^(.*)\\$/) {
++	    $line .= $1;
++	    next;
++	}
++
++	$line .= $_;
++	$_ = $line;
++	$line = "";
++
+ 	my $objs;
+ 
+-	# is this a line after a line with a backslash?
+-	if ($cont && /(\S.*)$/) {
+-	    $objs = $1;
+-	}
+-	$cont = 0;
++	$_ = convert_vars($_, %make_vars);
+ 
+ 	# collect objects after obj-$(CONFIG_FOO_BAR)
+ 	if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
+ 	    $var = $1;
+ 	    $objs = $2;
++
++	# check if variables are set
++	} elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
++	    $make_vars{$1} = $2;
+ 	}
+ 	if (defined($objs)) {
+-	    # test if the line ends with a backslash
+-	    if ($objs =~ m,(.*)\\$,) {
+-		$objs = $1;
+-		$cont = 1;
+-	    }
+-
+ 	    foreach my $obj (split /\s+/,$objs) {
+ 		$obj =~ s/-/_/g;
+ 		if ($obj =~ /(.*)\.o$/) {
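
convert_vars() teaches the streamlining pass simple Makefile variables: every $(NAME) is replaced with a previously recorded value, and unknown references are kept verbatim. A compact C model of that substitution (lookup(), the sample table, and the fixed buffer sizes are invented for the sketch and assume inputs that fit; the Perl handles arbitrary lengths):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical variable table with a single entry. */
    static const char *lookup(const char *name)
    {
        if (strcmp(name, "obj") == 0)
            return "drivers/net";
        return NULL;
    }

    static void convert_vars(const char *line, char *out, size_t outsz)
    {
        size_t o = 0;

        while (*line && o + 1 < outsz) {
            const char *open = strstr(line, "$(");
            const char *close = open ? strchr(open, ')') : NULL;
            char name[64];
            const char *val;

            if (!open || !close) {             /* no reference left */
                o += snprintf(out + o, outsz - o, "%s", line);
                break;
            }
            /* copy the text before the reference */
            o += snprintf(out + o, outsz - o, "%.*s",
                          (int)(open - line), line);
            snprintf(name, sizeof(name), "%.*s",
                     (int)(close - open - 2), open + 2);
            val = lookup(name);
            if (val)                           /* known: substitute */
                o += snprintf(out + o, outsz - o, "%s", val);
            else                               /* unknown: keep verbatim */
                o += snprintf(out + o, outsz - o, "%.*s",
                              (int)(close - open + 1), open);
            line = close + 1;
        }
    }

    int main(void)
    {
        char buf[256];

        convert_vars("obj-$(CONFIG_FOO) += $(obj)/foo.o", buf, sizeof(buf));
        printf("%s\n", buf);   /* obj-$(CONFIG_FOO) += drivers/net/foo.o */
        return 0;
    }
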
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index f40a6af6..54e35c1 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr,  /* is SHT_REL or SHT_RELA */
+ 		succeed_file();
+ 	}
+ 	if (w(txthdr->sh_type) != SHT_PROGBITS ||
+-	    !(w(txthdr->sh_flags) & SHF_EXECINSTR))
++	    !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
+ 		return NULL;
+ 	return txtname;
+ }
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 0d50df0..88a2788 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -178,8 +178,8 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
+ 	strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
+ 
+ 	result = ima_store_template(entry, violation, inode);
+-	if (!result)
++	if (!result || result == -EEXIST)
+ 		iint->flags |= IMA_MEASURED;
+-	else
++	if (result < 0)
+ 		kfree(entry);
+ }
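
With ima_add_template_entry() now returning -EEXIST for a duplicate digest, the caller treats "already measured" as success for flag purposes while still freeing the unqueued entry. The result handling in isolation (the errno value and flag bit are assumed for the sketch):

    #include <stdio.h>

    #define EEXIST 17          /* assumed errno value, as on Linux */
    #define IMA_MEASURED 0x4   /* illustrative flag bit */

    /* Mirrors the tail of ima_store_measurement(): success and
     * duplicate both mark the inode measured; any negative result,
     * -EEXIST included, means the entry was not queued and must
     * be freed. */
    static int handle_store_result(int result, int *flags)
    {
        int freed = 0;

        if (!result || result == -EEXIST)
            *flags |= IMA_MEASURED;
        if (result < 0)
            freed = 1;   /* stands in for kfree(entry) */
        return freed;
    }

    int main(void)
    {
        int flags;

        flags = 0;
        printf("%d %#x\n", handle_store_result(0, &flags), flags);       /* 0 0x4 */
        flags = 0;
        printf("%d %#x\n", handle_store_result(-EEXIST, &flags), flags); /* 1 0x4 */
        flags = 0;
        printf("%d %#x\n", handle_store_result(-5, &flags), flags);      /* 1 0 */
        return 0;
    }
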
+diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
+index 8e28f04..55a6271 100644
+--- a/security/integrity/ima/ima_queue.c
++++ b/security/integrity/ima/ima_queue.c
+@@ -23,6 +23,8 @@
+ #include <linux/slab.h>
+ #include "ima.h"
+ 
++#define AUDIT_CAUSE_LEN_MAX 32
++
+ LIST_HEAD(ima_measurements);	/* list of all measurements */
+ 
+ /* key: inode (before secure-hashing a file) */
+@@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
+ 
+ 	result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
+ 	if (result != 0)
+-		pr_err("IMA: Error Communicating to TPM chip\n");
++		pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
++		       result);
+ 	return result;
+ }
+ 
+@@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ {
+ 	u8 digest[IMA_DIGEST_SIZE];
+ 	const char *audit_cause = "hash_added";
++	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
+ 	int audit_info = 1;
+-	int result = 0;
++	int result = 0, tpmresult = 0;
+ 
+ 	mutex_lock(&ima_extend_list_mutex);
+ 	if (!violation) {
+ 		memcpy(digest, entry->digest, sizeof digest);
+ 		if (ima_lookup_digest_entry(digest)) {
+ 			audit_cause = "hash_exists";
++			result = -EEXIST;
+ 			goto out;
+ 		}
+ 	}
+@@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ 	if (violation)		/* invalidate pcr */
+ 		memset(digest, 0xff, sizeof digest);
+ 
+-	result = ima_pcr_extend(digest);
+-	if (result != 0) {
+-		audit_cause = "TPM error";
++	tpmresult = ima_pcr_extend(digest);
++	if (tpmresult != 0) {
++		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
++			 tpmresult);
++		audit_cause = tpm_audit_cause;
+ 		audit_info = 0;
+ 	}
+ out:
+diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
+index 4a9b4b2..867558c 100644
+--- a/security/tomoyo/util.c
++++ b/security/tomoyo/util.c
+@@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
+ 				if (d < '0' || d > '7' || e < '0' || e > '7')
+ 					break;
+ 				c = tomoyo_make_byte(c, d, e);
+-				if (tomoyo_invalid(c))
+-					continue; /* pattern is not \000 */
++				if (c <= ' ' || c >= 127)
++					continue;
+ 			}
+ 			goto out;
+ 		} else if (in_repetition && c == '/') {
+ 			goto out;
+-		} else if (tomoyo_invalid(c)) {
++		} else if (c <= ' ' || c >= 127) {
+ 			goto out;
+ 		}
+ 	}
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c2f79e6..5b2b75b 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2509,6 +2509,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ 	SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
+ 	SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
++	SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
+ 	SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
+ 	SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
+ 	SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 618ddad..368f0c5 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -487,7 +487,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
+ }
+ 
+ /* get the widget type from widget capability bits */
+-#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
++static inline int get_wcaps_type(unsigned int wcaps)
++{
++	if (!wcaps)
++		return -1; /* invalid type */
++	return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
++}
+ 
+ static inline unsigned int get_wcaps_channels(u32 wcaps)
+ {
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index 2c981b5..254ab52 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
+ 		[AC_WID_BEEP] = "Beep Generator Widget",
+ 		[AC_WID_VENDOR] = "Vendor Defined Widget",
+ 	};
++	if (wid_value == -1)
++		return "UNKNOWN Widget";
+ 	wid_value &= 0xf;
+ 	if (names[wid_value])
+ 		return names[wid_value];
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 70a7abd..5b0a9bb 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -920,16 +920,14 @@ static void cs_automute(struct hda_codec *codec)
+ 
+ 	/* mute speakers if spdif or hp jack is plugged in */
+ 	for (i = 0; i < cfg->speaker_outs; i++) {
++		int pin_ctl = hp_present ? 0 : PIN_OUT;
++		/* detect on spdif is specific to CS421x */
++		if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
++			pin_ctl = 0;
++
+ 		nid = cfg->speaker_pins[i];
+ 		snd_hda_codec_write(codec, nid, 0,
+-				    AC_VERB_SET_PIN_WIDGET_CONTROL,
+-				    hp_present ? 0 : PIN_OUT);
+-		/* detect on spdif is specific to CS421x */
+-		if (spec->vendor_nid == CS421X_VENDOR_NID) {
+-			snd_hda_codec_write(codec, nid, 0,
+-					AC_VERB_SET_PIN_WIDGET_CONTROL,
+-					spdif_present ? 0 : PIN_OUT);
+-		}
++				    AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
+ 	}
+ 	if (spec->gpio_eapd_hp) {
+ 		unsigned int gpio = hp_present ?
+@@ -1771,30 +1769,19 @@ static int build_cs421x_output(struct hda_codec *codec)
+ 	struct auto_pin_cfg *cfg = &spec->autocfg;
+ 	struct snd_kcontrol *kctl;
+ 	int err;
+-	char *name = "HP/Speakers";
++	char *name = "Master";
+ 
+ 	fix_volume_caps(codec, dac);
+-	if (!spec->vmaster_sw) {
+-		err = add_vmaster(codec, dac);
+-		if (err < 0)
+-			return err;
+-	}
+ 
+ 	err = add_mute(codec, name, 0,
+ 			HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
+ 	if (err < 0)
+ 		return err;
+-	err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
+-	if (err < 0)
+-		return err;
+ 
+ 	err = add_volume(codec, name, 0,
+ 			HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
+ 	if (err < 0)
+ 		return err;
+-	err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
+-	if (err < 0)
+-		return err;
+ 
+ 	if (cfg->speaker_outs) {
+ 		err = snd_hda_ctl_add(codec, 0,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 0de2119..7072251 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1120,8 +1120,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
+ 
+ static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
+-	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
+-			   CXT5045_LAPTOP_HPSENSE),
+ 	SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
+ 	SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
+ 	SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 616678f..f3c73a9 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1631,7 +1631,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
+ 				"Dell Studio 1557", STAC_DELL_M6_DMIC),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+-				"Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
++				"Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+ 				"Dell Studio 1558", STAC_DELL_M6_DMIC),
+ 	{} /* terminator */
+@@ -4326,6 +4326,27 @@ static void stac_store_hints(struct hda_codec *codec)
+ 	}
+ }
+ 
++static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins,
++				    const hda_nid_t *pins)
++{
++	while (num_pins--)
++		stac_issue_unsol_event(codec, *pins++);
++}
++
++/* fake event to set up pins */
++static void stac_fake_hp_events(struct hda_codec *codec)
++{
++	struct sigmatel_spec *spec = codec->spec;
++
++	if (spec->autocfg.hp_outs)
++		stac_issue_unsol_events(codec, spec->autocfg.hp_outs,
++					spec->autocfg.hp_pins);
++	if (spec->autocfg.line_outs &&
++	    spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0])
++		stac_issue_unsol_events(codec, spec->autocfg.line_outs,
++					spec->autocfg.line_out_pins);
++}
++
+ static int stac92xx_init(struct hda_codec *codec)
+ {
+ 	struct sigmatel_spec *spec = codec->spec;
+@@ -4376,10 +4397,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ 		stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
+ 				AC_PINCTL_OUT_EN);
+ 		/* fake event to set up pins */
+-		if (cfg->hp_pins[0])
+-			stac_issue_unsol_event(codec, cfg->hp_pins[0]);
+-		else if (cfg->line_out_pins[0])
+-			stac_issue_unsol_event(codec, cfg->line_out_pins[0]);
++		stac_fake_hp_events(codec);
+ 	} else {
+ 		stac92xx_auto_init_multi_out(codec);
+ 		stac92xx_auto_init_hp_out(codec);
+@@ -5028,19 +5046,11 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
+ #ifdef CONFIG_PM
+ static int stac92xx_resume(struct hda_codec *codec)
+ {
+-	struct sigmatel_spec *spec = codec->spec;
+-
+ 	stac92xx_init(codec);
+ 	snd_hda_codec_resume_amp(codec);
+ 	snd_hda_codec_resume_cache(codec);
+ 	/* fake event to set up pins again to override cached values */
+-	if (spec->hp_detect) {
+-		if (spec->autocfg.hp_pins[0])
+-			stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
+-		else if (spec->autocfg.line_out_pins[0])
+-			stac_issue_unsol_event(codec,
+-					       spec->autocfg.line_out_pins[0]);
+-	}
++	stac_fake_hp_events(codec);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index b513762..8d69e59 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -2200,7 +2200,10 @@ static int via_auto_create_loopback_switch(struct hda_codec *codec)
+ {
+ 	struct via_spec *spec = codec->spec;
+ 
+-	if (!spec->aa_mix_nid || !spec->out_mix_path.depth)
++	if (!spec->aa_mix_nid)
++		return 0; /* no loopback switching available */
++	if (!(spec->out_mix_path.depth || spec->hp_mix_path.depth ||
++	      spec->speaker_path.depth))
+ 		return 0; /* no loopback switching available */
+ 	if (!via_clone_control(spec, &via_aamix_ctl_enum))
+ 		return -ENOMEM;
+diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
+index e328cfb..e525da2 100644
+--- a/sound/pci/ice1712/amp.c
++++ b/sound/pci/ice1712/amp.c
+@@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
+ 
+ static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
+ {
+-	/* we use pins 39 and 41 of the VT1616 for left and right read outputs */
+-	snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
++	if (ice->ac97)
++		/* we use pins 39 and 41 of the VT1616 for left and right
++		read outputs */
++		snd_ac97_write_cache(ice->ac97, 0x5a,
++			snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
+index 42d1ab1..915546a 100644
+--- a/sound/pci/oxygen/xonar_wm87x6.c
++++ b/sound/pci/oxygen/xonar_wm87x6.c
+@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
+ 	struct xonar_wm87x6 *data = chip->model_data;
+ 
+ 	wm8776_write(chip, WM8776_RESET, 0);
++	wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
+ 	wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
+ 		     WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
+ 	wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 81c6ede..08dcce5 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -17,6 +17,7 @@
+ 
+ #include <linux/gfp.h>
+ #include <linux/init.h>
++#include <linux/ratelimit.h>
+ #include <linux/usb.h>
+ #include <linux/usb/audio.h>
+ 
+@@ -458,8 +459,8 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
+ 
+ 	for (i = 0; i < urb->number_of_packets; i++) {
+ 		cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+-		if (urb->iso_frame_desc[i].status) {
+-			snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
++		if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
++			snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
+ 			// continue;
+ 		}
+ 		bytes = urb->iso_frame_desc[i].actual_length;
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index c400ade..1e7a47a 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -674,7 +674,7 @@ dotry:
+ 		inurb->transfer_buffer_length =
+ 			inurb->number_of_packets *
+ 			inurb->iso_frame_desc[0].length;
+-		preempt_disable();
++
+ 		if (u == 0) {
+ 			int now;
+ 			struct usb_device *dev = inurb->dev;
+@@ -686,19 +686,17 @@ dotry:
+ 		}
+ 		err = usb_submit_urb(inurb, GFP_ATOMIC);
+ 		if (err < 0) {
+-			preempt_enable();
+ 			snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
+ 				   " returned %i\n", u, err);
+ 			return err;
+ 		}
+ 		err = usb_submit_urb(outurb, GFP_ATOMIC);
+ 		if (err < 0) {
+-			preempt_enable();
+ 			snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
+ 				   " returned %i\n", u, err);
+ 			return err;
+ 		}
+-		preempt_enable();
++
+ 		if (inurb->start_frame != outurb->start_frame) {
+ 			snd_printd(KERN_DEBUG
+ 				   "u[%i] start_frames differ in:%u out:%u\n",


