public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] linux-patches r2686 - genpatches-2.6/trunk/3.12
@ 2014-02-21 14:49 Mike Pagano (mpagano)
  0 siblings, 0 replies; only message in thread
From: Mike Pagano (mpagano) @ 2014-02-21 14:49 UTC (permalink / raw
  To: gentoo-commits

Author: mpagano
Date: 2014-02-21 14:49:57 +0000 (Fri, 21 Feb 2014)
New Revision: 2686

Added:
   genpatches-2.6/trunk/3.12/1011_linux-3.12.12.patch
Modified:
   genpatches-2.6/trunk/3.12/0000_README
Log:
Linux patch 3.12.12

Modified: genpatches-2.6/trunk/3.12/0000_README
===================================================================
--- genpatches-2.6/trunk/3.12/0000_README	2014-02-20 22:18:34 UTC (rev 2685)
+++ genpatches-2.6/trunk/3.12/0000_README	2014-02-21 14:49:57 UTC (rev 2686)
@@ -86,6 +86,10 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.12.11
 
+Patch:  1011_linux-3.12.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.12/1011_linux-3.12.12.patch
===================================================================
--- genpatches-2.6/trunk/3.12/1011_linux-3.12.12.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.12/1011_linux-3.12.12.patch	2014-02-21 14:49:57 UTC (rev 2686)
@@ -0,0 +1,1111 @@
+diff --git a/Makefile b/Makefile
+index b9e092666bf9..563297e159b7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
+index fea9ee327206..889324981aa4 100644
+--- a/arch/arm64/include/asm/cacheflush.h
++++ b/arch/arm64/include/asm/cacheflush.h
+@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
+ static inline void __flush_icache_all(void)
+ {
+ 	asm("ic	ialluis");
++	dsb();
+ }
+ 
+ #define flush_dcache_mmap_lock(mapping) \
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 6a389dc1bd49..0ea7a22bcdf2 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -235,6 +235,8 @@ void update_vsyscall(struct timekeeper *tk)
+ 	vdso_data->use_syscall			= use_syscall;
+ 	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
+ 	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
++	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
++	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
+ 
+ 	if (!use_syscall) {
+ 		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
+@@ -242,8 +244,6 @@ void update_vsyscall(struct timekeeper *tk)
+ 		vdso_data->xtime_clock_nsec	= tk->xtime_nsec;
+ 		vdso_data->cs_mult		= tk->mult;
+ 		vdso_data->cs_shift		= tk->shift;
+-		vdso_data->wtm_clock_sec	= tk->wall_to_monotonic.tv_sec;
+-		vdso_data->wtm_clock_nsec	= tk->wall_to_monotonic.tv_nsec;
+ 	}
+ 
+ 	smp_wmb();
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index d8064af42e62..6d20b7d162d8 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
+ 
+ # Actual build commands
+ quiet_cmd_vdsold = VDSOL $@
+-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
++      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+ quiet_cmd_vdsoas = VDSOA $@
+       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+ 
+diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
+index f0a6d10b5211..fe652ffd34c2 100644
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
+ 	bl	__do_get_tspec
+ 	seqcnt_check w9, 1b
+ 
++	mov	x30, x2
++
+ 	cmp	w0, #CLOCK_MONOTONIC
+ 	b.ne	6f
+ 
+@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
+ 	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ 	b.ne	8f
+ 
++	/* xtime_coarse_nsec is already right-shifted */
++	mov	x12, #0
++
+ 	/* Get coarse timespec. */
+ 	adr	vdso_data, _vdso_data
+ 3:	seqcnt_acquire
+@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
+ 	lsr	x11, x11, x12
+ 	stp	x10, x11, [x1, #TSPEC_TV_SEC]
+ 	mov	x0, xzr
+-	ret	x2
++	ret
+ 7:
+ 	mov	x30, x2
+ 8:	/* Syscall fallback. */
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index f557ebbe7013..f8dc7e8fce6f 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ 	do {
+ 		next = pmd_addr_end(addr, end);
+ 		/* try section mapping first */
+-		if (((addr | next | phys) & ~SECTION_MASK) == 0)
++		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
++			pmd_t old_pmd =*pmd;
+ 			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+-		else
++			/*
++			 * Check for previous table entries created during
++			 * boot (__create_page_tables) and flush them.
++			 */
++			if (!pmd_none(old_pmd))
++				flush_tlb_all();
++		} else {
+ 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
++		}
+ 		phys += next - addr;
+ 	} while (pmd++, addr = next, addr != end);
+ }
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 94e20dd2729f..2a245b55bb71 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -25,6 +25,7 @@
+ #include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/spinlock.h>
+ #include "crypt_s390.h"
+ 
+ #define AES_KEYLEN_128		1
+@@ -32,6 +33,7 @@
+ #define AES_KEYLEN_256		4
+ 
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+ static char keylen_flag;
+ 
+ struct s390_aes_ctx {
+@@ -756,43 +758,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ 	return aes_set_key(tfm, in_key, key_len);
+ }
+ 
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++	unsigned int i, n;
++
++	/* only use complete blocks, max. PAGE_SIZE */
++	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
++	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
++		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
++		       AES_BLOCK_SIZE);
++		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
++	}
++	return n;
++}
++
+ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ 			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+ {
+ 	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+-	unsigned int i, n, nbytes;
+-	u8 buf[AES_BLOCK_SIZE];
+-	u8 *out, *in;
++	unsigned int n, nbytes;
++	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
++	u8 *out, *in, *ctrptr = ctrbuf;
+ 
+ 	if (!walk->nbytes)
+ 		return ret;
+ 
+-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
++	if (spin_trylock(&ctrblk_lock))
++		ctrptr = ctrblk;
++
++	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+ 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ 		out = walk->dst.virt.addr;
+ 		in = walk->src.virt.addr;
+ 		while (nbytes >= AES_BLOCK_SIZE) {
+-			/* only use complete blocks, max. PAGE_SIZE */
+-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+-						 nbytes & ~(AES_BLOCK_SIZE - 1);
+-			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+-				       AES_BLOCK_SIZE);
+-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
+-			}
+-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+-			if (ret < 0 || ret != n)
++			if (ctrptr == ctrblk)
++				n = __ctrblk_init(ctrptr, nbytes);
++			else
++				n = AES_BLOCK_SIZE;
++			ret = crypt_s390_kmctr(func, sctx->key, out, in,
++					       n, ctrptr);
++			if (ret < 0 || ret != n) {
++				if (ctrptr == ctrblk)
++					spin_unlock(&ctrblk_lock);
+ 				return -EIO;
++			}
+ 			if (n > AES_BLOCK_SIZE)
+-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
++				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
+ 				       AES_BLOCK_SIZE);
+-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
++			crypto_inc(ctrptr, AES_BLOCK_SIZE);
+ 			out += n;
+ 			in += n;
+ 			nbytes -= n;
+ 		}
+ 		ret = blkcipher_walk_done(desc, walk, nbytes);
+ 	}
++	if (ctrptr == ctrblk) {
++		if (nbytes)
++			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
++		else
++			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
++		spin_unlock(&ctrblk_lock);
++	}
+ 	/*
+ 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ 	 */
+@@ -800,14 +826,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ 		out = walk->dst.virt.addr;
+ 		in = walk->src.virt.addr;
+ 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+-				       AES_BLOCK_SIZE, ctrblk);
++				       AES_BLOCK_SIZE, ctrbuf);
+ 		if (ret < 0 || ret != AES_BLOCK_SIZE)
+ 			return -EIO;
+ 		memcpy(out, buf, nbytes);
+-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
++		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+ 		ret = blkcipher_walk_done(desc, walk, 0);
++		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
+ 	}
+-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
+index bcca01c9989d..2d96e68febb2 100644
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -25,6 +25,7 @@
+ #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
+ 
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+ 
+ struct s390_des_ctx {
+ 	u8 iv[DES_BLOCK_SIZE];
+@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+ }
+ 
+ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+-			    u8 *iv, struct blkcipher_walk *walk)
++			    struct blkcipher_walk *walk)
+ {
++	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ 	int ret = blkcipher_walk_virt(desc, walk);
+ 	unsigned int nbytes = walk->nbytes;
++	struct {
++		u8 iv[DES_BLOCK_SIZE];
++		u8 key[DES3_KEY_SIZE];
++	} param;
+ 
+ 	if (!nbytes)
+ 		goto out;
+ 
+-	memcpy(iv, walk->iv, DES_BLOCK_SIZE);
++	memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
++	memcpy(param.key, ctx->key, DES3_KEY_SIZE);
+ 	do {
+ 		/* only use complete blocks */
+ 		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ 		u8 *out = walk->dst.virt.addr;
+ 		u8 *in = walk->src.virt.addr;
+ 
+-		ret = crypt_s390_kmc(func, iv, out, in, n);
++		ret = crypt_s390_kmc(func, &param, out, in, n);
+ 		if (ret < 0 || ret != n)
+ 			return -EIO;
+ 
+ 		nbytes &= DES_BLOCK_SIZE - 1;
+ 		ret = blkcipher_walk_done(desc, walk, nbytes);
+ 	} while ((nbytes = walk->nbytes));
+-	memcpy(walk->iv, iv, DES_BLOCK_SIZE);
++	memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
+ 
+ out:
+ 	return ret;
+@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
+ 			   struct scatterlist *dst, struct scatterlist *src,
+ 			   unsigned int nbytes)
+ {
+-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
++	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
+ }
+ 
+ static int cbc_des_decrypt(struct blkcipher_desc *desc,
+ 			   struct scatterlist *dst, struct scatterlist *src,
+ 			   unsigned int nbytes)
+ {
+-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
++	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
+ }
+ 
+ static struct crypto_alg cbc_des_alg = {
+@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+ 			    struct scatterlist *dst, struct scatterlist *src,
+ 			    unsigned int nbytes)
+ {
+-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
++	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
+ }
+ 
+ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+ 			    struct scatterlist *dst, struct scatterlist *src,
+ 			    unsigned int nbytes)
+ {
+-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ 	struct blkcipher_walk walk;
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
++	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
+ }
+ 
+ static struct crypto_alg cbc_des3_alg = {
+@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
+ 	}
+ };
+ 
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++	unsigned int i, n;
++
++	/* align to block size, max. PAGE_SIZE */
++	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
++	for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
++		memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
++		crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
++	}
++	return n;
++}
++
+ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+-			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
++			    struct s390_des_ctx *ctx,
++			    struct blkcipher_walk *walk)
+ {
+ 	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+-	unsigned int i, n, nbytes;
+-	u8 buf[DES_BLOCK_SIZE];
+-	u8 *out, *in;
++	unsigned int n, nbytes;
++	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
++	u8 *out, *in, *ctrptr = ctrbuf;
++
++	if (!walk->nbytes)
++		return ret;
+ 
+-	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
++	if (spin_trylock(&ctrblk_lock))
++		ctrptr = ctrblk;
++
++	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+ 	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ 		out = walk->dst.virt.addr;
+ 		in = walk->src.virt.addr;
+ 		while (nbytes >= DES_BLOCK_SIZE) {
+-			/* align to block size, max. PAGE_SIZE */
+-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+-				nbytes & ~(DES_BLOCK_SIZE - 1);
+-			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+-				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
+-				       DES_BLOCK_SIZE);
+-				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
+-			}
+-			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
+-			if (ret < 0 || ret != n)
++			if (ctrptr == ctrblk)
++				n = __ctrblk_init(ctrptr, nbytes);
++			else
++				n = DES_BLOCK_SIZE;
++			ret = crypt_s390_kmctr(func, ctx->key, out, in,
++					       n, ctrptr);
++			if (ret < 0 || ret != n) {
++				if (ctrptr == ctrblk)
++					spin_unlock(&ctrblk_lock);
+ 				return -EIO;
++			}
+ 			if (n > DES_BLOCK_SIZE)
+-				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
++				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
+ 				       DES_BLOCK_SIZE);
+-			crypto_inc(ctrblk, DES_BLOCK_SIZE);
++			crypto_inc(ctrptr, DES_BLOCK_SIZE);
+ 			out += n;
+ 			in += n;
+ 			nbytes -= n;
+ 		}
+ 		ret = blkcipher_walk_done(desc, walk, nbytes);
+ 	}
+-
++	if (ctrptr == ctrblk) {
++		if (nbytes)
++			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
++		else
++			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
++		spin_unlock(&ctrblk_lock);
++	}
+ 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+ 	if (nbytes) {
+ 		out = walk->dst.virt.addr;
+ 		in = walk->src.virt.addr;
+ 		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+-				       DES_BLOCK_SIZE, ctrblk);
++				       DES_BLOCK_SIZE, ctrbuf);
+ 		if (ret < 0 || ret != DES_BLOCK_SIZE)
+ 			return -EIO;
+ 		memcpy(out, buf, nbytes);
+-		crypto_inc(ctrblk, DES_BLOCK_SIZE);
++		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+ 		ret = blkcipher_walk_done(desc, walk, 0);
++		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
+ 	}
+-	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+ 	return ret;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index a51efc90b534..87c0be59970a 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -628,7 +628,7 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+ 		tlb_flushall_shift = 5;
+ 		break;
+ 	case 0x63a: /* Ivybridge */
+-		tlb_flushall_shift = 1;
++		tlb_flushall_shift = 2;
+ 		break;
+ 	default:
+ 		tlb_flushall_shift = 6;
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index bb328a366122..a51ee009ed83 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -229,7 +229,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
+ 						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ 				& IPI_DOORBELL_MASK;
+ 
+-			writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
++			writel(~ipimask, per_cpu_int_base +
+ 				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ 
+ 			/* Handle all pending doorbells */
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 8f9b2cea88f0..8ede8ea762e6 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
+ 		&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
+ 	{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
+ 		&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
++	{ DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
++		&af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(usb, af9035_id_table);
+diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+index 90f583e5d6a6..a8f65d88c9e7 100644
+--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
++++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+ #else
+ static inline
+ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+-					   struct mxl111sf_state *mxl_state
++					   struct mxl111sf_state *mxl_state,
+ 					   struct mxl111sf_tuner_config *cfg)
+ {
+ 	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+index 2627553f7de1..3b948e3d4583 100644
+--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
++++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
+ 		ret = -EINVAL;
+ 	}
+ 
+-	pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
++	pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
+ fail:
+ 	return ret;
+ }
+diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
+index 65411adcd0ea..7e6b209b7002 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
+@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
+ static void videobuf_vm_open(struct vm_area_struct *vma)
+ {
+ 	struct videobuf_mapping *map = vma->vm_private_data;
+-	struct videobuf_queue *q = map->q;
+ 
+-	dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
++	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ 		map, map->count, vma->vm_start, vma->vm_end);
+ 
+-	videobuf_queue_lock(q);
+ 	map->count++;
+-	videobuf_queue_unlock(q);
+ }
+ 
+ static void videobuf_vm_close(struct vm_area_struct *vma)
+@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ 	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ 		map, map->count, vma->vm_start, vma->vm_end);
+ 
+-	videobuf_queue_lock(q);
+-	if (!--map->count) {
++	map->count--;
++	if (0 == map->count) {
+ 		struct videobuf_dma_contig_memory *mem;
+ 
+ 		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
++		videobuf_queue_lock(q);
+ 
+ 		/* We need first to cancel streams, before unmapping */
+ 		if (q->streaming)
+@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ 
+ 		kfree(map);
+ 
++		videobuf_queue_unlock(q);
+ 	}
+-	videobuf_queue_unlock(q);
+ }
+ 
+ static const struct vm_operations_struct videobuf_vm_ops = {
+diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
+index 9db674ccdc68..828e7c10bd70 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
++++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
+@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
+ static void videobuf_vm_open(struct vm_area_struct *vma)
+ {
+ 	struct videobuf_mapping *map = vma->vm_private_data;
+-	struct videobuf_queue *q = map->q;
+ 
+ 	dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ 		map->count, vma->vm_start, vma->vm_end);
+ 
+-	videobuf_queue_lock(q);
+ 	map->count++;
+-	videobuf_queue_unlock(q);
+ }
+ 
+ static void videobuf_vm_close(struct vm_area_struct *vma)
+@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ 	dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ 		map->count, vma->vm_start, vma->vm_end);
+ 
+-	videobuf_queue_lock(q);
+-	if (!--map->count) {
++	map->count--;
++	if (0 == map->count) {
+ 		dprintk(1, "munmap %p q=%p\n", map, q);
++		videobuf_queue_lock(q);
+ 		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ 			if (NULL == q->bufs[i])
+ 				continue;
+@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ 			q->bufs[i]->baddr = 0;
+ 			q->ops->buf_release(q, q->bufs[i]);
+ 		}
++		videobuf_queue_unlock(q);
+ 		kfree(map);
+ 	}
+-	videobuf_queue_unlock(q);
+ 	return;
+ }
+ 
+diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
+index 1365c651c177..2ff7fcc77b11 100644
+--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
++++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
+@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
+ static void videobuf_vm_open(struct vm_area_struct *vma)
+ {
+ 	struct videobuf_mapping *map = vma->vm_private_data;
+-	struct videobuf_queue *q = map->q;
+ 
+ 	dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ 		map->count, vma->vm_start, vma->vm_end);
+ 
+-	videobuf_queue_lock(q);
+ 	map->count++;
+-	videobuf_queue_unlock(q);
+ }
+ 
+ static void videobuf_vm_close(struct vm_area_struct *vma)
+@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ 	dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ 		map->count, vma->vm_start, vma->vm_end);
+ 
+-	videobuf_queue_lock(q);
+-	if (!--map->count) {
++	map->count--;
++	if (0 == map->count) {
+ 		struct videobuf_vmalloc_memory *mem;
+ 
+ 		dprintk(1, "munmap %p q=%p\n", map, q);
++		videobuf_queue_lock(q);
+ 
+ 		/* We need first to cancel streams, before unmapping */
+ 		if (q->streaming)
+@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
+ 
+ 		kfree(map);
+ 
++		videobuf_queue_unlock(q);
+ 	}
+-	videobuf_queue_unlock(q);
+ 
+ 	return;
+ }
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 92f86ab30a13..a1ffae4c3770 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -837,7 +837,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
+ 	kref_init(&p->users);
+ 
+ 	/* Add the pinctrl handle to the global list */
++	mutex_lock(&pinctrl_list_mutex);
+ 	list_add_tail(&p->node, &pinctrl_list);
++	mutex_unlock(&pinctrl_list_mutex);
+ 
+ 	return p;
+ }
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index f350fd2e170e..f4e99a9491cc 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1251,22 +1251,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
+ 
+ 	switch (type) {
+ 	case IRQ_TYPE_EDGE_RISING:
+-		irq_set_handler(d->irq, handle_simple_irq);
++		__irq_set_handler_locked(d->irq, handle_simple_irq);
+ 		writel_relaxed(mask, pio + PIO_ESR);
+ 		writel_relaxed(mask, pio + PIO_REHLSR);
+ 		break;
+ 	case IRQ_TYPE_EDGE_FALLING:
+-		irq_set_handler(d->irq, handle_simple_irq);
++		__irq_set_handler_locked(d->irq, handle_simple_irq);
+ 		writel_relaxed(mask, pio + PIO_ESR);
+ 		writel_relaxed(mask, pio + PIO_FELLSR);
+ 		break;
+ 	case IRQ_TYPE_LEVEL_LOW:
+-		irq_set_handler(d->irq, handle_level_irq);
++		__irq_set_handler_locked(d->irq, handle_level_irq);
+ 		writel_relaxed(mask, pio + PIO_LSR);
+ 		writel_relaxed(mask, pio + PIO_FELLSR);
+ 		break;
+ 	case IRQ_TYPE_LEVEL_HIGH:
+-		irq_set_handler(d->irq, handle_level_irq);
++		__irq_set_handler_locked(d->irq, handle_level_irq);
+ 		writel_relaxed(mask, pio + PIO_LSR);
+ 		writel_relaxed(mask, pio + PIO_REHLSR);
+ 		break;
+@@ -1275,7 +1275,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
+ 		 * disable additional interrupt modes:
+ 		 * fall back to default behavior
+ 		 */
+-		irq_set_handler(d->irq, handle_simple_irq);
++		__irq_set_handler_locked(d->irq, handle_simple_irq);
+ 		writel_relaxed(mask, pio + PIO_AIMDR);
+ 		return 0;
+ 	case IRQ_TYPE_NONE:
+diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
+index 39aec0850810..25ab2eec92e4 100644
+--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
++++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
+@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
+ 	if (!configs)
+ 		return -ENOMEM;
+ 
+-	configs[0] = pull;
++	switch (pull) {
++	case 0:
++		configs[0] = PIN_CONFIG_BIAS_DISABLE;
++		break;
++	case 1:
++		configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
++		break;
++	case 2:
++		configs[0] = PIN_CONFIG_BIAS_PULL_UP;
++		break;
++	default:
++		configs[0] = PIN_CONFIG_BIAS_DISABLE;
++		dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
++	}
+ 
+ 	map->type = PIN_MAP_TYPE_CONFIGS_PIN;
+ 	map->data.configs.group_or_pin = data->groups[group];
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8b8eff051493..1b63d29e44b7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2613,7 +2613,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+ 			EXTENT_DEFRAG, 1, cached_state);
+ 	if (ret) {
+ 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+-		if (last_snapshot >= BTRFS_I(inode)->generation)
++		if (0 && last_snapshot >= BTRFS_I(inode)->generation)
+ 			/* the inode is shared */
+ 			new = record_old_file_extents(inode, ordered_extent);
+ 
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 6024877335ca..aeeea6529bcd 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
+ static void __set_page_dirty(struct page *page,
+ 		struct address_space *mapping, int warn)
+ {
+-	spin_lock_irq(&mapping->tree_lock);
++	unsigned long flags;
++
++	spin_lock_irqsave(&mapping->tree_lock, flags);
+ 	if (page->mapping) {	/* Race with truncate? */
+ 		WARN_ON_ONCE(warn && !PageUptodate(page));
+ 		account_page_dirtied(page, mapping);
+ 		radix_tree_tag_set(&mapping->page_tree,
+ 				page_index(page), PAGECACHE_TAG_DIRTY);
+ 	}
+-	spin_unlock_irq(&mapping->tree_lock);
++	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+ 
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 8a572ddde55b..55ebebec4d3b 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -169,7 +169,7 @@ void nfs41_shutdown_client(struct nfs_client *clp)
+ void nfs40_shutdown_client(struct nfs_client *clp)
+ {
+ 	if (clp->cl_slot_tbl) {
+-		nfs4_release_slot_table(clp->cl_slot_tbl);
++		nfs4_shutdown_slot_table(clp->cl_slot_tbl);
+ 		kfree(clp->cl_slot_tbl);
+ 	}
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 29c5a2c08f02..d3d7766f55e3 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1611,15 +1611,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
+ {
+ 	struct nfs4_opendata *data = calldata;
+ 
+-	nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args,
+-				&data->o_res.seq_res, task);
++	nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
++				&data->c_res.seq_res, task);
+ }
+ 
+ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
+ {
+ 	struct nfs4_opendata *data = calldata;
+ 
+-	nfs40_sequence_done(task, &data->o_res.seq_res);
++	nfs40_sequence_done(task, &data->c_res.seq_res);
+ 
+ 	data->rpc_status = task->tk_status;
+ 	if (data->rpc_status == 0) {
+@@ -1677,7 +1677,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
+ 	};
+ 	int status;
+ 
+-	nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1);
++	nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
+ 	kref_get(&data->kref);
+ 	data->rpc_done = 0;
+ 	data->rpc_status = 0;
+diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
+index cf883c7ae053..e799dc3c3b1d 100644
+--- a/fs/nfs/nfs4session.c
++++ b/fs/nfs/nfs4session.c
+@@ -231,14 +231,23 @@ out:
+ 	return ret;
+ }
+ 
++/*
++ * nfs4_release_slot_table - release all slot table entries
++ */
++static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
++{
++	nfs4_shrink_slot_table(tbl, 0);
++}
++
+ /**
+- * nfs4_release_slot_table - release resources attached to a slot table
++ * nfs4_shutdown_slot_table - release resources attached to a slot table
+  * @tbl: slot table to shut down
+  *
+  */
+-void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
++void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
+ {
+-	nfs4_shrink_slot_table(tbl, 0);
++	nfs4_release_slot_table(tbl);
++	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
+ }
+ 
+ /**
+@@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
+ 	spin_unlock(&tbl->slot_tbl_lock);
+ }
+ 
+-static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
++static void nfs4_release_session_slot_tables(struct nfs4_session *session)
+ {
+ 	nfs4_release_slot_table(&session->fc_slot_table);
+ 	nfs4_release_slot_table(&session->bc_slot_table);
+@@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
+ 	if (status && tbl->slots == NULL)
+ 		/* Fore and back channel share a connection so get
+ 		 * both slot tables or neither */
+-		nfs4_destroy_session_slot_tables(ses);
++		nfs4_release_session_slot_tables(ses);
+ 	return status;
+ }
+ 
+@@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+ 	return session;
+ }
+ 
++static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
++{
++	nfs4_shutdown_slot_table(&session->fc_slot_table);
++	nfs4_shutdown_slot_table(&session->bc_slot_table);
++}
++
+ void nfs4_destroy_session(struct nfs4_session *session)
+ {
+ 	struct rpc_xprt *xprt;
+diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
+index 232306100651..b34ada9bc6a2 100644
+--- a/fs/nfs/nfs4session.h
++++ b/fs/nfs/nfs4session.h
+@@ -74,7 +74,7 @@ enum nfs4_session_state {
+ 
+ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
+ 		unsigned int max_reqs, const char *queue);
+-extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl);
++extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
+ extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
+ extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
+ extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 49f52c8f4422..ea9e076a91bf 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -379,12 +379,14 @@ struct nfs_openres {
+  * Arguments to the open_confirm call.
+  */
+ struct nfs_open_confirmargs {
++	struct nfs4_sequence_args	seq_args;
+ 	const struct nfs_fh *	fh;
+ 	nfs4_stateid *		stateid;
+ 	struct nfs_seqid *	seqid;
+ };
+ 
+ struct nfs_open_confirmres {
++	struct nfs4_sequence_res	seq_res;
+ 	nfs4_stateid            stateid;
+ 	struct nfs_seqid *	seqid;
+ };
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index 4a1fef09f658..07cbdfea9ae2 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
+ # Generic configurable interrupt chip implementation
+ config GENERIC_IRQ_CHIP
+        bool
++       select IRQ_DOMAIN
+ 
+ # Generic irq_domain hw <--> linux irq number translation
+ config IRQ_DOMAIN
+diff --git a/lib/Makefile b/lib/Makefile
+index f3bb2cb98adf..6af6fbb053e3 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -47,6 +47,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+ lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+ 
++GCOV_PROFILE_hweight.o := n
+ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 2d30e2cfe804..7106cb1aca8e 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
+ 	if (!TestSetPageDirty(page)) {
+ 		struct address_space *mapping = page_mapping(page);
+ 		struct address_space *mapping2;
++		unsigned long flags;
+ 
+ 		if (!mapping)
+ 			return 1;
+ 
+-		spin_lock_irq(&mapping->tree_lock);
++		spin_lock_irqsave(&mapping->tree_lock, flags);
+ 		mapping2 = page_mapping(page);
+ 		if (mapping2) { /* Race with truncate? */
+ 			BUG_ON(mapping2 != mapping);
+@@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
+ 			radix_tree_tag_set(&mapping->page_tree,
+ 				page_index(page), PAGECACHE_TAG_DIRTY);
+ 		}
+-		spin_unlock_irq(&mapping->tree_lock);
++		spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ 		if (mapping->host) {
+ 			/* !PageAnon && !swapper_space */
+ 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index de7c904e52e5..0ec2eaf3ccfd 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1922,7 +1922,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 	p->swap_map = NULL;
+ 	cluster_info = p->cluster_info;
+ 	p->cluster_info = NULL;
+-	p->flags = 0;
+ 	frontswap_map = frontswap_map_get(p);
+ 	frontswap_map_set(p, NULL);
+ 	spin_unlock(&p->lock);
+@@ -1948,6 +1947,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 		mutex_unlock(&inode->i_mutex);
+ 	}
+ 	filp_close(swap_file, NULL);
++
++	/*
++	 * Clear the SWP_USED flag after all resources are freed so that swapon
++	 * can reuse this swap_info in alloc_swap_info() safely.  It is ok to
++	 * not hold p->lock after we cleared its SWP_WRITEOK.
++	 */
++	spin_lock(&swap_lock);
++	p->flags = 0;
++	spin_unlock(&swap_lock);
++
+ 	err = 0;
+ 	atomic_inc(&proc_poll_event);
+ 	wake_up_interruptible(&proc_poll_wait);
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index b4feecc3fe01..18caa16de27b 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1231,6 +1231,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+ 	struct context context;
+ 	int rc = 0;
+ 
++	/* An empty security context is never valid. */
++	if (!scontext_len)
++		return -EINVAL;
++
+ 	if (!ss_initialized) {
+ 		int i;
+ 
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 31230c68b603..7fc15814c618 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -227,6 +227,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
++static void ad1986a_fixup_eapd(struct hda_codec *codec,
++			       const struct hda_fixup *fix, int action)
++{
++	struct ad198x_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		codec->inv_eapd = 0;
++		spec->gen.keep_eapd_on = 1;
++		spec->eapd_nid = 0x1b;
++	}
++}
++
+ enum {
+ 	AD1986A_FIXUP_INV_JACK_DETECT,
+ 	AD1986A_FIXUP_ULTRA,
+@@ -234,6 +247,7 @@ enum {
+ 	AD1986A_FIXUP_3STACK,
+ 	AD1986A_FIXUP_LAPTOP,
+ 	AD1986A_FIXUP_LAPTOP_IMIC,
++	AD1986A_FIXUP_EAPD,
+ };
+ 
+ static const struct hda_fixup ad1986a_fixups[] = {
+@@ -294,6 +308,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
+ 		.chained_before = 1,
+ 		.chain_id = AD1986A_FIXUP_LAPTOP,
+ 	},
++	[AD1986A_FIXUP_EAPD] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = ad1986a_fixup_eapd,
++	},
+ };
+ 
+ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+@@ -301,6 +319,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+ 	SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
+ 	SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
++	SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
+ 	SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
+ 	SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
+@@ -455,6 +474,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
+ static int patch_ad1983(struct hda_codec *codec)
+ {
+ 	struct ad198x_spec *spec;
++	static hda_nid_t conn_0c[] = { 0x08 };
++	static hda_nid_t conn_0d[] = { 0x09 };
+ 	int err;
+ 
+ 	err = alloc_ad_spec(codec);
+@@ -462,8 +483,14 @@ static int patch_ad1983(struct hda_codec *codec)
+ 		return err;
+ 	spec = codec->spec;
+ 
++	spec->gen.mixer_nid = 0x0e;
+ 	spec->gen.beep_nid = 0x10;
+ 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
++
++	/* limit the loopback routes not to confuse the parser */
++	snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
++	snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
++
+ 	err = ad198x_parse_auto_config(codec, false);
+ 	if (err < 0)
+ 		goto error;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 56b62555eef4..6195a4f8d9b8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1771,6 +1771,7 @@ enum {
+ 	ALC889_FIXUP_IMAC91_VREF,
+ 	ALC889_FIXUP_MBA11_VREF,
+ 	ALC889_FIXUP_MBA21_VREF,
++	ALC889_FIXUP_MP11_VREF,
+ 	ALC882_FIXUP_INV_DMIC,
+ 	ALC882_FIXUP_NO_PRIMARY_HP,
+ 	ALC887_FIXUP_ASUS_BASS,
+@@ -2127,6 +2128,12 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC889_FIXUP_MBP_VREF,
+ 	},
++	[ALC889_FIXUP_MP11_VREF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc889_fixup_mba11_vref,
++		.chained = true,
++		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
++	},
+ 	[ALC882_FIXUP_INV_DMIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_inv_dmic_0x12,
+@@ -2184,7 +2191,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
+-	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
++	SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
+ 	SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
+ 	SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
+diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
+index de9408b83f75..e05a86b7c0da 100644
+--- a/sound/usb/Kconfig
++++ b/sound/usb/Kconfig
+@@ -14,6 +14,7 @@ config SND_USB_AUDIO
+ 	select SND_HWDEP
+ 	select SND_RAWMIDI
+ 	select SND_PCM
++	select BITREVERSE
+ 	help
+ 	  Say Y here to include support for USB audio and USB MIDI
+ 	  devices.



^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2014-02-21 14:50 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-02-21 14:49 [gentoo-commits] linux-patches r2686 - genpatches-2.6/trunk/3.12 Mike Pagano (mpagano)

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox