From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.12 commit in: /
Date: Wed, 13 Sep 2017 22:28:59 +0000 (UTC)
Message-ID: <1505341731.74695b4b8b53d91a791c3227c4d6db6b45b0371a.mpagano@gentoo>

commit:     74695b4b8b53d91a791c3227c4d6db6b45b0371a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 22:28:51 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 22:28:51 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=74695b4b

Linux patch 4.12.13

 0000_README              |    4 +
 1012_linux-4.12.13.patch | 1076 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1080 insertions(+)

diff --git a/0000_README b/0000_README
index bd9f666..5320ea5 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-4.12.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.12.12
 
+Patch:  1012_linux-4.12.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.12.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-4.12.13.patch b/1012_linux-4.12.13.patch
new file mode 100644
index 0000000..763a970
--- /dev/null
+++ b/1012_linux-4.12.13.patch
@@ -0,0 +1,1076 @@
+diff --git a/Makefile b/Makefile
+index e96306381ee8..983224467a4d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index ff8b0aa2dfde..42f585379e19 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -315,8 +315,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 	 * signal first. We do not need to release the mmap_sem because
+ 	 * it would already be released in __lock_page_or_retry in
+ 	 * mm/filemap.c. */
+-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++		if (!user_mode(regs))
++			goto no_context;
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Major/minor page fault accounting is only done on the
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index b69e4a4ecdd8..1ce5e773dd30 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -312,6 +312,7 @@
+ 				interrupt-controller;
+ 				reg = <0x1d00000 0x10000>, /* GICD */
+ 				      <0x1d40000 0x40000>; /* GICR */
++				interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 			};
+ 		};
+ 
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index cb8225969255..97fc5f18b0a8 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4759,7 +4759,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+ 	 * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+ 	 *       in PFERR_NEXT_GUEST_PAGE)
+ 	 */
+-	if (error_code == PFERR_NESTED_GUEST_PAGE) {
++	if (vcpu->arch.mmu.direct_map &&
++		error_code == PFERR_NESTED_GUEST_PAGE) {
+ 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+ 		return 1;
+ 	}
+diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
+index 61ca020c5272..d929111b5ebe 100644
+--- a/drivers/mtd/nand/mxc_nand.c
++++ b/drivers/mtd/nand/mxc_nand.c
+@@ -877,6 +877,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+ 	}
+ }
+ 
++#define MXC_V1_ECCBYTES		5
++
+ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ 				struct mtd_oob_region *oobregion)
+ {
+@@ -886,7 +888,7 @@ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ 		return -ERANGE;
+ 
+ 	oobregion->offset = (section * 16) + 6;
+-	oobregion->length = nand_chip->ecc.bytes;
++	oobregion->length = MXC_V1_ECCBYTES;
+ 
+ 	return 0;
+ }
+@@ -908,8 +910,7 @@ static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+ 			oobregion->length = 4;
+ 		}
+ 	} else {
+-		oobregion->offset = ((section - 1) * 16) +
+-				    nand_chip->ecc.bytes + 6;
++		oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
+ 		if (section < nand_chip->ecc.steps)
+ 			oobregion->length = (section * 16) + 6 -
+ 					    oobregion->offset;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 6f9771e82476..2be78d1bc195 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3972,10 +3972,13 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
+ 	 * nand_decode_ext_id() otherwise.
+ 	 */
+ 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+-	    chip->manufacturer.desc->ops->detect)
++	    chip->manufacturer.desc->ops->detect) {
++		/* The 3rd id byte holds MLC / multichip data */
++		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ 		chip->manufacturer.desc->ops->detect(chip);
+-	else
++	} else {
+ 		nand_decode_ext_id(chip);
++	}
+ }
+ 
+ /*
+diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
+index b12dc7325378..bd9a6e343848 100644
+--- a/drivers/mtd/nand/nand_hynix.c
++++ b/drivers/mtd/nand/nand_hynix.c
+@@ -477,7 +477,7 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
+ 		 * The ECC requirements field meaning depends on the
+ 		 * NAND technology.
+ 		 */
+-		u8 nand_tech = chip->id.data[5] & 0x3;
++		u8 nand_tech = chip->id.data[5] & 0x7;
+ 
+ 		if (nand_tech < 3) {
+ 			/* > 26nm, reference: H27UBG8T2A datasheet */
+@@ -533,7 +533,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
+ 		if (nand_tech > 0)
+ 			chip->options |= NAND_NEED_SCRAMBLING;
+ 	} else {
+-		nand_tech = chip->id.data[5] & 0x3;
++		nand_tech = chip->id.data[5] & 0x7;
+ 
+ 		/* < 32nm */
+ 		if (nand_tech > 2)
+diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
+index 57d483ac5765..6f0fd1512ad2 100644
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -109,7 +109,11 @@
+ #define	READ_ADDR			0
+ 
+ /* NAND_DEV_CMD_VLD bits */
+-#define	READ_START_VLD			0
++#define	READ_START_VLD			BIT(0)
++#define	READ_STOP_VLD			BIT(1)
++#define	WRITE_START_VLD			BIT(2)
++#define	ERASE_START_VLD			BIT(3)
++#define	SEQ_READ_START_VLD		BIT(4)
+ 
+ /* NAND_EBI2_ECC_BUF_CFG bits */
+ #define	NUM_STEPS			0
+@@ -148,6 +152,10 @@
+ #define	FETCH_ID			0xb
+ #define	RESET_DEVICE			0xd
+ 
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
++					 ERASE_START_VLD | SEQ_READ_START_VLD)
++
+ /*
+  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+  * the driver calls the chunks 'step' or 'codeword' interchangeably
+@@ -672,8 +680,7 @@ static int nandc_param(struct qcom_nand_host *host)
+ 
+ 	/* configure CMD1 and VLD for ONFI param probing */
+ 	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+-		      (nandc->vld & ~(1 << READ_START_VLD))
+-		      | 0 << READ_START_VLD);
++		      (nandc->vld & ~READ_START_VLD));
+ 	nandc_set_reg(nandc, NAND_DEV_CMD1,
+ 		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ 		      | NAND_CMD_PARAM << READ_ADDR);
+@@ -1893,7 +1900,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
+ 				| wide_bus << WIDE_FLASH
+ 				| 1 << DEV0_CFG1_ECC_DISABLE;
+ 
+-	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
++	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+ 				| 0 << ECC_SW_RESET
+ 				| host->cw_data << ECC_NUM_DATA_BYTES
+ 				| 1 << ECC_FORCE_CLK_OPEN
+@@ -1972,13 +1979,14 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+ 	/* kill onenand */
+ 	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
++	nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
+ 
+ 	/* enable ADM DMA */
+ 	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ 
+ 	/* save the original values of these registers */
+ 	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
+-	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
++	nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index d11c7b210e81..5672aec48572 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -3699,7 +3699,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ 	if (rt2x00_rt(rt2x00dev, RT3572))
+ 		rt2800_rfcsr_write(rt2x00dev, 8, 0);
+ 
+-	rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
++	if (rt2x00_rt(rt2x00dev, RT6352))
++		rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
++	else
++		tx_pin = 0;
+ 
+ 	switch (rt2x00dev->default_ant.tx_chain_num) {
+ 	case 3:
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+index 2f3946be4ce2..34cb46a0c904 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+@@ -1153,7 +1153,10 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ 		}
+ 
+ 		/* fixed internal switch S1->WiFi, S0->BT */
+-		btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
++		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++		else
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ 
+ 		switch (antpos_type) {
+ 		case BTC_ANT_WIFI_AT_MAIN:
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 990e6fb32a63..e353e151ffa9 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -77,7 +77,7 @@ static struct nvmf_host *nvmf_host_default(void)
+ 	kref_init(&host->ref);
+ 	uuid_be_gen(&host->id);
+ 	snprintf(host->nqn, NVMF_NQN_SIZE,
+-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
++		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
+ 
+ 	mutex_lock(&nvmf_hosts_mutex);
+ 	list_add_tail(&host->list, &nvmf_hosts);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 4f1cdd5058f1..76209e7fb6e1 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1828,6 +1828,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ 			goto restore;
+ 		}
+ 
++		btrfs_qgroup_rescan_resume(fs_info);
++
+ 		if (!fs_info->uuid_root) {
+ 			btrfs_info(fs_info, "creating UUID tree");
+ 			ret = btrfs_create_uuid_tree(fs_info);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index d264363559db..426e4e06b333 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -629,11 +629,11 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ 	if (result <= 0)
+ 		goto out;
+ 
+-	result = generic_write_sync(iocb, result);
+-	if (result < 0)
+-		goto out;
+ 	written = result;
+ 	iocb->ki_pos += written;
++	result = generic_write_sync(iocb, written);
++	if (result < 0)
++		goto out;
+ 
+ 	/* Return error values */
+ 	if (nfs_need_check_write(file, inode)) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 3e24392f2caa..4651bf48aa86 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -248,7 +248,6 @@ int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
+ extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
+ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
+ void nfs_pgio_header_free(struct nfs_pgio_header *);
+-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
+ int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
+ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ 		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 7ddba5022948..0e1d3f263f8c 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -515,16 +515,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
+ }
+ EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
+ 
+-/*
+- * nfs_pgio_header_free - Free a read or write header
+- * @hdr: The header to free
+- */
+-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+-{
+-	hdr->rw_ops->rw_free_header(hdr);
+-}
+-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+-
+ /**
+  * nfs_pgio_data_destroy - make @hdr suitable for reuse
+  *
+@@ -533,14 +523,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+  *
+  * @hdr: A header that has had nfs_generic_pgio called
+  */
+-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
++static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+ {
+ 	if (hdr->args.context)
+ 		put_nfs_open_context(hdr->args.context);
+ 	if (hdr->page_array.pagevec != hdr->page_array.page_array)
+ 		kfree(hdr->page_array.pagevec);
+ }
+-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
++
++/*
++ * nfs_pgio_header_free - Free a read or write header
++ * @hdr: The header to free
++ */
++void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
++{
++	nfs_pgio_data_destroy(hdr);
++	hdr->rw_ops->rw_free_header(hdr);
++}
++EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+ 
+ /**
+  * nfs_pgio_rpcsetup - Set up arguments for a pageio call
+@@ -654,7 +654,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
+ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
+ {
+ 	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->completion_ops->completion(hdr);
+ }
+ 
+@@ -665,7 +664,6 @@ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
+ static void nfs_pgio_release(void *calldata)
+ {
+ 	struct nfs_pgio_header *hdr = calldata;
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->completion_ops->completion(hdr);
+ }
+ 
+@@ -699,9 +697,6 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ 		     int io_flags,
+ 		     gfp_t gfp_flags)
+ {
+-	struct nfs_pgio_mirror *new;
+-	int i;
+-
+ 	desc->pg_moreio = 0;
+ 	desc->pg_inode = inode;
+ 	desc->pg_ops = pg_ops;
+@@ -717,21 +712,9 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ 	desc->pg_mirror_count = 1;
+ 	desc->pg_mirror_idx = 0;
+ 
+-	if (pg_ops->pg_get_mirror_count) {
+-		/* until we have a request, we don't have an lseg and no
+-		 * idea how many mirrors there will be */
+-		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
+-			      sizeof(struct nfs_pgio_mirror), gfp_flags);
+-		desc->pg_mirrors_dynamic = new;
+-		desc->pg_mirrors = new;
+-
+-		for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
+-			nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
+-	} else {
+-		desc->pg_mirrors_dynamic = NULL;
+-		desc->pg_mirrors = desc->pg_mirrors_static;
+-		nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+-	}
++	desc->pg_mirrors_dynamic = NULL;
++	desc->pg_mirrors = desc->pg_mirrors_static;
++	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+ }
+ EXPORT_SYMBOL_GPL(nfs_pageio_init);
+ 
+@@ -850,32 +833,52 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
+ 	return ret;
+ }
+ 
++static struct nfs_pgio_mirror *
++nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
++		unsigned int mirror_count)
++{
++	struct nfs_pgio_mirror *ret;
++	unsigned int i;
++
++	kfree(desc->pg_mirrors_dynamic);
++	desc->pg_mirrors_dynamic = NULL;
++	if (mirror_count == 1)
++		return desc->pg_mirrors_static;
++	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
++	if (ret != NULL) {
++		for (i = 0; i < mirror_count; i++)
++			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
++		desc->pg_mirrors_dynamic = ret;
++	}
++	return ret;
++}
++
+ /*
+  * nfs_pageio_setup_mirroring - determine if mirroring is to be used
+  *				by calling the pg_get_mirror_count op
+  */
+-static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
++static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
+ 				       struct nfs_page *req)
+ {
+-	int mirror_count = 1;
++	unsigned int mirror_count = 1;
+ 
+-	if (!pgio->pg_ops->pg_get_mirror_count)
+-		return 0;
+-
+-	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+-
+-	if (pgio->pg_error < 0)
+-		return pgio->pg_error;
+-
+-	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
+-		return -EINVAL;
++	if (pgio->pg_ops->pg_get_mirror_count)
++		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
++	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
++		return;
+ 
+-	if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
+-		return -EINVAL;
++	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
++		pgio->pg_error = -EINVAL;
++		return;
++	}
+ 
++	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
++	if (pgio->pg_mirrors == NULL) {
++		pgio->pg_error = -ENOMEM;
++		pgio->pg_mirrors = pgio->pg_mirrors_static;
++		mirror_count = 1;
++	}
+ 	pgio->pg_mirror_count = mirror_count;
+-
+-	return 0;
+ }
+ 
+ /*
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index c383d0913b54..64bb20130edf 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2274,7 +2274,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
+ 		nfs_pageio_reset_write_mds(desc);
+ 		mirror->pg_recoalesce = 1;
+ 	}
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->release(hdr);
+ }
+ 
+@@ -2398,7 +2397,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
+ 		nfs_pageio_reset_read_mds(desc);
+ 		mirror->pg_recoalesce = 1;
+ 	}
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->release(hdr);
+ }
+ 
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index 044fb0e15390..f6586691d989 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -279,7 +279,14 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ #endif /* DEBUG */
+ 
+ #ifdef CONFIG_XFS_RT
+-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
++
++/*
++ * make sure we ignore the inode flag if the filesystem doesn't have a
++ * configured realtime device.
++ */
++#define XFS_IS_REALTIME_INODE(ip)			\
++	(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) &&	\
++	 (ip)->i_mount->m_rtdev_targp)
+ #else
+ #define XFS_IS_REALTIME_INODE(ip) (0)
+ #endif
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 898e87998417..79a804f1aab9 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -463,7 +463,7 @@ radix_tree_node_free(struct radix_tree_node *node)
+  * To make use of this facility, the radix tree must be initialised without
+  * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
+  */
+-static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
++static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+ {
+ 	struct radix_tree_preload *rtp;
+ 	struct radix_tree_node *node;
+@@ -2103,7 +2103,8 @@ EXPORT_SYMBOL(radix_tree_tagged);
+  */
+ void idr_preload(gfp_t gfp_mask)
+ {
+-	__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
++	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
++		preempt_disable();
+ }
+ EXPORT_SYMBOL(idr_preload);
+ 
+@@ -2117,13 +2118,13 @@ EXPORT_SYMBOL(idr_preload);
+  */
+ int ida_pre_get(struct ida *ida, gfp_t gfp)
+ {
+-	__radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
+ 	/*
+ 	 * The IDA API has no preload_end() equivalent.  Instead,
+ 	 * ida_get_new() can return -EAGAIN, prompting the caller
+ 	 * to return to the ida_pre_get() step.
+ 	 */
+-	preempt_enable();
++	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
++		preempt_enable();
+ 
+ 	if (!this_cpu_read(ida_bitmap)) {
+ 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+diff --git a/mm/memory.c b/mm/memory.c
+index 9e50ffcf9639..0a98a1a55dfa 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3843,6 +3843,11 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ 	/* do counter updates before entering really critical section. */
+ 	check_sync_rss_stat(current);
+ 
++	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
++					    flags & FAULT_FLAG_INSTRUCTION,
++					    flags & FAULT_FLAG_REMOTE))
++		return VM_FAULT_SIGSEGV;
++
+ 	/*
+ 	 * Enable the memcg OOM handling for faults triggered in user
+ 	 * space.  Kernel faults are handled more gracefully.
+@@ -3850,11 +3855,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ 	if (flags & FAULT_FLAG_USER)
+ 		mem_cgroup_oom_enable();
+ 
+-	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+-					    flags & FAULT_FLAG_INSTRUCTION,
+-					    flags & FAULT_FLAG_REMOTE))
+-		return VM_FAULT_SIGSEGV;
+-
+ 	if (unlikely(is_vm_hugetlb_page(vma)))
+ 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
+ 	else
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 4f6cba1b6632..2e09f67bc99b 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2903,7 +2903,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ 	p->flags = 0;
+ 	spin_unlock(&swap_lock);
+ 	vfree(swap_map);
+-	vfree(cluster_info);
++	kvfree(cluster_info);
++	kvfree(frontswap_map);
+ 	if (swap_file) {
+ 		if (inode && S_ISREG(inode->i_mode)) {
+ 			inode_unlock(inode);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index f88ac99528ce..6754e93d2096 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ 				       u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 			   void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+ 
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 
+@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ 	return len;
+ }
+ 
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ 	struct l2cap_conf_opt *opt = *ptr;
+ 
+ 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ 
++	if (size < L2CAP_CONF_OPT_SIZE + len)
++		return;
++
+ 	opt->type = type;
+ 	opt->len  = len;
+ 
+@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ 	*ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+ 
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ 	struct l2cap_conf_efs efs;
+ 
+@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ 	}
+ 
+ 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+-			   (unsigned long) &efs);
++			   (unsigned long) &efs, size);
+ }
+ 
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ 	chan->ack_win = chan->tx_win;
+ }
+ 
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ 	void *ptr = req->data;
++	void *endptr = data + data_size;
+ 	u16 size;
+ 
+ 	BT_DBG("chan %p", chan);
+@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ done:
+ 	if (chan->imtu != L2CAP_DEFAULT_MTU)
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 
+ 	switch (chan->mode) {
+ 	case L2CAP_MODE_BASIC:
+@@ -3239,7 +3243,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 		rfc.max_pdu_size    = 0;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 		break;
+ 
+ 	case L2CAP_MODE_ERTM:
+@@ -3259,21 +3263,21 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 				       L2CAP_DEFAULT_TX_WINDOW);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 
+@@ -3291,17 +3295,17 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 		rfc.max_pdu_size = cpu_to_le16(size);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 	}
+@@ -3312,10 +3316,11 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 	return ptr - data;
+ }
+ 
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_rsp *rsp = data;
+ 	void *ptr = rsp->data;
++	void *endptr = data + data_size;
+ 	void *req = chan->conf_req;
+ 	int len = chan->conf_len;
+ 	int type, hint, olen;
+@@ -3417,7 +3422,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			return -ECONNREFUSED;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 	}
+ 
+ 	if (result == L2CAP_CONF_SUCCESS) {
+@@ -3430,7 +3435,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			chan->omtu = mtu;
+ 			set_bit(CONF_MTU_DONE, &chan->conf_state);
+ 		}
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+ 
+ 		if (remote_efs) {
+ 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3444,7 +3449,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			} else {
+ 				/* Send PENDING Conf Rsp */
+ 				result = L2CAP_CONF_PENDING;
+@@ -3477,7 +3482,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 
+ 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ 				chan->remote_id = efs.id;
+@@ -3491,7 +3496,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 					le32_to_cpu(efs.sdu_itime);
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			}
+ 			break;
+ 
+@@ -3505,7 +3510,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-					   (unsigned long) &rfc);
++					   (unsigned long) &rfc, endptr - ptr);
+ 
+ 			break;
+ 
+@@ -3527,10 +3532,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ }
+ 
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+-				void *data, u16 *result)
++				void *data, size_t size, u16 *result)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	void *ptr = req->data;
++	void *endptr = data + size;
+ 	int type, olen;
+ 	unsigned long val;
+ 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ 			} else
+ 				chan->imtu = val;
+-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FLUSH_TO:
+ 			chan->flush_to = val;
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+-					   2, chan->flush_to);
++					   2, chan->flush_to, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_RFC:
+@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 			chan->fcs = 0;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EWS:
+ 			chan->ack_win = min_t(u16, val, chan->ack_win);
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				return -ECONNREFUSED;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+-					   (unsigned long) &efs);
++					   (unsigned long) &efs, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FCS:
+@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ 		return;
+ 
+ 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-		       l2cap_build_conf_req(chan, buf), buf);
++		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 	chan->num_conf_req++;
+ }
+ 
+@@ -3900,7 +3906,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ 		u8 buf[128];
+ 		set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 			break;
+ 
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, req), req);
++			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ 		chan->num_conf_req++;
+ 		break;
+ 
+@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	}
+ 
+ 	/* Complete config. */
+-	len = l2cap_parse_conf_req(chan, rsp);
++	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ 	if (len < 0) {
+ 		l2cap_send_disconn_req(chan, ECONNRESET);
+ 		goto unlock;
+@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ 		u8 buf[64];
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			char buf[64];
+ 
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   buf, &result);
++						   buf, sizeof(buf), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			/* throw out any old stored conf requests */
+ 			result = L2CAP_CONF_SUCCESS;
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   req, &result);
++						   req, sizeof(req), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ 				       L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 	}
+@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 				set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ 					       L2CAP_CONF_REQ,
+-					       l2cap_build_conf_req(chan, buf),
++					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ 					       buf);
+ 				chan->num_conf_req++;
+ 			}
+diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
+index 912b5a9ccbab..013d8d1170fe 100644
+--- a/sound/isa/msnd/msnd_midi.c
++++ b/sound/isa/msnd/msnd_midi.c
+@@ -120,24 +120,24 @@ void snd_msndmidi_input_read(void *mpuv)
+ 	unsigned long flags;
+ 	struct snd_msndmidi *mpu = mpuv;
+ 	void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
++	u16 head, tail, size;
+ 
+ 	spin_lock_irqsave(&mpu->input_lock, flags);
+-	while (readw(mpu->dev->MIDQ + JQS_wTail) !=
+-	       readw(mpu->dev->MIDQ + JQS_wHead)) {
+-		u16 wTmp, val;
+-		val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
+-
+-			if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
+-				     &mpu->mode))
+-				snd_rawmidi_receive(mpu->substream_input,
+-						    (unsigned char *)&val, 1);
+-
+-		wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
+-		if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
+-			writew(0,  mpu->dev->MIDQ + JQS_wHead);
+-		else
+-			writew(wTmp,  mpu->dev->MIDQ + JQS_wHead);
++	head = readw(mpu->dev->MIDQ + JQS_wHead);
++	tail = readw(mpu->dev->MIDQ + JQS_wTail);
++	size = readw(mpu->dev->MIDQ + JQS_wSize);
++	if (head > size || tail > size)
++		goto out;
++	while (head != tail) {
++		unsigned char val = readw(pwMIDQData + 2 * head);
++
++		if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
++			snd_rawmidi_receive(mpu->substream_input, &val, 1);
++		if (++head > size)
++			head = 0;
++		writew(head, mpu->dev->MIDQ + JQS_wHead);
+ 	}
++ out:
+ 	spin_unlock_irqrestore(&mpu->input_lock, flags);
+ }
+ EXPORT_SYMBOL(snd_msndmidi_input_read);
+diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
+index ad4897337df5..fc4fb1904aef 100644
+--- a/sound/isa/msnd/msnd_pinnacle.c
++++ b/sound/isa/msnd/msnd_pinnacle.c
+@@ -170,23 +170,24 @@ static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
+ {
+ 	struct snd_msnd *chip = dev_id;
+ 	void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
++	u16 head, tail, size;
+ 
+ 	/* Send ack to DSP */
+ 	/* inb(chip->io + HP_RXL); */
+ 
+ 	/* Evaluate queued DSP messages */
+-	while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
+-		u16 wTmp;
+-
+-		snd_msnd_eval_dsp_msg(chip,
+-			readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
+-
+-		wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
+-		if (wTmp > readw(chip->DSPQ + JQS_wSize))
+-			writew(0, chip->DSPQ + JQS_wHead);
+-		else
+-			writew(wTmp, chip->DSPQ + JQS_wHead);
++	head = readw(chip->DSPQ + JQS_wHead);
++	tail = readw(chip->DSPQ + JQS_wTail);
++	size = readw(chip->DSPQ + JQS_wSize);
++	if (head > size || tail > size)
++		goto out;
++	while (head != tail) {
++		snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
++		if (++head > size)
++			head = 0;
++		writew(head, chip->DSPQ + JQS_wHead);
+ 	}
++ out:
+ 	/* Send ack to DSP */
+ 	inb(chip->io + HP_RXL);
+ 	return IRQ_HANDLED;
+diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
+index b4967d875236..f249e042b3b5 100644
+--- a/tools/testing/selftests/x86/fsgsbase.c
++++ b/tools/testing/selftests/x86/fsgsbase.c
+@@ -285,9 +285,12 @@ static void *threadproc(void *ctx)
+ 	}
+ }
+ 
+-static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
++static void set_gs_and_switch_to(unsigned long local,
++				 unsigned short force_sel,
++				 unsigned long remote)
+ {
+ 	unsigned long base;
++	unsigned short sel_pre_sched, sel_post_sched;
+ 
+ 	bool hard_zero = false;
+ 	if (local == HARD_ZERO) {
+@@ -297,6 +300,8 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+ 
+ 	printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
+ 	       local, hard_zero ? " and clear gs" : "", remote);
++	if (force_sel)
++		printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
+ 	if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
+ 		err(1, "ARCH_SET_GS");
+ 	if (hard_zero)
+@@ -307,18 +312,35 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+ 		printf("[FAIL]\tGSBASE wasn't set as expected\n");
+ 	}
+ 
++	if (force_sel) {
++		asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
++		sel_pre_sched = force_sel;
++		local = read_base(GS);
++
++		/*
++		 * Signal delivery seems to mess up weird selectors.  Put it
++		 * back.
++		 */
++		asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
++	} else {
++		asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
++	}
++
+ 	remote_base = remote;
+ 	ftx = 1;
+ 	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+ 	while (ftx != 0)
+ 		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+ 
++	asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
+ 	base = read_base(GS);
+-	if (base == local) {
+-		printf("[OK]\tGSBASE remained 0x%lx\n", local);
++	if (base == local && sel_pre_sched == sel_post_sched) {
++		printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
++		       sel_pre_sched, local);
+ 	} else {
+ 		nerrs++;
+-		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
++		printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
++		       sel_pre_sched, local, sel_post_sched, base);
+ 	}
+ }
+ 
+@@ -381,8 +403,15 @@ int main()
+ 
+ 	for (int local = 0; local < 4; local++) {
+ 		for (int remote = 0; remote < 4; remote++) {
+-			set_gs_and_switch_to(bases_with_hard_zero[local],
+-					     bases_with_hard_zero[remote]);
++			for (unsigned short s = 0; s < 5; s++) {
++				unsigned short sel = s;
++				if (s == 4)
++					asm ("mov %%ss, %0" : "=rm" (sel));
++				set_gs_and_switch_to(
++					bases_with_hard_zero[local],
++					sel,
++					bases_with_hard_zero[remote]);
++			}
+ 		}
+ 	}
+ 

