From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Tue, 24 May 2016 12:03:22 +0000 (UTC)
Message-ID: <1464091397.35a866a10473dea83b293537d3682b15140b8dbc.mpagano@gentoo>

commit:     35a866a10473dea83b293537d3682b15140b8dbc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 24 12:03:17 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 24 12:03:17 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=35a866a1

Linux patch 3.18.34

 0000_README              |    4 +
 1033_linux-3.18.34.patch | 2143 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2147 insertions(+)

diff --git a/0000_README b/0000_README
index e4866a4..a6bf155 100644
--- a/0000_README
+++ b/0000_README
@@ -175,6 +175,10 @@ Patch:  1032_linux-3.18.33.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.18.33
 
+Patch:  1033_linux-3.18.34.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.18.34
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1033_linux-3.18.34.patch b/1033_linux-3.18.34.patch
new file mode 100644
index 0000000..2998f69
--- /dev/null
+++ b/1033_linux-3.18.34.patch
@@ -0,0 +1,2143 @@
+diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+index 4ab09f2202d4..e6d5a4aa953f 100644
+--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+@@ -30,6 +30,10 @@ Optional properties:
+ - target-supply     : regulator for SATA target power
+ - phys              : reference to the SATA PHY node
+ - phy-names         : must be "sata-phy"
++- ports-implemented : Mask that indicates which ports that the HBA supports
++		      are available for software to use. Useful if PORTS_IMPL
++		      is not programmed by the BIOS, which is true with
++		      some embedded SOC's.
+ 
+ Required properties when using sub-nodes:
+ - #address-cells    : number of cells to encode an address
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 090eaae42181..f553cf7abe6a 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3558,8 +3558,8 @@ F:	Documentation/efi-stub.txt
+ F:	arch/ia64/kernel/efi.c
+ F:	arch/x86/boot/compressed/eboot.[ch]
+ F:	arch/x86/include/asm/efi.h
+-F:	arch/x86/platform/efi/*
+-F:	drivers/firmware/efi/*
++F:	arch/x86/platform/efi/
++F:	drivers/firmware/efi/
+ F:	include/linux/efi*.h
+ 
+ EFI VARIABLE FILESYSTEM
+diff --git a/Makefile b/Makefile
+index 59d0737f9524..0df57f0a9e11 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index f65ea0af4af3..a2b1c4a3afd8 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -12,6 +12,7 @@
+ #include <asm/memory.h>
+ 
+ 	.arch	armv7-a
++	.arm
+ 
+ ENTRY(secondary_trampoline)
+ 	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
+diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
+index 9585c81f755f..ce0b2b4075c7 100644
+--- a/arch/parisc/kernel/ptrace.c
++++ b/arch/parisc/kernel/ptrace.c
+@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 
+ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+-	long ret = 0;
+-
+ 	/* Do the secure computing check first. */
+ 	secure_computing_strict(regs->gr[20]);
+ 
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+-	    tracehook_report_syscall_entry(regs))
+-		ret = -1L;
++	    tracehook_report_syscall_entry(regs)) {
++		/*
++		 * Tracing decided this syscall should not happen or the
++		 * debugger stored an invalid system call number. Skip
++		 * the system call and the system call restart handling.
++		 */
++		regs->gr[20] = -1UL;
++		goto out;
++	}
+ 
+ #ifdef CONFIG_64BIT
+ 	if (!is_compat_task())
+@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ 			regs->gr[24] & 0xffffffff,
+ 			regs->gr[23] & 0xffffffff);
+ 
+-	return ret ? : regs->gr[20];
++out:
++	return regs->gr[20];
+ }
+ 
+ void do_syscall_trace_exit(struct pt_regs *regs)
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 0b8d26d3ba43..099c23616901 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -342,8 +342,8 @@ tracesys_next:
+ 	stw     %r21, -56(%r30)                 /* 6th argument */
+ #endif
+ 
+-	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
+-	b,n	.Lsyscall_nosys
++	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
++	b,n	.Ltracesys_nosys
+ 
+ 	LDREGX  %r20(%r19), %r19
+ 
+@@ -359,6 +359,9 @@ tracesys_next:
+ 	be      0(%sr7,%r19)
+ 	ldo	R%tracesys_exit(%r2),%r2
+ 
++.Ltracesys_nosys:
++	ldo	-ENOSYS(%r0),%r28		/* set errno */
++
+ 	/* Do *not* call this function on the gateway page, because it
+ 	makes a direct call to syscall_trace. */
+ 	
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index 5b3a903adae6..7043539e0248 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -77,7 +77,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
+ 	    "andc	%1,%1,%2\n\t"
+ 	    "popcntd	%0,%1"
+ 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+-		: "r" (bits));
++		: "b" (bits));
+ 
+ 	return leading_zero_bits;
+ }
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index b285d4e8c68e..5da924bbf0a0 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
+ 					continue;
+ 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ 					resource_size_t start, end;
++					unsigned long flags;
++
++					flags = pci_resource_flags(dev, i);
++					if (!(flags & IORESOURCE_MEM))
++						continue;
++
++					if (flags & IORESOURCE_UNSET)
++						continue;
++
++					if (pci_resource_len(dev, i) == 0)
++						continue;
+ 
+ 					start = pci_resource_start(dev, i);
+-					if (start == 0)
+-						break;
+ 					end = pci_resource_end(dev, i);
+ 					if (screen_info.lfb_base >= start &&
+ 					    screen_info.lfb_base < end) {
+ 						found_bar = 1;
++						break;
+ 					}
+ 				}
+ 			}
+diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
+index 92ae6acac8a7..6aa0f4d9eea6 100644
+--- a/arch/x86/kernel/tsc_msr.c
++++ b/arch/x86/kernel/tsc_msr.c
+@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
+ 
+ 	if (freq_desc_tables[cpu_index].msr_plat) {
+ 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
+-		ratio = (lo >> 8) & 0x1f;
++		ratio = (lo >> 8) & 0xff;
+ 	} else {
+ 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ 		ratio = (hi >> 8) & 0x1f;
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 51d48cd34884..46ab9099c330 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -68,8 +68,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+ 	struct scatterlist *sg;
+ 
+ 	sg = walk->sg;
+-	walk->pg = sg_page(sg);
+ 	walk->offset = sg->offset;
++	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
++	walk->offset = offset_in_page(walk->offset);
+ 	walk->entrylen = sg->length;
+ 
+ 	if (walk->entrylen > walk->total)
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index 3c7f7378b94d..86ddd0b3a7bf 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -412,6 +412,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ 				obj_desc->method.mutex->mutex.
+ 				    original_sync_level =
+ 				    obj_desc->method.mutex->mutex.sync_level;
++
++				obj_desc->method.mutex->mutex.thread_id =
++				    acpi_os_get_thread_id();
+ 			}
+ 		}
+ 
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 06f1d59fa678..83c2b8be99b8 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -43,6 +43,9 @@ static int ahci_probe(struct platform_device *pdev)
+ 	if (rc)
+ 		return rc;
+ 
++	of_property_read_u32(dev->of_node,
++			     "ports-implemented", &hpriv->force_port_map);
++
+ 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index a1d1c0e16697..794448ce2fc0 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -467,6 +467,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ 			 port_map, hpriv->force_port_map);
+ 		port_map = hpriv->force_port_map;
++		hpriv->saved_port_map = port_map;
+ 	}
+ 
+ 	if (hpriv->mask_port_map) {
+diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
+index d7026dc33388..b394aaef3867 100644
+--- a/drivers/base/regmap/regmap-spmi.c
++++ b/drivers/base/regmap/regmap-spmi.c
+@@ -153,7 +153,7 @@ static int regmap_spmi_ext_read(void *context,
+ 	while (val_size) {
+ 		len = min_t(size_t, val_size, 8);
+ 
+-		err = spmi_ext_register_readl(context, addr, val, val_size);
++		err = spmi_ext_register_readl(context, addr, val, len);
+ 		if (err)
+ 			goto err_out;
+ 
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 9cd0b301f81b..f84ef75b6487 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1874,7 +1874,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	i7_dev = get_i7core_dev(mce->socketid);
+ 	if (!i7_dev)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 
+ 	mci = i7_dev->mci;
+ 	pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 6251a0aeadcb..6aa25a3aa2c7 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2047,7 +2047,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	mci = get_mci_for_node_id(mce->socketid);
+ 	if (!mci)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 	pvt = mci->pvt_info;
+ 
+ 	/*
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 7f2ea21c730d..6f182fd91a6d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ 	{ NULL_GUID, "", NULL },
+ };
+ 
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ *              final "*" character matches any trailing characters @var_name,
++ *              including the case when there are none left in @var_name.
++ * @match: on output, the number of non-wildcard characters in @match_name
++ *         that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ 		 int *match)
+ {
+ 	for (*match = 0; ; (*match)++) {
+ 		char c = match_name[*match];
+-		char u = var_name[*match];
+ 
+-		/* Wildcard in the matching name means we've matched */
+-		if (c == '*')
++		switch (c) {
++		case '*':
++			/* Wildcard in @match_name means we've matched. */
+ 			return true;
+ 
+-		/* Case sensitive match */
+-		if (!c && *match == len)
+-			return true;
++		case '\0':
++			/* @match_name has ended. Has @var_name too? */
++			return (*match == len);
+ 
+-		if (c != u)
++		default:
++			/*
++			 * We've reached a non-wildcard char in @match_name.
++			 * Continue only if there's an identical character in
++			 * @var_name.
++			 */
++			if (*match < len && c == var_name[*match])
++				continue;
+ 			return false;
+-
+-		if (!c)
+-			return true;
++		}
+ 	}
+-	return true;
+ }
+ 
+ bool
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 563d3d2c54a9..c4f8e8f172cd 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1694,6 +1694,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 		req_payload.start_slot = cur_slots;
+ 		if (mgr->proposed_vcpis[i]) {
+ 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++			port = drm_dp_get_validated_port_ref(mgr, port);
++			if (!port) {
++				mutex_unlock(&mgr->payload_lock);
++				return -EINVAL;
++			}
+ 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ 		} else {
+ 			port = NULL;
+@@ -1719,6 +1724,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 			mgr->payloads[i].payload_state = req_payload.payload_state;
+ 		}
+ 		cur_slots += req_payload.num_slots;
++
++		if (port)
++			drm_dp_put_port(port);
+ 	}
+ 
+ 	for (i = 0; i < mgr->max_payloads; i++) {
+@@ -2005,6 +2013,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 	if (mgr->mst_primary) {
+ 		int sret;
++		u8 guid[16];
++
+ 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ 		if (sret != DP_RECEIVER_CAP_SIZE) {
+ 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+@@ -2019,6 +2029,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 			ret = -1;
+ 			goto out_unlock;
+ 		}
++
++		/* Some hubs forget their guids after they resume */
++		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++		if (sret != 16) {
++			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
++			ret = -1;
++			goto out_unlock;
++		}
++		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
++
+ 		ret = 0;
+ 	} else
+ 		ret = -1;
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 9212e6504e0f..f0e8e2a2c547 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -311,8 +311,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ 		pipe_config->has_pch_encoder = true;
+ 
+ 	/* LPT FDI RX only supports 8bpc. */
+-	if (HAS_PCH_LPT(dev))
++	if (HAS_PCH_LPT(dev)) {
++		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
++			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
++			return false;
++		}
++
+ 		pipe_config->pipe_bpp = 24;
++	}
+ 
+ 	/* FDI must always be 2.7 GHz */
+ 	if (HAS_DDI(dev)) {
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index b63d4fa204a3..4b476aa4ab05 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1515,12 +1515,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 	hsw_ddi_clock_get(encoder, pipe_config);
+ }
+ 
+-static void intel_ddi_destroy(struct drm_encoder *encoder)
+-{
+-	/* HDMI has nothing special to destroy, so we can go with this. */
+-	intel_dp_encoder_destroy(encoder);
+-}
+-
+ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ 				     struct intel_crtc_config *pipe_config)
+ {
+@@ -1539,7 +1533,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ }
+ 
+ static const struct drm_encoder_funcs intel_ddi_funcs = {
+-	.destroy = intel_ddi_destroy,
++	.reset = intel_dp_encoder_reset,
++	.destroy = intel_dp_encoder_destroy,
+ };
+ 
+ static struct intel_connector *
+@@ -1612,6 +1607,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
+ 	intel_encoder->post_disable = intel_ddi_post_disable;
+ 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ 	intel_encoder->get_config = intel_ddi_get_config;
++	intel_encoder->suspend = intel_dp_encoder_suspend;
+ 
+ 	intel_dig_port->port = port;
+ 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 3104d06aa20c..a915d729c33d 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4423,7 +4423,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+ 	kfree(intel_dig_port);
+ }
+ 
+-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+ {
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ 
+@@ -4440,9 +4440,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+ 	pps_unlock(intel_dp);
+ }
+ 
+-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
++static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+ {
+-	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
++	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
++	struct drm_device *dev = intel_dig_port->base.base.dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	enum intel_display_power_domain power_domain;
++
++	lockdep_assert_held(&dev_priv->pps_mutex);
++
++	if (!edp_have_panel_vdd(intel_dp))
++		return;
++
++	/*
++	 * The VDD bit needs a power domain reference, so if the bit is
++	 * already enabled when we boot or resume, grab this reference and
++	 * schedule a vdd off, so we don't hold on to the reference
++	 * indefinitely.
++	 */
++	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
++	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
++	intel_display_power_get(dev_priv, power_domain);
++
++	edp_panel_vdd_schedule_off(intel_dp);
++}
++
++void intel_dp_encoder_reset(struct drm_encoder *encoder)
++{
++	struct intel_dp *intel_dp;
++
++	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
++		return;
++
++	intel_dp = enc_to_intel_dp(encoder);
++
++	pps_lock(intel_dp);
++
++	/*
++	 * Read out the current power sequencer assignment,
++	 * in case the BIOS did something with it.
++	 */
++	if (IS_VALLEYVIEW(encoder->dev))
++		vlv_initial_power_sequencer_setup(intel_dp);
++
++	intel_edp_panel_vdd_sanitize(intel_dp);
++
++	pps_unlock(intel_dp);
+ }
+ 
+ static const struct drm_connector_funcs intel_dp_connector_funcs = {
+@@ -4924,37 +4967,6 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
+ 	return downclock_mode;
+ }
+ 
+-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
+-{
+-	struct drm_device *dev = intel_encoder->base.dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_dp *intel_dp;
+-	enum intel_display_power_domain power_domain;
+-
+-	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+-		return;
+-
+-	intel_dp = enc_to_intel_dp(&intel_encoder->base);
+-
+-	pps_lock(intel_dp);
+-
+-	if (!edp_have_panel_vdd(intel_dp))
+-		goto out;
+-	/*
+-	 * The VDD bit needs a power domain reference, so if the bit is
+-	 * already enabled when we boot or resume, grab this reference and
+-	 * schedule a vdd off, so we don't hold on to the reference
+-	 * indefinitely.
+-	 */
+-	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+-	power_domain = intel_display_port_power_domain(intel_encoder);
+-	intel_display_power_get(dev_priv, power_domain);
+-
+-	edp_panel_vdd_schedule_off(intel_dp);
+- out:
+-	pps_unlock(intel_dp);
+-}
+-
+ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 				     struct intel_connector *intel_connector,
+ 				     struct edp_power_seq *power_seq)
+@@ -4975,7 +4987,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 	if (!is_edp(intel_dp))
+ 		return true;
+ 
+-	intel_edp_panel_vdd_sanitize(intel_encoder);
++	pps_lock(intel_dp);
++	intel_edp_panel_vdd_sanitize(intel_dp);
++	pps_unlock(intel_dp);
+ 
+ 	/* Cache DPCD and EDID for edp. */
+ 	intel_edp_panel_vdd_on(intel_dp);
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index ba715229a540..87743dd1df48 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -925,6 +925,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp);
+ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
++void intel_dp_encoder_reset(struct drm_encoder *encoder);
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+ void intel_dp_check_link_status(struct intel_dp *intel_dp);
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+@@ -936,7 +938,6 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ void intel_edp_backlight_on(struct intel_dp *intel_dp);
+ void intel_edp_backlight_off(struct intel_dp *intel_dp);
+ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+-void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
+ void intel_edp_panel_on(struct intel_dp *intel_dp);
+ void intel_edp_panel_off(struct intel_dp *intel_dp);
+ void intel_edp_psr_enable(struct intel_dp *intel_dp);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 01701105653d..07dd3523425a 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -308,6 +308,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* vertical FP must be at least 1 */
++	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++		adjusted_mode->crtc_vsync_start++;
++
+ 	/* get the native mode for scaling */
+ 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ 		radeon_panel_mode_fixup(encoder, adjusted_mode);
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 6361d124f67d..14d45c70056e 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -103,19 +103,30 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+  *    there is room for the producer to send the pending packet.
+  */
+ 
+-static bool hv_need_to_signal_on_read(u32 old_rd,
+-					 struct hv_ring_buffer_info *rbi)
++static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+ {
+-	u32 prev_write_sz;
+ 	u32 cur_write_sz;
+ 	u32 r_size;
+-	u32 write_loc = rbi->ring_buffer->write_index;
++	u32 write_loc;
+ 	u32 read_loc = rbi->ring_buffer->read_index;
+-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
++	u32 pending_sz;
+ 
+ 	/*
+-	 * If the other end is not blocked on write don't bother.
++	 * Issue a full memory barrier before making the signaling decision.
++	 * Here is the reason for having this barrier:
++	 * If the reading of the pend_sz (in this function)
++	 * were to be reordered and read before we commit the new read
++	 * index (in the calling function)  we could
++	 * have a problem. If the host were to set the pending_sz after we
++	 * have sampled pending_sz and go to sleep before we commit the
++	 * read index, we could miss sending the interrupt. Issue a full
++	 * memory barrier to address this.
+ 	 */
++	mb();
++
++	pending_sz = rbi->ring_buffer->pending_send_sz;
++	write_loc = rbi->ring_buffer->write_index;
++	/* If the other end is not blocked on write don't bother. */
+ 	if (pending_sz == 0)
+ 		return false;
+ 
+@@ -123,22 +134,13 @@ static bool hv_need_to_signal_on_read(u32 old_rd,
+ 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ 			read_loc - write_loc;
+ 
+-	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+-			old_rd - write_loc;
+-
+-
+-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
++	if (cur_write_sz >= pending_sz)
+ 		return true;
+ 
+ 	return false;
+ }
+ 
+-/*
+- * hv_get_next_write_location()
+- *
+- * Get the next write location for the specified ring buffer
+- *
+- */
++/* Get the next write location for the specified ring buffer. */
+ static inline u32
+ hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -147,12 +149,7 @@ hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
+ 	return next;
+ }
+ 
+-/*
+- * hv_set_next_write_location()
+- *
+- * Set the next write location for the specified ring buffer
+- *
+- */
++/* Set the next write location for the specified ring buffer. */
+ static inline void
+ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ 		     u32 next_write_location)
+@@ -160,11 +157,7 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ 	ring_info->ring_buffer->write_index = next_write_location;
+ }
+ 
+-/*
+- * hv_get_next_read_location()
+- *
+- * Get the next read location for the specified ring buffer
+- */
++/* Get the next read location for the specified ring buffer. */
+ static inline u32
+ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -174,10 +167,8 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
+ }
+ 
+ /*
+- * hv_get_next_readlocation_withoffset()
+- *
+  * Get the next read location + offset for the specified ring buffer.
+- * This allows the caller to skip
++ * This allows the caller to skip.
+  */
+ static inline u32
+ hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
+@@ -191,13 +182,7 @@ hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
+ 	return next;
+ }
+ 
+-/*
+- *
+- * hv_set_next_read_location()
+- *
+- * Set the next read location for the specified ring buffer
+- *
+- */
++/* Set the next read location for the specified ring buffer. */
+ static inline void
+ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
+ 		    u32 next_read_location)
+@@ -206,12 +191,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
+ }
+ 
+ 
+-/*
+- *
+- * hv_get_ring_buffer()
+- *
+- * Get the start of the ring buffer
+- */
++/* Get the start of the ring buffer. */
+ static inline void *
+ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -219,25 +199,14 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+ }
+ 
+ 
+-/*
+- *
+- * hv_get_ring_buffersize()
+- *
+- * Get the size of the ring buffer
+- */
++/* Get the size of the ring buffer. */
+ static inline u32
+ hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
+ {
+ 	return ring_info->ring_datasize;
+ }
+ 
+-/*
+- *
+- * hv_get_ring_bufferindices()
+- *
+- * Get the read and write indices as u64 of the specified ring buffer
+- *
+- */
++/* Get the read and write indices as u64 of the specified ring buffer. */
+ static inline u64
+ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -245,12 +214,8 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
+ }
+ 
+ /*
+- *
+- * hv_copyfrom_ringbuffer()
+- *
+  * Helper routine to copy to source from ring buffer.
+  * Assume there is enough room. Handles wrap-around in src case only!!
+- *
+  */
+ static u32 hv_copyfrom_ringbuffer(
+ 	struct hv_ring_buffer_info	*ring_info,
+@@ -282,12 +247,8 @@ static u32 hv_copyfrom_ringbuffer(
+ 
+ 
+ /*
+- *
+- * hv_copyto_ringbuffer()
+- *
+  * Helper routine to copy from source to ring buffer.
+  * Assume there is enough room. Handles wrap-around in dest case only!!
+- *
+  */
+ static u32 hv_copyto_ringbuffer(
+ 	struct hv_ring_buffer_info	*ring_info,
+@@ -313,13 +274,7 @@ static u32 hv_copyto_ringbuffer(
+ 	return start_write_offset;
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_get_debuginfo()
+- *
+- * Get various debug metrics for the specified ring buffer
+- *
+- */
++/* Get various debug metrics for the specified ring buffer. */
+ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ 			    struct hv_ring_buffer_debug_info *debug_info)
+ {
+@@ -342,13 +297,7 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ 	}
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_init()
+- *
+- *Initialize the ring buffer
+- *
+- */
++/* Initialize the ring buffer. */
+ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ 		   void *buffer, u32 buflen)
+ {
+@@ -361,9 +310,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ 	ring_info->ring_buffer->read_index =
+ 		ring_info->ring_buffer->write_index = 0;
+ 
+-	/*
+-	 * Set the feature bit for enabling flow control.
+-	 */
++	/* Set the feature bit for enabling flow control. */
+ 	ring_info->ring_buffer->feature_bits.value = 1;
+ 
+ 	ring_info->ring_size = buflen;
+@@ -374,24 +321,12 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ 	return 0;
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_cleanup()
+- *
+- * Cleanup the ring buffer
+- *
+- */
++/* Cleanup the ring buffer. */
+ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
+ {
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_write()
+- *
+- * Write to the ring buffer
+- *
+- */
++/* Write to the ring buffer. */
+ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ 		    struct kvec *kv_list, u32 kv_count, bool *signal)
+ {
+@@ -416,10 +351,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ 				&bytes_avail_toread,
+ 				&bytes_avail_towrite);
+ 
+-
+-	/* If there is only room for the packet, assume it is full. */
+-	/* Otherwise, the next time around, we think the ring buffer */
+-	/* is empty since the read index == write index */
++	/*
++	 * If there is only room for the packet, assume it is full.
++	 * Otherwise, the next time around, we think the ring buffer
++	 * is empty since the read index == write index.
++	 */
+ 	if (bytes_avail_towrite <= totalbytes_towrite) {
+ 		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ 		return -EAGAIN;
+@@ -459,13 +395,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ }
+ 
+ 
+-/*
+- *
+- * hv_ringbuffer_peek()
+- *
+- * Read without advancing the read index
+- *
+- */
++/* Read without advancing the read index. */
+ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
+ 		   void *Buffer, u32 buflen)
+ {
+@@ -502,13 +432,7 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
+ }
+ 
+ 
+-/*
+- *
+- * hv_ringbuffer_read()
+- *
+- * Read and advance the read index
+- *
+- */
++/* Read and advance the read index. */
+ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 		   u32 buflen, u32 offset, bool *signal)
+ {
+@@ -517,7 +441,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 	u32 next_read_location = 0;
+ 	u64 prev_indices = 0;
+ 	unsigned long flags;
+-	u32 old_read;
+ 
+ 	if (buflen <= 0)
+ 		return -EINVAL;
+@@ -528,8 +451,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 				&bytes_avail_toread,
+ 				&bytes_avail_towrite);
+ 
+-	old_read = bytes_avail_toread;
+-
+ 	/* Make sure there is something to read */
+ 	if (bytes_avail_toread < buflen) {
+ 		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+@@ -550,9 +471,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 						sizeof(u64),
+ 						next_read_location);
+ 
+-	/* Make sure all reads are done before we update the read index since */
+-	/* the writer may start writing to the read area once the read index */
+-	/*is updated */
++	/*
++	 * Make sure all reads are done before we update the read index since
++	 * the writer may start writing to the read area once the read index
++	 * is updated.
++	 */
+ 	mb();
+ 
+ 	/* Update the read index */
+@@ -560,7 +483,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 
+ 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+ 
+-	*signal = hv_need_to_signal_on_read(old_read, inring_info);
++	*signal = hv_need_to_signal_on_read(inring_info);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index bf5ef077e791..f03c3bf9a780 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -160,6 +160,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 	int rc;
+ 	int irq;
+ 
++	init_waitqueue_head(&data->data_ready_queue);
++	clear_bit(0, &data->flags);
+ 	if (client->irq)
+ 		irq = client->irq;
+ 	else
+@@ -175,8 +177,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 		return rc;
+ 	}
+ 
+-	init_waitqueue_head(&data->data_ready_queue);
+-	clear_bit(0, &data->flags);
+ 	data->eoc_irq = irq;
+ 
+ 	return rc;
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index f2f63933e8a9..5befec118a18 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1104,6 +1105,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ 	struct ib_ucm_cmd_hdr hdr;
+ 	ssize_t result;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 45d67e9228d7..81dd84d0b68b 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1487,6 +1487,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ 	struct rdma_ucm_cmd_hdr hdr;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index d3abb7ea2dee..c221310005f8 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -48,6 +48,8 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
++
+ #include "uverbs.h"
+ 
+ MODULE_AUTHOR("Roland Dreier");
+@@ -610,6 +612,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ 	struct ib_uverbs_cmd_hdr hdr;
+ 	__u32 flags;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (count < sizeof hdr)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index b15e34eeef68..3ab8229b1d8c 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/delay.h>
+ #include <linux/export.h>
+ 
++#include <rdma/ib.h>
++
+ #include "qib.h"
+ #include "qib_common.h"
+ #include "qib_user_sdma.h"
+@@ -2058,6 +2060,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
+ 	ssize_t ret = 0;
+ 	void *dest;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd.type)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
+index a363ebbd9cc0..57f3d90d7b03 100644
+--- a/drivers/input/misc/max8997_haptic.c
++++ b/drivers/input/misc/max8997_haptic.c
+@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
+ 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ 	const struct max8997_platform_data *pdata =
+ 					dev_get_platdata(iodev->dev);
+-	const struct max8997_haptic_platform_data *haptic_pdata =
+-					pdata->haptic_pdata;
++	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
+ 	struct max8997_haptic *chip;
+ 	struct input_dev *input_dev;
+ 	int error;
+ 
++	if (pdata)
++		haptic_pdata = pdata->haptic_pdata;
++
+ 	if (!haptic_pdata) {
+ 		dev_err(&pdev->dev, "no haptic platform data\n");
+ 		return -EINVAL;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 6c169f18aab8..3d2cc7453ec1 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -282,6 +282,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ 	 * go away inside make_request
+ 	 */
+ 	sectors = bio_sectors(bio);
++	/* bio could be mergeable after passing to underlayer */
++	bio->bi_rw &= ~REQ_NOMERGE;
+ 	mddev->pers->make_request(mddev, bio);
+ 
+ 	cpu = part_stat_lock();
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index 6fe4027feb7d..7355d9c114ef 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -272,7 +272,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ void cxl_unmap_irq(unsigned int virq, void *cookie)
+ {
+ 	free_irq(virq, cookie);
+-	irq_dispose_mapping(virq);
+ }
+ 
+ static int cxl_register_one_irq(struct cxl *adapter,
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index 697be114e21a..ad9920c3dda7 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -302,7 +302,7 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
+-#define regulator_desc_s2mps11_buck6_10(num, min, step) {	\
++#define regulator_desc_s2mps11_buck67810(num, min, step) {	\
+ 	.name		= "BUCK"#num,				\
+ 	.id		= S2MPS11_BUCK##num,			\
+ 	.ops		= &s2mps11_buck_ops,			\
+@@ -318,6 +318,22 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
++#define regulator_desc_s2mps11_buck9 {				\
++	.name		= "BUCK9",				\
++	.id		= S2MPS11_BUCK9,			\
++	.ops		= &s2mps11_buck_ops,			\
++	.type		= REGULATOR_VOLTAGE,			\
++	.owner		= THIS_MODULE,				\
++	.min_uV		= MIN_3000_MV,				\
++	.uV_step	= STEP_25_MV,				\
++	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
++	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
++	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
++	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
++	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
++	.enable_mask	= S2MPS11_ENABLE_MASK			\
++}
++
+ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
+ 	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
+@@ -362,11 +378,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_buck1_4(3),
+ 	regulator_desc_s2mps11_buck1_4(4),
+ 	regulator_desc_s2mps11_buck5,
+-	regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
+-	regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
++	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck9,
++	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ };
+ 
+ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d8e1d5c1b9d2..74d856c7522b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5369,6 +5369,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	}
+ 
+ 	bos = udev->bos;
++	udev->bos = NULL;
+ 
+ 	for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+ 
+@@ -5461,11 +5462,8 @@ done:
+ 	usb_set_usb2_hardware_lpm(udev, 1);
+ 	usb_unlocked_enable_lpm(udev);
+ 	usb_enable_ltm(udev);
+-	/* release the new BOS descriptor allocated  by hub_port_init() */
+-	if (udev->bos != bos) {
+-		usb_release_bos_descriptor(udev);
+-		udev->bos = bos;
+-	}
++	usb_release_bos_descriptor(udev);
++	udev->bos = bos;
+ 	return 0;
+ 
+ re_enumerate:
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index d4541ac5bf48..642125d27df4 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -108,6 +108,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+ 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
+@@ -117,6 +118,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -140,6 +142,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
++	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index 735d7522a3a9..204659a5f6db 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
+ 	int retnamlen = 0;
+ 	int truncate = 0;
+ 	int ret = 0;
++	char *p;
++	int len;
+ 
+ 	if (!ISOFS_SB(inode->i_sb)->s_rock)
+ 		return 0;
+@@ -267,12 +269,17 @@ repeat:
+ 					rr->u.NM.flags);
+ 				break;
+ 			}
+-			if ((strlen(retname) + rr->len - 5) >= 254) {
++			len = rr->len - 5;
++			if (retnamlen + len >= 254) {
+ 				truncate = 1;
+ 				break;
+ 			}
+-			strncat(retname, rr->u.NM.name, rr->len - 5);
+-			retnamlen += rr->len - 5;
++			p = memchr(rr->u.NM.name, '\0', len);
++			if (unlikely(p))
++				len = p - rr->u.NM.name;
++			memcpy(retname + retnamlen, rr->u.NM.name, len);
++			retnamlen += len;
++			retname[retnamlen] = '\0';
+ 			break;
+ 		case SIG('R', 'E'):
+ 			kfree(rs.buffer);
+diff --git a/fs/namei.c b/fs/namei.c
+index 2a8ba683a888..e1976450a1e2 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2797,22 +2797,10 @@ no_open:
+ 		dentry = lookup_real(dir, dentry, nd->flags);
+ 		if (IS_ERR(dentry))
+ 			return PTR_ERR(dentry);
+-
+-		if (create_error) {
+-			int open_flag = op->open_flag;
+-
+-			error = create_error;
+-			if ((open_flag & O_EXCL)) {
+-				if (!dentry->d_inode)
+-					goto out;
+-			} else if (!dentry->d_inode) {
+-				goto out;
+-			} else if ((open_flag & O_TRUNC) &&
+-				   S_ISREG(dentry->d_inode->i_mode)) {
+-				goto out;
+-			}
+-			/* will fail later, go on to get the right error */
+-		}
++	}
++	if (create_error && !dentry->d_inode) {
++		error = create_error;
++		goto out;
+ 	}
+ looked_up:
+ 	path->dentry = dentry;
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index 7e8282dcea2a..8a7d2f812b5b 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -310,3 +310,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
+ 
+ 	return acl;
+ }
++
++int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl;
++	int ret;
++
++	if (S_ISLNK(inode->i_mode))
++		return -EOPNOTSUPP;
++
++	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++		return 0;
++
++	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
++	if (IS_ERR(acl) || !acl)
++		return PTR_ERR(acl);
++	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
++	if (ret)
++		return ret;
++	ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
++			    acl, NULL, NULL);
++	posix_acl_release(acl);
++	return ret;
++}
++
++/*
++ * Initialize the ACLs of a new inode. If parent directory has default ACL,
++ * then clone to new inode. Called from ocfs2_mknod.
++ */
++int ocfs2_init_acl(handle_t *handle,
++		   struct inode *inode,
++		   struct inode *dir,
++		   struct buffer_head *di_bh,
++		   struct buffer_head *dir_bh,
++		   struct ocfs2_alloc_context *meta_ac,
++		   struct ocfs2_alloc_context *data_ac)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl = NULL;
++	int ret = 0, ret2;
++	umode_t mode;
++
++	if (!S_ISLNK(inode->i_mode)) {
++		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
++			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
++						   dir_bh);
++			if (IS_ERR(acl))
++				return PTR_ERR(acl);
++		}
++		if (!acl) {
++			mode = inode->i_mode & ~current_umask();
++			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++			if (ret) {
++				mlog_errno(ret);
++				goto cleanup;
++			}
++		}
++	}
++	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
++		if (S_ISDIR(inode->i_mode)) {
++			ret = ocfs2_set_acl(handle, inode, di_bh,
++					    ACL_TYPE_DEFAULT, acl,
++					    meta_ac, data_ac);
++			if (ret)
++				goto cleanup;
++		}
++		mode = inode->i_mode;
++		ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
++		if (ret < 0)
++			return ret;
++
++		ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++		if (ret2) {
++			mlog_errno(ret2);
++			ret = ret2;
++			goto cleanup;
++		}
++		if (ret > 0) {
++			ret = ocfs2_set_acl(handle, inode,
++					    di_bh, ACL_TYPE_ACCESS,
++					    acl, meta_ac, data_ac);
++		}
++	}
++cleanup:
++	posix_acl_release(acl);
++	return ret;
++}
+diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
+index 3fce68d08625..2783a75b3999 100644
+--- a/fs/ocfs2/acl.h
++++ b/fs/ocfs2/acl.h
+@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
+ 			 struct posix_acl *acl,
+ 			 struct ocfs2_alloc_context *meta_ac,
+ 			 struct ocfs2_alloc_context *data_ac);
++extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
++extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
++			  struct buffer_head *, struct buffer_head *,
++			  struct ocfs2_alloc_context *,
++			  struct ocfs2_alloc_context *);
+ 
+ #endif /* OCFS2_ACL_H */
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index e6e8d6449b47..9d5adfd6b326 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1249,18 +1249,18 @@ bail_unlock_rw:
+ 	if (size_change)
+ 		ocfs2_rw_unlock(inode, 1);
+ bail:
+-	brelse(bh);
+ 
+ 	/* Release quota pointers in case we acquired them */
+ 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
+ 		dqput(transfer_to[qtype]);
+ 
+ 	if (!status && attr->ia_valid & ATTR_MODE) {
+-		status = posix_acl_chmod(inode, inode->i_mode);
++		status = ocfs2_acl_chmod(inode, bh);
+ 		if (status < 0)
+ 			mlog_errno(status);
+ 	}
+ 
++	brelse(bh);
+ 	return status;
+ }
+ 
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 9fc1daecdfb3..8f7f5de38e91 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -253,7 +253,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	struct ocfs2_dir_lookup_result lookup = { NULL, };
+ 	sigset_t oldset;
+ 	int did_block_signals = 0;
+-	struct posix_acl *default_acl = NULL, *acl = NULL;
+ 	struct ocfs2_dentry_lock *dl = NULL;
+ 
+ 	trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+@@ -356,14 +355,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 		goto leave;
+ 	}
+ 
+-	status = posix_acl_create(dir, &mode, &default_acl, &acl);
+-	if (status) {
+-		mlog_errno(status);
+-		goto leave;
+-	}
+-	/* update inode->i_mode after mask with "umask". */
+-	inode->i_mode = mode;
+-
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+ 							    xattr_credits));
+@@ -412,16 +403,8 @@ static int ocfs2_mknod(struct inode *dir,
+ 		inc_nlink(dir);
+ 	}
+ 
+-	if (default_acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_DEFAULT, default_acl,
+-				       meta_ac, data_ac);
+-	}
+-	if (!status && acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_ACCESS, acl,
+-				       meta_ac, data_ac);
+-	}
++	status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
++			 meta_ac, data_ac);
+ 
+ 	if (status < 0) {
+ 		mlog_errno(status);
+@@ -463,10 +446,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	d_instantiate(dentry, inode);
+ 	status = 0;
+ leave:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (status < 0 && did_quota_inode)
+ 		dquot_free_inode(inode);
+ 	if (handle)
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index d81f6e2a97f5..18e8b4d8447a 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -4268,20 +4268,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	struct inode *inode = old_dentry->d_inode;
+ 	struct buffer_head *old_bh = NULL;
+ 	struct inode *new_orphan_inode = NULL;
+-	struct posix_acl *default_acl, *acl;
+-	umode_t mode;
+ 
+ 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ 		return -EOPNOTSUPP;
+ 
+-	mode = inode->i_mode;
+-	error = posix_acl_create(dir, &mode, &default_acl, &acl);
+-	if (error) {
+-		mlog_errno(error);
+-		goto out;
+-	}
+ 
+-	error = ocfs2_create_inode_in_orphan(dir, mode,
++	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
+ 					     &new_orphan_inode);
+ 	if (error) {
+ 		mlog_errno(error);
+@@ -4320,16 +4312,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	/* If the security isn't preserved, we need to re-initialize them. */
+ 	if (!preserve) {
+ 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
+-						    &new_dentry->d_name,
+-						    default_acl, acl);
++						    &new_dentry->d_name);
+ 		if (error)
+ 			mlog_errno(error);
+ 	}
+ out:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (!error) {
+ 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
+ 						       new_dentry);
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 016f01df3825..c237008c010d 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7207,12 +7207,10 @@ out:
+  */
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl)
++				const struct qstr *qstr)
+ {
+-	struct buffer_head *dir_bh = NULL;
+ 	int ret = 0;
++	struct buffer_head *dir_bh = NULL;
+ 
+ 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+ 	if (ret) {
+@@ -7225,11 +7223,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ 		mlog_errno(ret);
+ 		goto leave;
+ 	}
+-
+-	if (!ret && default_acl)
+-		ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+-	if (!ret && acl)
+-		ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
++	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
++	if (ret)
++		mlog_errno(ret);
+ 
+ 	ocfs2_inode_unlock(dir, 0);
+ 	brelse(dir_bh);
+diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
+index f10d5b93c366..1633cc15ea1f 100644
+--- a/fs/ocfs2/xattr.h
++++ b/fs/ocfs2/xattr.h
+@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
+ 			 bool preserve_security);
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl);
++				const struct qstr *qstr);
+ #endif /* OCFS2_XATTR_H */
+diff --git a/fs/pnode.c b/fs/pnode.c
+index aae331a5d03b..18e56fc4a88c 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -198,10 +198,15 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
+ 
+ /* all accesses are serialized by namespace_sem */
+ static struct user_namespace *user_ns;
+-static struct mount *last_dest, *last_source, *dest_master;
++static struct mount *last_dest, *first_source, *last_source, *dest_master;
+ static struct mountpoint *mp;
+ static struct hlist_head *list;
+ 
++static inline bool peers(struct mount *m1, struct mount *m2)
++{
++	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
++}
++
+ static int propagate_one(struct mount *m)
+ {
+ 	struct mount *child;
+@@ -212,24 +217,26 @@ static int propagate_one(struct mount *m)
+ 	/* skip if mountpoint isn't covered by it */
+ 	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
+ 		return 0;
+-	if (m->mnt_group_id == last_dest->mnt_group_id) {
++	if (peers(m, last_dest)) {
+ 		type = CL_MAKE_SHARED;
+ 	} else {
+ 		struct mount *n, *p;
++		bool done;
+ 		for (n = m; ; n = p) {
+ 			p = n->mnt_master;
+-			if (p == dest_master || IS_MNT_MARKED(p)) {
+-				while (last_dest->mnt_master != p) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
+-				if (n->mnt_group_id != last_dest->mnt_group_id) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
++			if (p == dest_master || IS_MNT_MARKED(p))
+ 				break;
+-			}
+ 		}
++		do {
++			struct mount *parent = last_source->mnt_parent;
++			if (last_source == first_source)
++				break;
++			done = parent->mnt_master == p;
++			if (done && peers(n, parent))
++				break;
++			last_source = last_source->mnt_master;
++		} while (!done);
++
+ 		type = CL_SLAVE;
+ 		/* beginning of peer group among the slaves? */
+ 		if (IS_MNT_SHARED(m))
+@@ -280,6 +287,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
+ 	 */
+ 	user_ns = current->nsproxy->mnt_ns->user_ns;
+ 	last_dest = dest_mnt;
++	first_source = source_mnt;
+ 	last_source = source_mnt;
+ 	mp = dest_mp;
+ 	list = tree_list;
+diff --git a/include/linux/hash.h b/include/linux/hash.h
+index d0494c399392..a75b1009d3f7 100644
+--- a/include/linux/hash.h
++++ b/include/linux/hash.h
+@@ -33,12 +33,28 @@
+ #error Wordsize not 32 or 64
+ #endif
+ 
++/*
++ * The above primes are actively bad for hashing, since they are
++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
++ * real problems. Besides, the "prime" part is pointless for the
++ * multiplicative hash.
++ *
++ * Although a random odd number will do, it turns out that the golden
++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
++ * properties.
++ *
++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
++ * (See Knuth vol 3, section 6.4, exercise 9.)
++ */
++#define GOLDEN_RATIO_32 0x61C88647
++#define GOLDEN_RATIO_64 0x61C8864680B583EBull
++
+ static __always_inline u64 hash_64(u64 val, unsigned int bits)
+ {
+ 	u64 hash = val;
+ 
+-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+-	hash = hash * GOLDEN_RATIO_PRIME_64;
++#if BITS_PER_LONG == 64
++	hash = hash * GOLDEN_RATIO_64;
+ #else
+ 	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ 	u64 n = hash;
+diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
+index 7981a9d77d3f..ad81a1a7193f 100644
+--- a/include/linux/mfd/samsung/s2mps11.h
++++ b/include/linux/mfd/samsung/s2mps11.h
+@@ -173,10 +173,12 @@ enum s2mps11_regulators {
+ 
+ #define S2MPS11_LDO_VSEL_MASK	0x3F
+ #define S2MPS11_BUCK_VSEL_MASK	0xFF
++#define S2MPS11_BUCK9_VSEL_MASK	0x1F
+ #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
+ #define S2MPS11_ENABLE_SHIFT	0x06
+ #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
+ #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
++#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
+ #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
+ 
+ 
+diff --git a/include/rdma/ib.h b/include/rdma/ib.h
+index cf8f9e700e48..a6b93706b0fc 100644
+--- a/include/rdma/ib.h
++++ b/include/rdma/ib.h
+@@ -34,6 +34,7 @@
+ #define _RDMA_IB_H
+ 
+ #include <linux/types.h>
++#include <linux/sched.h>
+ 
+ struct ib_addr {
+ 	union {
+@@ -86,4 +87,19 @@ struct sockaddr_ib {
+ 	__u64			sib_scope_id;
+ };
+ 
++/*
++ * The IB interfaces that use write() as bi-directional ioctl() are
++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
++ * calls from various contexts with elevated privileges. That includes the
++ * traditional suid executable error message writes, but also various kernel
++ * interfaces that can write to file descriptors.
++ *
++ * This function provides protection for the legacy API by restricting the
++ * calling context.
++ */
++static inline bool ib_safe_file_access(struct file *filp)
++{
++	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
++}
++
+ #endif /* _RDMA_IB_H */
+diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
+index 6c8f159e416e..63467ce7c3eb 100644
+--- a/include/uapi/linux/v4l2-dv-timings.h
++++ b/include/uapi/linux/v4l2-dv-timings.h
+@@ -175,70 +175,80 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, 0) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, 0) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, 0) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, 0) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ }
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 3260ffdb368f..3c4e3116cdb1 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -956,13 +956,27 @@ EXPORT_SYMBOL(add_timer);
+  */
+ void add_timer_on(struct timer_list *timer, int cpu)
+ {
+-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
++	struct tvec_base *new_base = per_cpu(tvec_bases, cpu);
++	struct tvec_base *base;
+ 	unsigned long flags;
+ 
+ 	timer_stats_timer_set_start_info(timer);
+ 	BUG_ON(timer_pending(timer) || !timer->function);
+-	spin_lock_irqsave(&base->lock, flags);
+-	timer_set_base(timer, base);
++
++	/*
++	 * If @timer was on a different CPU, it should be migrated with the
++	 * old base locked to prevent other operations proceeding with the
++	 * wrong base locked.  See lock_timer_base().
++	 */
++	base = lock_timer_base(timer, &flags);
++	if (base != new_base) {
++		timer_set_base(timer, NULL);
++		spin_unlock(&base->lock);
++		base = new_base;
++		spin_lock(&base->lock);
++		timer_set_base(timer, base);
++	}
++
+ 	debug_activate(timer, timer->expires);
+ 	internal_add_timer(base, timer);
+ 	spin_unlock_irqrestore(&base->lock, flags);
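
The add_timer_on() fix above migrates a timer by first taking the lock of the base it is currently on, detaching it there, and only then re-attaching it under the new base's lock. A rough userspace analogue of that lock-the-old-owner-first pattern, using two mutex-protected counters in place of per-CPU timer bases; all names here are illustrative, not the kernel API:

#include <pthread.h>
#include <stdio.h>

struct base {
        pthread_mutex_t lock;
        int nr_items;
};

struct item {
        struct base *base;      /* base that currently owns the item */
};

/* Move @it to @new_base: detach under the old base's lock so no other
 * path can act on the item while it still points at a stale base, then
 * attach under the new base's lock. */
static void move_item(struct item *it, struct base *new_base)
{
        struct base *old = it->base;

        if (old == new_base)
                return;

        pthread_mutex_lock(&old->lock);
        old->nr_items--;
        it->base = NULL;                /* detached, old base still locked */
        pthread_mutex_unlock(&old->lock);

        pthread_mutex_lock(&new_base->lock);
        it->base = new_base;
        new_base->nr_items++;
        pthread_mutex_unlock(&new_base->lock);
}

int main(void)
{
        struct base a = { .nr_items = 1 };
        struct base b = { .nr_items = 0 };
        struct item it = { &a };

        pthread_mutex_init(&a.lock, NULL);
        pthread_mutex_init(&b.lock, NULL);
        move_item(&it, &b);
        printf("a=%d b=%d\n", a.nr_items, b.nr_items);  /* prints a=0 b=1 */
        return 0;
}
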
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index b6cd1b653e21..51c47bc832d4 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1584,8 +1584,13 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
+ 	trace_create_file("filter", 0644, file->dir, file,
+ 			  &ftrace_event_filter_fops);
+ 
+-	trace_create_file("trigger", 0644, file->dir, file,
+-			  &event_trigger_fops);
++	/*
++	 * Only event directories that can be enabled should have
++	 * triggers.
++	 */
++	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
++		trace_create_file("trigger", 0644, file->dir, file,
++				  &event_trigger_fops);
+ 
+ 	trace_create_file("format", 0444, file->dir, call,
+ 			  &ftrace_event_format_fops);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 2273f534b01a..0963b7fbc9a6 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -623,6 +623,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
+ 	 */
+ 	smp_wmb();
+ 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
++	/*
++	 * The following mb guarantees that previous clear of a PENDING bit
++	 * will not be reordered with any speculative LOADS or STORES from
++	 * work->current_func, which is executed afterwards.  This possible
++	 * reordering can lead to a missed execution on attempt to queue
++	 * the same @work.  E.g. consider this case:
++	 *
++	 *   CPU#0                         CPU#1
++	 *   ----------------------------  --------------------------------
++	 *
++	 * 1  STORE event_indicated
++	 * 2  queue_work_on() {
++	 * 3    test_and_set_bit(PENDING)
++	 * 4 }                             set_..._and_clear_pending() {
++	 * 5                                 set_work_data() # clear bit
++	 * 6                                 smp_mb()
++	 * 7                               work->current_func() {
++	 * 8				      LOAD event_indicated
++	 *				   }
++	 *
++	 * Without an explicit full barrier speculative LOAD on line 8 can
++	 * be executed before CPU#0 does STORE on line 1.  If that happens,
++	 * CPU#0 observes the PENDING bit is still set and new execution of
++	 * a @work is not queued in the hope that CPU#1 will eventually
++	 * finish the queued @work.  Meanwhile CPU#1 does not see
++	 * event_indicated is set, because speculative LOAD was executed
++	 * before actual STORE.
++	 */
++	smp_mb();
+ }
+ 
+ static void clear_work_data(struct work_struct *work)
+@@ -4513,6 +4542,17 @@ static void rebind_workers(struct worker_pool *pool)
+ 						  pool->attrs->cpumask) < 0);
+ 
+ 	spin_lock_irq(&pool->lock);
++
++	/*
++	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
++	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
++	 * being reworked and this can go away in time.
++	 */
++	if (!(pool->flags & POOL_DISASSOCIATED)) {
++		spin_unlock_irq(&pool->lock);
++		return;
++	}
++
+ 	pool->flags &= ~POOL_DISASSOCIATED;
+ 
+ 	for_each_pool_worker(worker, pool) {
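
The first workqueue hunk above adds a full memory barrier right after the PENDING bit is cleared, so that loads performed by the work function cannot be speculated ahead of the clear. A compressed userspace sketch of where such a fence sits, with C11 atomic_thread_fence() standing in for smp_mb(); the flag names mirror the comment's scenario but are otherwise illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;              /* stand-in for the PENDING bit */
static atomic_int event_indicated;      /* data the work function reads */

static void work_func(void)
{
        /* This load must not be reordered before the clear of `pending`
         * in the caller; otherwise a queueing CPU that still sees
         * `pending` set may skip re-queueing while we read stale data. */
        printf("event_indicated=%d\n",
               atomic_load_explicit(&event_indicated, memory_order_relaxed));
}

static void clear_pending_and_run(void)
{
        atomic_store_explicit(&pending, 0, memory_order_relaxed);  /* clear bit */
        atomic_thread_fence(memory_order_seq_cst);                 /* ~ smp_mb() */
        work_func();
}

int main(void)
{
        atomic_store(&pending, 1);
        atomic_store(&event_indicated, 1);
        clear_pending_and_run();
        return 0;
}
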
+diff --git a/mm/compaction.c b/mm/compaction.c
+index b47f08e159d4..8d010df763dc 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -784,16 +784,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+ 							ISOLATE_UNEVICTABLE);
+ 
+-		/*
+-		 * In case of fatal failure, release everything that might
+-		 * have been isolated in the previous iteration, and signal
+-		 * the failure back to caller.
+-		 */
+-		if (!pfn) {
+-			putback_movable_pages(&cc->migratepages);
+-			cc->nr_migratepages = 0;
++		if (!pfn)
+ 			break;
+-		}
+ 
+ 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ 			break;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index de984159cf0b..2e39d4e0ff09 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2081,10 +2081,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ 		 * page fault if needed.
+ 		 */
+ 		return 0;
+-	if (vma->vm_ops)
++	if (vma->vm_ops || (vm_flags & VM_NO_THP))
+ 		/* khugepaged not yet working on file or special mappings */
+ 		return 0;
+-	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
+ 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ 	hend = vma->vm_end & HPAGE_PMD_MASK;
+ 	if (hstart < hend)
+@@ -2407,8 +2406,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
+ 		return false;
+ 	if (is_vma_temporary_stack(vma))
+ 		return false;
+-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+-	return true;
++	return !(vma->vm_flags & VM_NO_THP);
+ }
+ 
+ static void collapse_huge_page(struct mm_struct *mm,
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index c5880124ec0d..98cd0e78c94c 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1739,6 +1739,7 @@ void nf_conntrack_init_end(void)
+ 
+ int nf_conntrack_init_net(struct net *net)
+ {
++	static atomic64_t unique_id;
+ 	int ret = -ENOMEM;
+ 	int cpu;
+ 
+@@ -1762,7 +1763,8 @@ int nf_conntrack_init_net(struct net *net)
+ 	if (!net->ct.stat)
+ 		goto err_pcpu_lists;
+ 
+-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
++				(u64)atomic64_inc_return(&unique_id));
+ 	if (!net->ct.slabname)
+ 		goto err_slabname;
+ 
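
The conntrack hunk above stops deriving the per-namespace slab cache name from a kernel pointer and numbers the caches from an atomic counter instead. A small userspace sketch of building unique names from such a counter; the prefix and helper are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static atomic_ullong unique_id;

/* Build a unique cache-style name without embedding any address. */
static char *make_cache_name(const char *prefix)
{
        unsigned long long id = atomic_fetch_add(&unique_id, 1) + 1;
        size_t len = strlen(prefix) + 32;
        char *name = malloc(len);

        if (name)
                snprintf(name, len, "%s_%llu", prefix, id);
        return name;
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                char *name = make_cache_name("nf_conntrack");
                printf("%s\n", name ? name : "(alloc failed)");
                free(name);
        }
        return 0;
}
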
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c3c8337c4ea2..4a69d6f1b8ef 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5413,6 +5413,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -6391,6 +6392,8 @@ enum {
+ 	ALC668_FIXUP_AUTO_MUTE,
+ 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
+ 	ALC668_FIXUP_DELL_XPS13,
++	ALC662_FIXUP_ASUS_Nx50,
++	ALC668_FIXUP_ASUS_Nx51,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -6631,6 +6634,21 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_bass_chmap,
+ 	},
++	[ALC662_FIXUP_ASUS_Nx50] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_auto_mute_via_amp,
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_1A
++	},
++	[ALC668_FIXUP_ASUS_Nx51] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{0x1a, 0x90170151}, /* bass speaker */
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_CHMAP,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -6651,9 +6669,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+-	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
++	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
++	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
++	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
++	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index c3f2decd643c..607c758febd8 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -361,7 +361,7 @@ static unsigned int bst_tlv[] = {
+ 
+ /* Interface data select */
+ static const char * const rt5640_data_select[] = {
+-	"Normal", "left copy to right", "right copy to left", "Swap"};
++	"Normal", "Swap", "left copy to right", "right copy to left"};
+ 
+ static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ 			    RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
+index 3deb8babeabb..243f42633989 100644
+--- a/sound/soc/codecs/rt5640.h
++++ b/sound/soc/codecs/rt5640.h
+@@ -442,39 +442,39 @@
+ #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
+ #define RT5640_IF1_DAC_SEL_SFT			14
+ #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
+-#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
+-#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
+-#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
++#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
++#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
++#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
+ #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
+ #define RT5640_IF1_ADC_SEL_SFT			12
+ #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
+-#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
+-#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
+-#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
++#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
++#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
++#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
+ #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
+ #define RT5640_IF2_DAC_SEL_SFT			10
+ #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
+-#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
+-#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
+-#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
++#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
++#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
++#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
+ #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
+ #define RT5640_IF2_ADC_SEL_SFT			8
+ #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
+-#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
+-#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
+-#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
++#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
++#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
++#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
+ #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
+ #define RT5640_IF3_DAC_SEL_SFT			6
+ #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
+-#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
+-#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
+-#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
++#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
++#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
++#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
+ #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
+ #define RT5640_IF3_ADC_SEL_SFT			4
+ #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
+-#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
+-#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
+-#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
++#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
++#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
++#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
+ 
+ /* REC Left Mixer Control 1 (0x3b) */
+ #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
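
The two rt5640 hunks above reorder the "Interface data select" texts and the *_SEL_* encodings so that the control's item index once again matches the value written into the register field. A tiny sketch of that index-to-field mapping, reusing the IF1 DAC shift from the header for illustration:

#include <stdio.h>

/* Item index must equal the 2-bit value programmed into the codec. */
static const char * const data_select[] = {
        "Normal", "Swap", "left copy to right", "right copy to left"
};

#define SEL_SFT         14                      /* IF1 DAC select shift */
#define SEL_MASK        (0x3 << SEL_SFT)

static unsigned int sel_to_regval(unsigned int item)
{
        return (item << SEL_SFT) & SEL_MASK;
}

int main(void)
{
        for (unsigned int i = 0; i < 4; i++)
                printf("%-22s -> 0x%04x\n", data_select[i], sel_to_regval(i));
        return 0;
}
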
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c7f5ff4d8f98..982c2df6d0b5 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1121,8 +1121,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
++	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
++	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}
+diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
+index b50234402fc2..88cccea3ca99 100644
+--- a/tools/lib/traceevent/parse-filter.c
++++ b/tools/lib/traceevent/parse-filter.c
+@@ -1058,6 +1058,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
+ 					*parg = current_op;
+ 				else
+ 					*parg = current_exp;
++				free(token);
+ 				return PEVENT_ERRNO__UNBALANCED_PAREN;
+ 			}
+ 			break;
+@@ -1163,11 +1164,12 @@ process_filter(struct event_format *event, struct filter_arg **parg,
+ 		current_op = current_exp;
+ 
+ 	ret = collapse_tree(current_op, parg, error_str);
++	/* collapse_tree() may free current_op, and updates parg accordingly */
++	current_op = NULL;
+ 	if (ret < 0)
+ 		goto fail;
+ 
+-	*parg = current_op;
+-
++	free(token);
+ 	return 0;
+ 
+  fail_alloc:
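
The parse-filter hunks above plug leaks of `token` on both the unbalanced-parenthesis error path and the normal return path. A minimal sketch of the underlying pattern of releasing a scratch buffer on every exit path; the placeholder parser is illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder "parse" step: succeed unless the input is empty. */
static int parse_one(const char *tok)
{
        return *tok ? 0 : -1;
}

static int process(const char *input)
{
        char *token = strdup(input);    /* scratch copy owned by us */
        int ret;

        if (!token)
                return -1;

        ret = parse_one(token);
        if (ret < 0)
                goto fail;

        free(token);                    /* released on success ... */
        return 0;

fail:
        free(token);                    /* ... and on every error path */
        return ret;
}

int main(void)
{
        printf("ok=%d err=%d\n", process("filter"), process(""));
        return 0;
}
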



Thread overview: 67+ messages
2016-05-24 12:03 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2017-04-18 10:20 [gentoo-commits] proj/linux-patches:3.18 commit in: / Mike Pagano
2017-03-02 16:33 Mike Pagano
2017-03-02 16:33 Mike Pagano
2017-02-08 11:16 Mike Pagano
2017-01-18 21:46 Mike Pagano
2017-01-18 21:38 Mike Pagano
2016-12-09  0:21 Mike Pagano
2016-11-30 15:42 Mike Pagano
2016-11-25 22:57 Mike Pagano
2016-11-01  9:36 Alice Ferrazzi
2016-10-12 19:51 Mike Pagano
2016-09-18 12:43 Mike Pagano
2016-08-22 23:27 Mike Pagano
2016-08-10 12:54 Mike Pagano
2016-07-31 15:27 Mike Pagano
2016-07-15 14:46 Mike Pagano
2016-07-13 23:28 Mike Pagano
2016-07-01 20:50 Mike Pagano
2016-06-23 11:44 Mike Pagano
2016-06-08 11:20 Mike Pagano
2016-05-12  0:10 Mike Pagano
2016-04-20 11:21 Mike Pagano
2016-04-06 11:21 Mike Pagano
2016-03-17 22:50 Mike Pagano
2016-03-05 21:08 Mike Pagano
2016-02-16 16:34 Mike Pagano
2016-01-31 15:36 Mike Pagano
2016-01-20 14:35 Mike Pagano
2015-12-17 17:12 Mike Pagano
2015-11-03 18:39 Mike Pagano
2015-10-30 18:38 Mike Pagano
2015-10-03 17:45 Mike Pagano
2015-09-10  0:15 Mike Pagano
2015-08-21 12:57 Mike Pagano
2015-07-30 12:43 Mike Pagano
2015-07-22 10:13 Mike Pagano
2015-07-10 23:44 Mike Pagano
2015-06-19 15:22 Mike Pagano
2015-05-22  0:48 Mike Pagano
2015-05-13 14:27 Mike Pagano
2015-04-29 17:31 Mike Pagano
2015-04-27 17:18 Mike Pagano
2015-04-05  0:05 Mike Pagano
2015-03-28 19:46 Mike Pagano
2015-03-24 23:19 Mike Pagano
2015-03-21 20:02 Mike Pagano
2015-03-07 14:28 Mike Pagano
2015-02-27 13:26 Mike Pagano
2015-02-14 20:32 Mike Pagano
2015-02-13  1:35 Mike Pagano
2015-02-11 14:35 Mike Pagano
2015-02-07  1:07 Mike Pagano
2015-01-30 11:01 Mike Pagano
2015-01-28 23:56 Anthony G. Basile
2015-01-28 23:55 Anthony G. Basile
2015-01-28 22:18 Anthony G. Basile
2015-01-16 18:31 Mike Pagano
2015-01-16  0:28 Mike Pagano
2015-01-09 13:39 Mike Pagano
2015-01-05 14:39 Mike Pagano
2015-01-04 19:03 Mike Pagano
2015-01-02 19:06 Mike Pagano
2015-01-01 14:15 Mike Pagano
2014-12-16 19:44 Mike Pagano
2014-12-09 20:49 Mike Pagano
2014-11-26  0:36 Mike Pagano

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1464091397.35a866a10473dea83b293537d3682b15140b8dbc.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

Be sure your reply has a Subject: header at the top and a blank line before the message body.