public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/hardened-patchset:master commit in: 4.6.5/, 4.6.4/
@ 2016-07-29 10:42 Anthony G. Basile
  0 siblings, 0 replies; only message in thread
From: Anthony G. Basile @ 2016-07-29 10:42 UTC (permalink / raw
  To: gentoo-commits

commit:     45fd70a4e4cc66277c3ad793b16e7ce60b592d37
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 29 10:42:10 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Fri Jul 29 10:42:10 2016 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=45fd70a4

grsecurity-3.1-4.6.5-201607272152

 {4.6.4 => 4.6.5}/0000_README                       |    6 +-
 4.6.5/1004_linux-4.6.5.patch                       | 7262 ++++++++++++++++++++
 .../4420_grsecurity-3.1-4.6.5-201607272152.patch   | 1741 +++--
 {4.6.4 => 4.6.5}/4425_grsec_remove_EI_PAX.patch    |    0
 {4.6.4 => 4.6.5}/4427_force_XATTR_PAX_tmpfs.patch  |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {4.6.4 => 4.6.5}/4435_grsec-mute-warnings.patch    |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    0
 {4.6.4 => 4.6.5}/4470_disable-compat_vdso.patch    |    0
 {4.6.4 => 4.6.5}/4475_emutramp_default_on.patch    |    0
 12 files changed, 8045 insertions(+), 964 deletions(-)

diff --git a/4.6.4/0000_README b/4.6.5/0000_README
similarity index 92%
rename from 4.6.4/0000_README
rename to 4.6.5/0000_README
index 81410da..016e706 100644
--- a/4.6.4/0000_README
+++ b/4.6.5/0000_README
@@ -2,7 +2,11 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.1-4.6.4-201607242014.patch
+Patch:	1004_linux-4.6.5.patch
+From:	http://www.kernel.org
+Desc:	Linux 4.6.5
+
+Patch:	4420_grsecurity-3.1-4.6.5-201607272152.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.6.5/1004_linux-4.6.5.patch b/4.6.5/1004_linux-4.6.5.patch
new file mode 100644
index 0000000..98b6b74
--- /dev/null
+++ b/4.6.5/1004_linux-4.6.5.patch
@@ -0,0 +1,7262 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+index 6708c5e..33e96f7 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
++++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+@@ -1,4 +1,4 @@
+-What		/sys/bus/iio/devices/iio:deviceX/in_proximity_raw
++What		/sys/bus/iio/devices/iio:deviceX/in_proximity_input
+ Date:		March 2014
+ KernelVersion:	3.15
+ Contact:	Matt Ranostay <mranostay@gmail.com>
+diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
+index 8638f61..37eca00 100644
+--- a/Documentation/scsi/scsi_eh.txt
++++ b/Documentation/scsi/scsi_eh.txt
+@@ -263,19 +263,23 @@ scmd->allowed.
+ 
+  3. scmd recovered
+     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
+-	- shost->host_failed--
+ 	- clear scmd->eh_eflags
+ 	- scsi_setup_cmd_retry()
+ 	- move from local eh_work_q to local eh_done_q
+     LOCKING: none
++    CONCURRENCY: at most one thread per separate eh_work_q to
++		 keep queue manipulation lockless
+ 
+  4. EH completes
+     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
+-	    layer of failure.
++	    layer of failure. May be called concurrently but must have
++	    a no more than one thread per separate eh_work_q to
++	    manipulate the queue locklessly
+ 	- scmd is removed from eh_done_q and scmd->eh_entry is cleared
+ 	- if retry is necessary, scmd is requeued using
+           scsi_queue_insert()
+ 	- otherwise, scsi_finish_command() is invoked for scmd
++	- zero shost->host_failed
+     LOCKING: queue or finish function performs appropriate locking
+ 
+ 
+diff --git a/Makefile b/Makefile
+index cd37442..7d693a8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 6
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Charred Weasel
+ 
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 8450944..22f7a13 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -58,8 +58,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+-			  MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
++			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+ 
+ 		internal-regs {
+ 
+diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
+index f6898c6..c937c85 100644
+--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
++++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
+@@ -52,7 +52,7 @@
+ 
+ / {
+ 	model = "NextThing C.H.I.P.";
+-	compatible = "nextthing,chip", "allwinner,sun5i-r8";
++	compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
+ 
+ 	aliases {
+ 		i2c0 = &i2c0;
+diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+index 68b479b..73c133f 100644
+--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
++++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+@@ -176,8 +176,6 @@
+ };
+ 
+ &reg_dc1sw {
+-	regulator-min-microvolt = <3000000>;
+-	regulator-max-microvolt = <3000000>;
+ 	regulator-name = "vcc-lcd";
+ };
+ 
+diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+index 360adfb..d6ad619 100644
+--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
++++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+@@ -135,8 +135,6 @@
+ 
+ &reg_dc1sw {
+ 	regulator-name = "vcc-lcd-usb2";
+-	regulator-min-microvolt = <3000000>;
+-	regulator-max-microvolt = <3000000>;
+ };
+ 
+ &reg_dc5ldo {
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index aeddd28..92fd2c8 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ 
+ #define pmd_large(pmd)		(pmd_val(pmd) & 2)
+ #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
++#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ #define copy_pmd(pmdpd,pmdps)		\
+ 	do {				\
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index dc46398..7411466 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ 						: !!(pmd_val(pmd) & (val)))
+ #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
+ 
++#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
+ #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
+ #define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
+ static inline pte_t pte_mkspecial(pte_t pte)
+@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+ 
+-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
+ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+ {
+-	return __pmd(0);
++	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
+ }
+ 
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 348caab..d622040 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+ 
+ #define pmd_none(pmd)		(!pmd_val(pmd))
+-#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ {
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index dded1b7..72b11d9 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -267,6 +267,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ 	kvm_timer_vcpu_terminate(vcpu);
+ 	kvm_vgic_vcpu_destroy(vcpu);
+ 	kvm_pmu_vcpu_destroy(vcpu);
++	kvm_vcpu_uninit(vcpu);
+ 	kmem_cache_free(kvm_vcpu_cache, vcpu);
+ }
+ 
+diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
+index a38b16b..b56de4b 100644
+--- a/arch/arm/mach-imx/mach-imx6ul.c
++++ b/arch/arm/mach-imx/mach-imx6ul.c
+@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
+ static void __init imx6ul_enet_phy_init(void)
+ {
+ 	if (IS_BUILTIN(CONFIG_PHYLIB))
+-		phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
++		phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
+ 					   ksz8081_phy_fixup);
+ }
+ 
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index 7e989d6..474abff 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -162,22 +162,16 @@ exit:
+ }
+ 
+ /*
+- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
+- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
+- * is needed as a workaround for a deadlock issue between the PCIe
+- * interface and the cache controller.
++ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
++ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
++ * needed for the HW I/O coherency mechanism to work properly without
++ * deadlock.
+  */
+ static void __iomem *
+-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+-			      unsigned int mtype, void *caller)
++armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
++			 unsigned int mtype, void *caller)
+ {
+-	struct resource pcie_mem;
+-
+-	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
+-
+-	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
+-		mtype = MT_UNCACHED;
+-
++	mtype = MT_UNCACHED;
+ 	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+ }
+ 
+@@ -186,7 +180,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
+ 	struct device_node *cache_dn;
+ 
+ 	coherency_cpu_base = of_iomap(np, 0);
+-	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
++	arch_ioremap_caller = armada_wa_ioremap_caller;
+ 
+ 	/*
+ 	 * We should switch the PL310 to I/O coherency mode only if
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index a307eb6..7f94755 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -117,6 +117,8 @@ struct pt_regs {
+ 	};
+ 	u64 orig_x0;
+ 	u64 syscallno;
++	u64 orig_addr_limit;
++	u64 unused;	// maintain 16 byte alignment
+ };
+ 
+ #define arch_has_single_step()	(1)
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 3ae6b31..1abcd88 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -59,6 +59,7 @@ int main(void)
+   DEFINE(S_PC,			offsetof(struct pt_regs, pc));
+   DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
+   DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
++  DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
+   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+   BLANK();
+   DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 12e8d2b..6c3b734 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -28,6 +28,7 @@
+ #include <asm/errno.h>
+ #include <asm/esr.h>
+ #include <asm/irq.h>
++#include <asm/memory.h>
+ #include <asm/thread_info.h>
+ #include <asm/unistd.h>
+ 
+@@ -97,7 +98,14 @@
+ 	mov	x29, xzr			// fp pointed to user-space
+ 	.else
+ 	add	x21, sp, #S_FRAME_SIZE
+-	.endif
++	get_thread_info tsk
++	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
++	ldr	x20, [tsk, #TI_ADDR_LIMIT]
++	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
++	mov	x20, #TASK_SIZE_64
++	str	x20, [tsk, #TI_ADDR_LIMIT]
++	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
++	.endif /* \el == 0 */
+ 	mrs	x22, elr_el1
+ 	mrs	x23, spsr_el1
+ 	stp	lr, x21, [sp, #S_LR]
+@@ -128,6 +136,14 @@
+ 	.endm
+ 
+ 	.macro	kernel_exit, el
++	.if	\el != 0
++	/* Restore the task's original addr_limit. */
++	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
++	str	x20, [tsk, #TI_ADDR_LIMIT]
++
++	/* No need to restore UAO, it will be restored from SPSR_EL1 */
++	.endif
++
+ 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
+ 	.if	\el == 0
+ 	ct_user_enter
+@@ -406,7 +422,6 @@ el1_irq:
+ 	bl	trace_hardirqs_off
+ #endif
+ 
+-	get_thread_info tsk
+ 	irq_handler
+ 
+ #ifdef CONFIG_PREEMPT
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index c539208..58651a9 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ 
+ 	/*
+ 	 * We need to switch to kernel mode so that we can use __get_user
+-	 * to safely read from kernel space.  Note that we now dump the
+-	 * code first, just in case the backtrace kills us.
++	 * to safely read from kernel space.
+ 	 */
+ 	fs = get_fs();
+ 	set_fs(KERNEL_DS);
+@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
+ 	print_ip_sym(where);
+ }
+ 
+-static void dump_instr(const char *lvl, struct pt_regs *regs)
++static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ {
+ 	unsigned long addr = instruction_pointer(regs);
+-	mm_segment_t fs;
+ 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ 	int i;
+ 
+-	/*
+-	 * We need to switch to kernel mode so that we can use __get_user
+-	 * to safely read from kernel space.  Note that we now dump the
+-	 * code first, just in case the backtrace kills us.
+-	 */
+-	fs = get_fs();
+-	set_fs(KERNEL_DS);
+-
+ 	for (i = -4; i < 1; i++) {
+ 		unsigned int val, bad;
+ 
+@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ 		}
+ 	}
+ 	printk("%sCode: %s\n", lvl, str);
++}
+ 
+-	set_fs(fs);
++static void dump_instr(const char *lvl, struct pt_regs *regs)
++{
++	if (!user_mode(regs)) {
++		mm_segment_t fs = get_fs();
++		set_fs(KERNEL_DS);
++		__dump_instr(lvl, regs);
++		set_fs(fs);
++	} else {
++		__dump_instr(lvl, regs);
++	}
+ }
+ 
+ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 10b79e9..e22849a9 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -284,7 +284,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
+ 	}
+ 
+ 	if (permission_fault(esr) && (addr < USER_DS)) {
+-		if (get_fs() == KERNEL_DS)
++		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
++		if (regs->orig_addr_limit == KERNEL_DS)
+ 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
+ 
+ 		if (!search_exception_tables(regs->pc))
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index dbd12ea..43a76b0 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
+ {
+ 	struct page *page = pte_page(pte);
+ 
+-	/* no flushing needed for anonymous pages */
+-	if (!page_mapping(page))
+-		return;
+-
+ 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ 		sync_icache_aliases(page_address(page),
+ 				    PAGE_SIZE << compound_order(page));
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 942b8f6..1907ab3 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -336,6 +336,7 @@ struct kvm_mips_tlb {
+ #define KVM_MIPS_GUEST_TLB_SIZE	64
+ struct kvm_vcpu_arch {
+ 	void *host_ebase, *guest_ebase;
++	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ 	unsigned long host_stack;
+ 	unsigned long host_gp;
+ 
+diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
+index 4ab4bdf..2143884 100644
+--- a/arch/mips/kvm/interrupt.h
++++ b/arch/mips/kvm/interrupt.h
+@@ -28,6 +28,7 @@
+ #define MIPS_EXC_MAX                12
+ /* XXXSL More to follow */
+ 
++extern char __kvm_mips_vcpu_run_end[];
+ extern char mips32_exception[], mips32_exceptionEnd[];
+ extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+ 
+diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
+index 81687ab..fc93a08 100644
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
+ 
+ 	/* Jump to guest */
+ 	eret
++EXPORT(__kvm_mips_vcpu_run_end)
+ 
+ VECTOR(MIPSX(exception), unknown)
+ /* Find out what mode we came from and jump to the proper handler. */
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 70ef1a4..e223cb3 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ 	memcpy(gebase + offset, mips32_GuestException,
+ 	       mips32_GuestExceptionEnd - mips32_GuestException);
+ 
++#ifdef MODULE
++	offset += mips32_GuestExceptionEnd - mips32_GuestException;
++	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
++	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
++	vcpu->arch.vcpu_run = gebase + offset;
++#else
++	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
++#endif
++
+ 	/* Invalidate the icache for these ranges */
+ 	local_flush_icache_range((unsigned long)gebase,
+ 				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+@@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	/* Disable hardware page table walking while in guest */
+ 	htw_stop();
+ 
+-	r = __kvm_mips_vcpu_run(run, vcpu);
++	r = vcpu->arch.vcpu_run(run, vcpu);
+ 
+ 	/* Re-enable HTW before enabling interrupts */
+ 	htw_start();
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index b8500b4..bec85055 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1501,6 +1501,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ 		current->thread.regs = regs - 1;
+ 	}
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/*
++	 * Clear any transactional state, we're exec()ing. The cause is
++	 * not important as there will never be a recheckpoint so it's not
++	 * user visible.
++	 */
++	if (MSR_TM_SUSPENDED(mfmsr()))
++		tm_reclaim_current(0);
++#endif
++
+ 	memset(regs->gpr, 0, sizeof(regs->gpr));
+ 	regs->ctr = 0;
+ 	regs->link = 0;
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index ccd2037..6ee4b72 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
+ 	 * must match by the macro below. Update the definition if
+ 	 * the structure layout changes.
+ 	 */
+-#define IBM_ARCH_VEC_NRCORES_OFFSET	125
++#define IBM_ARCH_VEC_NRCORES_OFFSET	133
+ 	W(NR_CPUS),			/* number of cores supported */
+ 	0,
+ 	0,
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index bd98ce2..3e8865b 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -912,7 +912,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
+ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 			struct ddw_query_response *query)
+ {
+-	struct eeh_dev *edev;
++	struct device_node *dn;
++	struct pci_dn *pdn;
+ 	u32 cfg_addr;
+ 	u64 buid;
+ 	int ret;
+@@ -923,11 +924,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 	 * Retrieve them from the pci device, not the node with the
+ 	 * dma-window property
+ 	 */
+-	edev = pci_dev_to_eeh_dev(dev);
+-	cfg_addr = edev->config_addr;
+-	if (edev->pe_config_addr)
+-		cfg_addr = edev->pe_config_addr;
+-	buid = edev->phb->buid;
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+ 
+ 	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
+ 		  cfg_addr, BUID_HI(buid), BUID_LO(buid));
+@@ -941,7 +941,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 			struct ddw_create_response *create, int page_shift,
+ 			int window_shift)
+ {
+-	struct eeh_dev *edev;
++	struct device_node *dn;
++	struct pci_dn *pdn;
+ 	u32 cfg_addr;
+ 	u64 buid;
+ 	int ret;
+@@ -952,11 +953,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 	 * Retrieve them from the pci device, not the node with the
+ 	 * dma-window property
+ 	 */
+-	edev = pci_dev_to_eeh_dev(dev);
+-	cfg_addr = edev->config_addr;
+-	if (edev->pe_config_addr)
+-		cfg_addr = edev->pe_config_addr;
+-	buid = edev->phb->buid;
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+ 
+ 	do {
+ 		/* extra outputs are LIOBN and dma-addr (hi, lo) */
+diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
+index 5e04f3c..8ae236b0 100644
+--- a/arch/s390/include/asm/fpu/api.h
++++ b/arch/s390/include/asm/fpu/api.h
+@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
+ 		"	la	%0,0\n"
+ 		"1:\n"
+ 		EX_TABLE(0b,1b)
+-		: "=d" (rc), "=d" (orig_fpc)
++		: "=d" (rc), "=&d" (orig_fpc)
+ 		: "d" (fpc), "0" (-EINVAL));
+ 	return rc;
+ }
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index f20abdb..d14069d 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -2064,12 +2064,5 @@ void s390_reset_system(void)
+ 	S390_lowcore.program_new_psw.addr =
+ 		(unsigned long) s390_base_pgm_handler;
+ 
+-	/*
+-	 * Clear subchannel ID and number to signal new kernel that no CCW or
+-	 * SCSI IPL has been done (for kexec and kdump)
+-	 */
+-	S390_lowcore.subchannel_id = 0;
+-	S390_lowcore.subchannel_nr = 0;
+-
+ 	do_reset_calls();
+ }
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 4324b87..9f0ce0e 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+ 	pgste = pgste_get_lock(ptep);
+ 	pgstev = pgste_val(pgste);
+ 	pte = *ptep;
+-	if (pte_swap(pte) &&
++	if (!reset && pte_swap(pte) &&
+ 	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
+ 	     (pgstev & _PGSTE_GPS_ZERO))) {
+ 		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index b1ef9e4..b67f9e8 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -171,6 +171,9 @@ isoimage: $(obj)/bzImage
+ 	for i in lib lib64 share end ; do \
+ 		if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ 			cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
++			if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
++				cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
++			fi ; \
+ 			break ; \
+ 		fi ; \
+ 		if [ $$i = end ] ; then exit 1 ; fi ; \
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 041e442..7eb806c 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2313,7 +2313,7 @@ void
+ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ {
+ 	struct stack_frame frame;
+-	const void __user *fp;
++	const unsigned long __user *fp;
+ 
+ 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ 		/* TODO: We don't support guest os callchain now */
+@@ -2326,7 +2326,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ 	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
+ 		return;
+ 
+-	fp = (void __user *)regs->bp;
++	fp = (unsigned long __user *)regs->bp;
+ 
+ 	perf_callchain_store(entry, regs->ip);
+ 
+@@ -2339,16 +2339,17 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ 	pagefault_disable();
+ 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ 		unsigned long bytes;
++
+ 		frame.next_frame	     = NULL;
+ 		frame.return_address = 0;
+ 
+-		if (!access_ok(VERIFY_READ, fp, 16))
++		if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+ 			break;
+ 
+-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
++		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
+ 		if (bytes != 0)
+ 			break;
+-		bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
++		bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
+ 		if (bytes != 0)
+ 			break;
+ 
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 1705c9d..78ee9eb 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -665,7 +665,7 @@ static void __init cleanup_rapl_pmus(void)
+ 	int i;
+ 
+ 	for (i = 0; i < rapl_pmus->maxpkg; i++)
+-		kfree(rapl_pmus->pmus + i);
++		kfree(rapl_pmus->pmus[i]);
+ 	kfree(rapl_pmus);
+ }
+ 
+diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
+index 7a79ee2..33c709c 100644
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -112,7 +112,7 @@ static inline void native_write_msr(unsigned int msr,
+ 				    unsigned low, unsigned high)
+ {
+ 	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+-	if (msr_tracepoint_active(__tracepoint_read_msr))
++	if (msr_tracepoint_active(__tracepoint_write_msr))
+ 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+ }
+ 
+@@ -131,7 +131,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
+ 		     : "c" (msr), "0" (low), "d" (high),
+ 		       [fault] "i" (-EIO)
+ 		     : "memory");
+-	if (msr_tracepoint_active(__tracepoint_read_msr))
++	if (msr_tracepoint_active(__tracepoint_write_msr))
+ 		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
+ 	return err;
+ }
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index a147e67..e991d5c 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
+ 	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+ 		i++;
+ 
+-	if (i == 0)
+-		return 0;
++	if (!i)
++		return -ENODEV;
+ 
+ 	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+ 	if (!nb)
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index ae703ac..44bcd57 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -960,7 +960,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+ 		 * normal page fault.
+ 		 */
+ 		regs->ip = (unsigned long)cur->addr;
++		/*
++		 * Trap flag (TF) has been set here because this fault
++		 * happened where the single stepping will be done.
++		 * So clear it by resetting the current kprobe:
++		 */
++		regs->flags &= ~X86_EFLAGS_TF;
++
++		/*
++		 * If the TF flag was set before the kprobe hit,
++		 * don't touch it:
++		 */
+ 		regs->flags |= kcb->kprobe_old_flags;
++
+ 		if (kcb->kprobe_status == KPROBE_REENTER)
+ 			restore_previous_kprobe(kcb);
+ 		else
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index faf52bac..c4217a2 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
+ 	unsigned int dest;
+ 
+ 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP))
++		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
++		!kvm_vcpu_apicv_active(vcpu))
+ 		return;
+ 
+ 	do {
+@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
+ 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ 
+ 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP))
++		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
++		!kvm_vcpu_apicv_active(vcpu))
+ 		return;
+ 
+ 	/* Set SN when the vCPU is preempted */
+@@ -6657,7 +6659,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
+ 
+ 	/* Checks for #GP/#SS exceptions. */
+ 	exn = false;
+-	if (is_protmode(vcpu)) {
++	if (is_long_mode(vcpu)) {
++		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
++		 * non-canonical form. This is the only check on the memory
++		 * destination for long mode!
++		 */
++		exn = is_noncanonical_address(*ret);
++	} else if (is_protmode(vcpu)) {
+ 		/* Protected mode: apply checks for segment validity in the
+ 		 * following order:
+ 		 * - segment type check (#GP(0) may be thrown)
+@@ -6674,17 +6682,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
+ 			 * execute-only code segment
+ 			 */
+ 			exn = ((s.type & 0xa) == 8);
+-	}
+-	if (exn) {
+-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+-		return 1;
+-	}
+-	if (is_long_mode(vcpu)) {
+-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+-		 * non-canonical form. This is an only check for long mode.
+-		 */
+-		exn = is_noncanonical_address(*ret);
+-	} else if (is_protmode(vcpu)) {
++		if (exn) {
++			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
++			return 1;
++		}
+ 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+ 		 */
+ 		exn = (s.unusable != 0);
+@@ -10702,7 +10703,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
+ 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ 
+ 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP))
++		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
++		!kvm_vcpu_apicv_active(vcpu))
+ 		return 0;
+ 
+ 	vcpu->pre_pcpu = vcpu->cpu;
+@@ -10768,7 +10770,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
+ 	unsigned long flags;
+ 
+ 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP))
++		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
++		!kvm_vcpu_apicv_active(vcpu))
+ 		return;
+ 
+ 	do {
+@@ -10821,7 +10824,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 	int idx, ret = -EINVAL;
+ 
+ 	if (!kvm_arch_has_assigned_device(kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP))
++		!irq_remapping_cap(IRQ_POSTING_CAP) ||
++		!kvm_vcpu_apicv_active(kvm->vcpus[0]))
+ 		return 0;
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index ead8dc0..8ba4266 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -102,10 +102,10 @@ struct pkcs1pad_inst_ctx {
+ };
+ 
+ struct pkcs1pad_request {
+-	struct akcipher_request child_req;
+-
+ 	struct scatterlist in_sg[3], out_sg[2];
+ 	uint8_t *in_buf, *out_buf;
++
++	struct akcipher_request child_req;
+ };
+ 
+ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 961acc7..91a9e6a 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
+ 	ata_scsi_port_error_handler(host, ap);
+ 
+ 	/* finish or retry handled scmd's and clean up */
+-	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
++	WARN_ON(!list_empty(&eh_work_q));
+ 
+ 	DPRINTK("EXIT\n");
+ }
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index db930d3..2a21578 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
+ 
+ static void module_create_drivers_dir(struct module_kobject *mk)
+ {
+-	if (!mk || mk->drivers_dir)
+-		return;
++	static DEFINE_MUTEX(drivers_dir_mutex);
+ 
+-	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++	mutex_lock(&drivers_dir_mutex);
++	if (mk && !mk->drivers_dir)
++		mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++	mutex_unlock(&drivers_dir_mutex);
+ }
+ 
+ void module_add_driver(struct module *mod, struct device_driver *drv)
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 94fb407..44b1bd6 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
+ 	while (!list_empty(&intf->waiting_rcv_msgs)) {
+ 		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+ 				     struct ipmi_smi_msg, link);
++		list_del(&smi_msg->link);
+ 		if (!run_to_completion)
+ 			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ 					       flags);
+@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
+ 		if (rv > 0) {
+ 			/*
+ 			 * To preserve message order, quit if we
+-			 * can't handle a message.
++			 * can't handle a message.  Add the message
++			 * back at the head, this is safe because this
++			 * tasklet is the only thing that pulls the
++			 * messages.
+ 			 */
++			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+ 			break;
+ 		} else {
+-			list_del(&smi_msg->link);
+ 			if (rv == 0)
+ 				/* Message handled */
+ 				ipmi_free_smi_msg(smi_msg);
+diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
+index 29c7c53..92561c8 100644
+--- a/drivers/crypto/qat/qat_common/Makefile
++++ b/drivers/crypto/qat/qat_common/Makefile
+@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
+ 			     $(obj)/qat_rsapubkey-asn1.h
+ $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
+ 			      $(obj)/qat_rsaprivkey-asn1.h
++$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
+ 
+ clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
+ clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 1472f48..ff51b51 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
+ 	list_for_each(item, &mc_devices) {
+ 		mci = list_entry(item, struct mem_ctl_info, link);
+ 
+-		edac_mod_work(&mci->work, value);
++		if (mci->op_state == OP_RUNNING_POLL)
++			edac_mod_work(&mci->work, value);
+ 	}
+ 	mutex_unlock(&mem_ctls_mutex);
+ }
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 8bf745d..b274fa2 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
+ 	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
+ };
+ 
+-#define RIR_RNK_TGT(reg)		GET_BITFIELD(reg, 16, 19)
+-#define RIR_OFFSET(reg)		GET_BITFIELD(reg,  2, 14)
++#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
++	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
++
++#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
++	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
+ 
+ /* Device 16, functions 2-7 */
+ 
+@@ -1916,14 +1919,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 				pci_read_config_dword(pvt->pci_tad[i],
+ 						      rir_offset[j][k],
+ 						      &reg);
+-				tmp_mb = RIR_OFFSET(reg) << 6;
++				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
+ 
+ 				gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ 					 i, j, k,
+ 					 gb, (mb*1000)/1024,
+ 					 ((u64)tmp_mb) << 20L,
+-					 (u32)RIR_RNK_TGT(reg),
++					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
+ 					 reg);
+ 			}
+ 		}
+@@ -2256,7 +2259,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
+ 			      rir_offset[n_rir][idx],
+ 			      &reg);
+-	*rank = RIR_RNK_TGT(reg);
++	*rank = RIR_RNK_TGT(pvt->info.type, reg);
+ 
+ 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
+ 		 n_rir,
+diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
+index 8b3226d..caff46c 100644
+--- a/drivers/extcon/extcon-palmas.c
++++ b/drivers/extcon/extcon-palmas.c
+@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
+ 
+ 	palmas_enable_irq(palmas_usb);
+ 	/* perform initial detection */
++	if (palmas_usb->enable_gpio_vbus_detection)
++		palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
+ 	palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
+ 	device_set_wakeup_capable(&pdev->dev, true);
+ 	return 0;
+diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
+index e85e753..eb43ae4 100644
+--- a/drivers/gpio/gpio-sch.c
++++ b/drivers/gpio/gpio-sch.c
+@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
+ 	return gpio % 8;
+ }
+ 
+-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
++static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
+ {
+-	struct sch_gpio *sch = gpiochip_get_data(gc);
+ 	unsigned short offset, bit;
+ 	u8 reg_val;
+ 
+@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+ 	return reg_val;
+ }
+ 
+-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
++static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
+ 			     int val)
+ {
+-	struct sch_gpio *sch = gpiochip_get_data(gc);
+ 	unsigned short offset, bit;
+ 	u8 reg_val;
+ 
+@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
+ 	struct sch_gpio *sch = gpiochip_get_data(gc);
+ 
+ 	spin_lock(&sch->lock);
+-	sch_gpio_reg_set(gc, gpio_num, GIO, 1);
++	sch_gpio_reg_set(sch, gpio_num, GIO, 1);
+ 	spin_unlock(&sch->lock);
+ 	return 0;
+ }
+ 
+ static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
+ {
+-	return sch_gpio_reg_get(gc, gpio_num, GLV);
++	struct sch_gpio *sch = gpiochip_get_data(gc);
++	return sch_gpio_reg_get(sch, gpio_num, GLV);
+ }
+ 
+ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
+@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
+ 	struct sch_gpio *sch = gpiochip_get_data(gc);
+ 
+ 	spin_lock(&sch->lock);
+-	sch_gpio_reg_set(gc, gpio_num, GLV, val);
++	sch_gpio_reg_set(sch, gpio_num, GLV, val);
+ 	spin_unlock(&sch->lock);
+ }
+ 
+@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
+ 	struct sch_gpio *sch = gpiochip_get_data(gc);
+ 
+ 	spin_lock(&sch->lock);
+-	sch_gpio_reg_set(gc, gpio_num, GIO, 0);
++	sch_gpio_reg_set(sch, gpio_num, GIO, 0);
+ 	spin_unlock(&sch->lock);
+ 
+ 	/*
+@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
+ 		 * GPIO7 is configured by the CMC as SLPIOVR
+ 		 * Enable GPIO[9:8] core powered gpios explicitly
+ 		 */
+-		sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
+-		sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
++		sch_gpio_reg_set(sch, 8, GEN, 1);
++		sch_gpio_reg_set(sch, 9, GEN, 1);
+ 		/*
+ 		 * SUS_GPIO[2:0] enabled by default
+ 		 * Enable SUS_GPIO3 resume powered gpio explicitly
+ 		 */
+-		sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
++		sch_gpio_reg_set(sch, 13, GEN, 1);
+ 		break;
+ 
+ 	case PCI_DEVICE_ID_INTEL_ITC_LPC:
+diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
+index 3a5c701..8b83099 100644
+--- a/drivers/gpio/gpiolib-legacy.c
++++ b/drivers/gpio/gpiolib-legacy.c
+@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+ 	if (!desc && gpio_is_valid(gpio))
+ 		return -EPROBE_DEFER;
+ 
++	err = gpiod_request(desc, label);
++	if (err)
++		return err;
++
+ 	if (flags & GPIOF_OPEN_DRAIN)
+ 		set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ 
+@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+ 	if (flags & GPIOF_ACTIVE_LOW)
+ 		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ 
+-	err = gpiod_request(desc, label);
+-	if (err)
+-		return err;
+-
+ 	if (flags & GPIOF_DIR_IN)
+ 		err = gpiod_direction_input(desc);
+ 	else
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index cf3e712..996a733 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1324,14 +1324,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
+ 		spin_lock_irqsave(&gpio_lock, flags);
+ 	}
+ done:
+-	if (status < 0) {
+-		/* Clear flags that might have been set by the caller before
+-		 * requesting the GPIO.
+-		 */
+-		clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+-		clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
+-		clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
+-	}
+ 	spin_unlock_irqrestore(&gpio_lock, flags);
+ 	return status;
+ }
+@@ -1345,8 +1337,12 @@ done:
+ #define VALIDATE_DESC(desc) do { \
+ 	if (!desc) \
+ 		return 0; \
++	if (IS_ERR(desc)) {						\
++		pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
++		return PTR_ERR(desc); \
++	} \
+ 	if (!desc->gdev) { \
+-		pr_warn("%s: invalid GPIO\n", __func__); \
++		pr_warn("%s: invalid GPIO (no device)\n", __func__); \
+ 		return -EINVAL; \
+ 	} \
+ 	if ( !desc->gdev->chip ) { \
+@@ -1358,8 +1354,12 @@ done:
+ #define VALIDATE_DESC_VOID(desc) do { \
+ 	if (!desc) \
+ 		return; \
++	if (IS_ERR(desc)) {						\
++		pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
++		return; \
++	} \
+ 	if (!desc->gdev) { \
+-		pr_warn("%s: invalid GPIO\n", __func__); \
++		pr_warn("%s: invalid GPIO (no device)\n", __func__); \
+ 		return; \
+ 	} \
+ 	if (!desc->gdev->chip) { \
+@@ -2011,7 +2011,7 @@ int gpiod_to_irq(const struct gpio_desc *desc)
+ 	 * requires this function to not return zero on an invalid descriptor
+ 	 * but rather a negative error number.
+ 	 */
+-	if (!desc || !desc->gdev || !desc->gdev->chip)
++	if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+ 		return -EINVAL;
+ 
+ 	chip = desc->gdev->chip;
+@@ -2507,28 +2507,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(gpiod_get_optional);
+ 
+-/**
+- * gpiod_parse_flags - helper function to parse GPIO lookup flags
+- * @desc:	gpio to be setup
+- * @lflags:	gpio_lookup_flags - returned from of_find_gpio() or
+- *		of_get_gpio_hog()
+- *
+- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
+- */
+-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
+-{
+-	if (lflags & GPIO_ACTIVE_LOW)
+-		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+-	if (lflags & GPIO_OPEN_DRAIN)
+-		set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+-	if (lflags & GPIO_OPEN_SOURCE)
+-		set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+-}
+ 
+ /**
+  * gpiod_configure_flags - helper function to configure a given GPIO
+  * @desc:	gpio whose value will be assigned
+  * @con_id:	function within the GPIO consumer
++ * @lflags:	gpio_lookup_flags - returned from of_find_gpio() or
++ *		of_get_gpio_hog()
+  * @dflags:	gpiod_flags - optional GPIO initialization flags
+  *
+  * Return 0 on success, -ENOENT if no GPIO has been assigned to the
+@@ -2536,10 +2521,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
+  * occurred while trying to acquire the GPIO.
+  */
+ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
+-				 enum gpiod_flags dflags)
++		unsigned long lflags, enum gpiod_flags dflags)
+ {
+ 	int status;
+ 
++	if (lflags & GPIO_ACTIVE_LOW)
++		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
++	if (lflags & GPIO_OPEN_DRAIN)
++		set_bit(FLAG_OPEN_DRAIN, &desc->flags);
++	if (lflags & GPIO_OPEN_SOURCE)
++		set_bit(FLAG_OPEN_SOURCE, &desc->flags);
++
+ 	/* No particular flag request, return here... */
+ 	if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
+ 		pr_debug("no flags found for %s\n", con_id);
+@@ -2606,13 +2598,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ 		return desc;
+ 	}
+ 
+-	gpiod_parse_flags(desc, lookupflags);
+-
+ 	status = gpiod_request(desc, con_id);
+ 	if (status < 0)
+ 		return ERR_PTR(status);
+ 
+-	status = gpiod_configure_flags(desc, con_id, flags);
++	status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
+ 	if (status < 0) {
+ 		dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
+ 		gpiod_put(desc);
+@@ -2668,6 +2658,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
+ 	if (IS_ERR(desc))
+ 		return desc;
+ 
++	ret = gpiod_request(desc, NULL);
++	if (ret)
++		return ERR_PTR(ret);
++
+ 	if (active_low)
+ 		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ 
+@@ -2678,10 +2672,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
+ 			set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ 	}
+ 
+-	ret = gpiod_request(desc, NULL);
+-	if (ret)
+-		return ERR_PTR(ret);
+-
+ 	return desc;
+ }
+ EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
+@@ -2734,8 +2724,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
+ 	chip = gpiod_to_chip(desc);
+ 	hwnum = gpio_chip_hwgpio(desc);
+ 
+-	gpiod_parse_flags(desc, lflags);
+-
+ 	local_desc = gpiochip_request_own_desc(chip, hwnum, name);
+ 	if (IS_ERR(local_desc)) {
+ 		pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
+@@ -2743,7 +2731,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
+ 		return PTR_ERR(local_desc);
+ 	}
+ 
+-	status = gpiod_configure_flags(desc, name, dflags);
++	status = gpiod_configure_flags(desc, name, lflags, dflags);
+ 	if (status < 0) {
+ 		pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
+ 		       name, chip->label, hwnum);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 6043dc7..3e21732 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -880,7 +880,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+ 	struct cgs_acpi_method_argument *argument = NULL;
+ 	uint32_t i, count;
+ 	acpi_status status;
+-	int result;
++	int result = 0;
+ 	uint32_t func_no = 0xFFFFFFFF;
+ 
+ 	handle = ACPI_HANDLE(&adev->pdev->dev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index b04337d..d78739d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -448,7 +448,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ 			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
+ 		}
+ 		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
+-		dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
++		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
++			adev->gfx.config.max_shader_engines;
+ 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
+ 		dev_info._pad = 0;
+ 		dev_info.ids_flags = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index bb8709066..d2216f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -5074,7 +5074,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
+ 	case 2:
+ 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ 			ring = &adev->gfx.compute_ring[i];
+-			if ((ring->me == me_id) & (ring->pipe == pipe_id))
++			if ((ring->me == me_id) && (ring->pipe == pipe_id))
+ 				amdgpu_fence_process(ring);
+ 		}
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index ac00579..7708d90 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ 	pqm_uninit(&p->pqm);
+ 
+ 	/* Iterate over all process device data structure and check
+-	 * if we should reset all wavefronts */
+-	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
++	 * if we should delete debug managers and reset all wavefronts
++	 */
++	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
++		if ((pdd->dev->dbgmgr) &&
++				(pdd->dev->dbgmgr->pasid == p->pasid))
++			kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
++
+ 		if (pdd->reset_wavefronts) {
+ 			pr_warn("amdkfd: Resetting all wave fronts\n");
+ 			dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ 			pdd->reset_wavefronts = false;
+ 		}
++	}
+ 
+ 	mutex_unlock(&p->mutex);
+ 
+@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+ 
+ 	idx = srcu_read_lock(&kfd_processes_srcu);
+ 
++	/*
++	 * Look for the process that matches the pasid. If there is no such
++	 * process, we either released it in amdkfd's own notifier, or there
++	 * is a bug. Unfortunately, there is no way to tell...
++	 */
+ 	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
+-		if (p->pasid == pasid)
+-			break;
++		if (p->pasid == pasid) {
+ 
+-	srcu_read_unlock(&kfd_processes_srcu, idx);
++			srcu_read_unlock(&kfd_processes_srcu, idx);
+ 
+-	BUG_ON(p->pasid != pasid);
++			pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ 
+-	mutex_lock(&p->mutex);
++			mutex_lock(&p->mutex);
+ 
+-	if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+-		kfd_dbgmgr_destroy(dev->dbgmgr);
++			if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
++				kfd_dbgmgr_destroy(dev->dbgmgr);
+ 
+-	pqm_uninit(&p->pqm);
++			pqm_uninit(&p->pqm);
+ 
+-	pdd = kfd_get_process_device_data(dev, p);
++			pdd = kfd_get_process_device_data(dev, p);
+ 
+-	if (!pdd) {
+-		mutex_unlock(&p->mutex);
+-		return;
+-	}
++			if (!pdd) {
++				mutex_unlock(&p->mutex);
++				return;
++			}
+ 
+-	if (pdd->reset_wavefronts) {
+-		dbgdev_wave_reset_wavefronts(pdd->dev, p);
+-		pdd->reset_wavefronts = false;
+-	}
++			if (pdd->reset_wavefronts) {
++				dbgdev_wave_reset_wavefronts(pdd->dev, p);
++				pdd->reset_wavefronts = false;
++			}
+ 
+-	/*
+-	 * Just mark pdd as unbound, because we still need it to call
+-	 * amd_iommu_unbind_pasid() in when the process exits.
+-	 * We don't call amd_iommu_unbind_pasid() here
+-	 * because the IOMMU called us.
+-	 */
+-	pdd->bound = false;
++			/*
++			 * Just mark pdd as unbound, because we still need it
++			 * to call amd_iommu_unbind_pasid() in when the
++			 * process exits.
++			 * We don't call amd_iommu_unbind_pasid() here
++			 * because the IOMMU called us.
++			 */
++			pdd->bound = false;
+ 
+-	mutex_unlock(&p->mutex);
++			mutex_unlock(&p->mutex);
++
++			return;
++		}
++
++	srcu_read_unlock(&kfd_processes_srcu, idx);
+ }
+ 
+ struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+index fa208ad..efb77ed 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+@@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
+ {
+ 	PHM_FUNC_CHECK(hwmgr);
+ 
+-	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
++	if (display_config == NULL)
+ 		return -EINVAL;
+ 
+ 	hwmgr->display_config = *display_config;
++
++	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
++		return -EINVAL;
++
+ 	/* to do pass other display configuration in furture */
+ 
+ 	if (hwmgr->hwmgr_func->store_cc6_data)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+index 7b2d500..7cce483 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+@@ -21,6 +21,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
+ 	return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
+ }
+ 
++bool acpi_atcs_notify_pcie_device_ready(void *device)
++{
++	int32_t temp_buffer = 1;
++
++	return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
++				ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
++						&temp_buffer,
++						NULL,
++						0,
++						sizeof(temp_buffer),
++						0);
++}
++
++
+ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
+ {
+ 	struct atcs_pref_req_input atcs_input;
+@@ -29,7 +43,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
+ 	int result;
+ 	struct cgs_system_info info = {0};
+ 
+-	if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
++	if( 0 != acpi_atcs_notify_pcie_device_ready(device))
+ 		return -EINVAL;
+ 
+ 	info.size = sizeof(struct cgs_system_info);
+@@ -54,7 +68,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
+ 						ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
+ 						&atcs_input,
+ 						&atcs_output,
+-						0,
++						1,
+ 						sizeof(atcs_input),
+ 						sizeof(atcs_output));
+ 		if (result != 0)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+index 0d5d837..aae2e8e 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+@@ -1298,7 +1298,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ 			table->Smio[count] |=
+ 				data->mvdd_voltage_table.entries[count].smio_low;
+ 		}
+-		table->SmioMask2 = data->vddci_voltage_table.mask_low;
++		table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+ 
+ 		CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+ 	}
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+index b156481..17766e8 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+@@ -299,7 +299,7 @@ static int init_dpm_2_parameters(
+ 			(((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
+ 
+ 		if (0 != powerplay_table->usPPMTableOffset) {
+-			if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
++			if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
+ 				phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ 					PHM_PlatformCaps_EnablePlatformPowerManagement);
+ 			}
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+index 3bd5e69..3df5de2 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
+ extern int acpi_pcie_perf_request(void *device,
+ 						uint8_t perf_req,
+ 						bool advertise);
++extern bool acpi_atcs_notify_pcie_device_ready(void *device);
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+index d65dcae..6d9c0f5 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+@@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
+ 
+ 		atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
+ 					     factor_reg);
++	} else {
++		atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index d307d96..080a090 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -354,6 +354,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
+ 	drm_property_unreference_blob(state->mode_blob);
+ 	state->mode_blob = NULL;
+ 
++	memset(&state->mode, 0, sizeof(state->mode));
++
+ 	if (blob) {
+ 		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
+ 		    drm_mode_convert_umode(&state->mode,
+@@ -366,7 +368,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
+ 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
+ 				 state->mode.name, state);
+ 	} else {
+-		memset(&state->mode, 0, sizeof(state->mode));
+ 		state->enable = false;
+ 		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
+ 				 state);
+@@ -1287,14 +1288,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
+  */
+ void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
+ {
++	struct drm_device *dev = state->dev;
++	unsigned crtc_mask = 0;
++	struct drm_crtc *crtc;
+ 	int ret;
++	bool global = false;
++
++	drm_for_each_crtc(crtc, dev) {
++		if (crtc->acquire_ctx != state->acquire_ctx)
++			continue;
++
++		crtc_mask |= drm_crtc_mask(crtc);
++		crtc->acquire_ctx = NULL;
++	}
++
++	if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
++		global = true;
++
++		dev->mode_config.acquire_ctx = NULL;
++	}
+ 
+ retry:
+ 	drm_modeset_backoff(state->acquire_ctx);
+ 
+-	ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
++	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ 	if (ret)
+ 		goto retry;
++
++	drm_for_each_crtc(crtc, dev)
++		if (drm_crtc_mask(crtc) & crtc_mask)
++			crtc->acquire_ctx = state->acquire_ctx;
++
++	if (global)
++		dev->mode_config.acquire_ctx = state->acquire_ctx;
+ }
+ EXPORT_SYMBOL(drm_atomic_legacy_backoff);
+ 
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index f30de80..691a1b9 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2800,8 +2800,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 			goto out;
+ 		}
+ 
+-		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+-
+ 		/*
+ 		 * Check whether the primary plane supports the fb pixel format.
+ 		 * Drivers not implementing the universal planes API use a
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 71ea052..ccfe7e7 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2908,11 +2908,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
+ 
+ 		if (!port->input && port->vcpi.vcpi > 0) {
+-			if (mgr->mst_state) {
+-				drm_dp_mst_reset_vcpi_slots(mgr, port);
+-				drm_dp_update_payload_part1(mgr);
+-				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+-			}
++			drm_dp_mst_reset_vcpi_slots(mgr, port);
++			drm_dp_update_payload_part1(mgr);
++			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ 		}
+ 
+ 		kref_put(&port->kref, drm_dp_free_mst_port);
+diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
+index bb88e3d..e619b00 100644
+--- a/drivers/gpu/drm/drm_fb_cma_helper.c
++++ b/drivers/gpu/drm/drm_fb_cma_helper.c
+@@ -301,7 +301,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+ err_fb_info_destroy:
+ 	drm_fb_helper_release_fbi(helper);
+ err_gem_free_object:
+-	dev->driver->gem_free_object(&obj->base);
++	drm_gem_object_unreference_unlocked(&obj->base);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
+index 1f500a1..d988ca0 100644
+--- a/drivers/gpu/drm/drm_gem_cma_helper.c
++++ b/drivers/gpu/drm/drm_gem_cma_helper.c
+@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ 	return cma_obj;
+ 
+ error:
+-	drm->driver->gem_free_object(&cma_obj->base);
++	drm_gem_object_unreference_unlocked(&cma_obj->base);
+ 	return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+@@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+ 	 * and handle has the id what user can see.
+ 	 */
+ 	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+-	if (ret)
+-		goto err_handle_create;
+-
+ 	/* drop reference from allocate - handle holds it now. */
+ 	drm_gem_object_unreference_unlocked(gem_obj);
++	if (ret)
++		return ERR_PTR(ret);
+ 
+ 	return cma_obj;
+-
+-err_handle_create:
+-	drm->driver->gem_free_object(gem_obj);
+-
+-	return ERR_PTR(ret);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index f7448a5..5d0fc26 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
+ 	if (out->status != MODE_OK)
+ 		goto out;
+ 
++	drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
++
+ 	ret = 0;
+ 
+ out:
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+index e8d9337..77886f1 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -40,9 +40,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
+ 	.reg_bits = 32,
+ 	.reg_stride = 4,
+ 	.val_bits = 32,
+-	.cache_type = REGCACHE_RBTREE,
++	.cache_type = REGCACHE_FLAT,
+ 
+ 	.volatile_reg = fsl_dcu_drm_is_volatile_reg,
++	.max_register = 0x11fc,
+ };
+ 
+ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+index d3c473f..3af4061 100644
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+ 	if (!mutex_is_locked(mutex))
+ 		return false;
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
+ 	return mutex->owner == task;
+ #else
+ 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7741efb..e5db9e1 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8229,12 +8229,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_encoder *encoder;
++	int i;
+ 	u32 val, final;
+ 	bool has_lvds = false;
+ 	bool has_cpu_edp = false;
+ 	bool has_panel = false;
+ 	bool has_ck505 = false;
+ 	bool can_ssc = false;
++	bool using_ssc_source = false;
+ 
+ 	/* We need to take the global config into account */
+ 	for_each_intel_encoder(dev, encoder) {
+@@ -8261,8 +8263,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		can_ssc = true;
+ 	}
+ 
+-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+-		      has_panel, has_lvds, has_ck505);
++	/* Check if any DPLLs are using the SSC source */
++	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
++		u32 temp = I915_READ(PCH_DPLL(i));
++
++		if (!(temp & DPLL_VCO_ENABLE))
++			continue;
++
++		if ((temp & PLL_REF_INPUT_MASK) ==
++		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++			using_ssc_source = true;
++			break;
++		}
++	}
++
++	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
++		      has_panel, has_lvds, has_ck505, using_ssc_source);
+ 
+ 	/* Ironlake: try to setup display ref clock before DPLL
+ 	 * enabling. This is only under driver's control after
+@@ -8299,9 +8315,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ 		} else
+ 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+-	} else {
+-		final |= DREF_SSC_SOURCE_DISABLE;
+-		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
++	} else if (using_ssc_source) {
++		final |= DREF_SSC_SOURCE_ENABLE;
++		final |= DREF_SSC1_ENABLE;
+ 	}
+ 
+ 	if (final == val)
+@@ -8347,7 +8363,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 	} else {
+-		DRM_DEBUG_KMS("Disabling SSC entirely\n");
++		DRM_DEBUG_KMS("Disabling CPU source output\n");
+ 
+ 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ 
+@@ -8358,16 +8374,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 
+-		/* Turn off the SSC source */
+-		val &= ~DREF_SSC_SOURCE_MASK;
+-		val |= DREF_SSC_SOURCE_DISABLE;
++		if (!using_ssc_source) {
++			DRM_DEBUG_KMS("Disabling SSC source\n");
+ 
+-		/* Turn off SSC1 */
+-		val &= ~DREF_SSC1_ENABLE;
++			/* Turn off the SSC source */
++			val &= ~DREF_SSC_SOURCE_MASK;
++			val |= DREF_SSC_SOURCE_DISABLE;
+ 
+-		I915_WRITE(PCH_DREF_CONTROL, val);
+-		POSTING_READ(PCH_DREF_CONTROL);
+-		udelay(200);
++			/* Turn off SSC1 */
++			val &= ~DREF_SSC1_ENABLE;
++
++			I915_WRITE(PCH_DREF_CONTROL, val);
++			POSTING_READ(PCH_DREF_CONTROL);
++			udelay(200);
++		}
+ 	}
+ 
+ 	BUG_ON(val != final);
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 412a34c..69054ef 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4942,13 +4942,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+ 
+ void intel_dp_encoder_reset(struct drm_encoder *encoder)
+ {
+-	struct intel_dp *intel_dp;
++	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
++	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++
++	if (!HAS_DDI(dev_priv))
++		intel_dp->DP = I915_READ(intel_dp->output_reg);
+ 
+ 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
+ 		return;
+ 
+-	intel_dp = enc_to_intel_dp(encoder);
+-
+ 	pps_lock(intel_dp);
+ 
+ 	/*
+@@ -5020,9 +5022,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+ 	intel_display_power_get(dev_priv, power_domain);
+ 
+ 	if (long_hpd) {
+-		/* indicate that we need to restart link training */
+-		intel_dp->train_set_valid = false;
+-
+ 		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
+ 			goto mst_fail;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
+index 0b8eefc..926a1e6 100644
+--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
+@@ -85,8 +85,7 @@ static bool
+ intel_dp_reset_link_train(struct intel_dp *intel_dp,
+ 			uint8_t dp_train_pat)
+ {
+-	if (!intel_dp->train_set_valid)
+-		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
++	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ 	intel_dp_set_signal_levels(intel_dp);
+ 	return intel_dp_set_link_train(intel_dp, dp_train_pat);
+ }
+@@ -161,22 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
+ 			break;
+ 		}
+ 
+-		/*
+-		 * if we used previously trained voltage and pre-emphasis values
+-		 * and we don't get clock recovery, reset link training values
+-		 */
+-		if (intel_dp->train_set_valid) {
+-			DRM_DEBUG_KMS("clock recovery not ok, reset");
+-			/* clear the flag as we are not reusing train set */
+-			intel_dp->train_set_valid = false;
+-			if (!intel_dp_reset_link_train(intel_dp,
+-						       DP_TRAINING_PATTERN_1 |
+-						       DP_LINK_SCRAMBLING_DISABLE)) {
+-				DRM_ERROR("failed to enable link training\n");
+-				return;
+-			}
+-			continue;
+-		}
+ 
+ 		/* Check to see if we've tried the max voltage */
+ 		for (i = 0; i < intel_dp->lane_count; i++)
+@@ -284,7 +267,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+ 		/* Make sure clock is still ok */
+ 		if (!drm_dp_clock_recovery_ok(link_status,
+ 					      intel_dp->lane_count)) {
+-			intel_dp->train_set_valid = false;
+ 			intel_dp_link_training_clock_recovery(intel_dp);
+ 			intel_dp_set_link_train(intel_dp,
+ 						training_pattern |
+@@ -301,7 +283,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+ 
+ 		/* Try 5 times, then try clock recovery if that fails */
+ 		if (tries > 5) {
+-			intel_dp->train_set_valid = false;
+ 			intel_dp_link_training_clock_recovery(intel_dp);
+ 			intel_dp_set_link_train(intel_dp,
+ 						training_pattern |
+@@ -322,10 +303,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+ 
+ 	intel_dp_set_idle_link_train(intel_dp);
+ 
+-	if (channel_eq) {
+-		intel_dp->train_set_valid = true;
++	if (channel_eq)
+ 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+-	}
+ }
+ 
+ void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 3a30b37..8dd2cc5 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -811,8 +811,6 @@ struct intel_dp {
+ 	/* This is called before a link training is starterd */
+ 	void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+ 
+-	bool train_set_valid;
+-
+ 	/* Displayport compliance testing */
+ 	unsigned long compliance_test_type;
+ 	unsigned long compliance_test_data;
+diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
+index 0f0492f..28f4407 100644
+--- a/drivers/gpu/drm/i915/intel_fbc.c
++++ b/drivers/gpu/drm/i915/intel_fbc.c
+@@ -823,8 +823,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+ {
+ 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ 	struct intel_fbc *fbc = &dev_priv->fbc;
+-	bool enable_by_default = IS_HASWELL(dev_priv) ||
+-				 IS_BROADWELL(dev_priv);
++	bool enable_by_default = IS_BROADWELL(dev_priv);
+ 
+ 	if (intel_vgpu_active(dev_priv->dev)) {
+ 		fbc->no_fbc_reason = "VGPU is active";
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index 14e64e0..d347dca 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+ 			}
+ 		}
+ 
+-		fvv = pllreffreq * testn / testm;
++		fvv = pllreffreq * (n + 1) / (m + 1);
+ 		fvv = (fvv - 800000) / 50000;
+ 
+ 		if (fvv > 15)
+@@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+ 	WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ 	WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ 	WREG_DAC(MGA1064_PIX_PLLC_P, p);
++
++	if (mdev->unique_rev_id >= 0x04) {
++		WREG_DAC(0x1a, 0x09);
++		msleep(20);
++		WREG_DAC(0x1a, 0x01);
++
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+index db10c11..c5a6ebd 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
+ 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
+ 
+ struct nvbios_ocfg {
+-	u16 match;
++	u8  proto;
++	u8  flags;
+ 	u16 clkcmp[2];
+ };
+ 
+@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
+ 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+ u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
+ 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
+-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
++u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
+ 		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
+ u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
+ #endif
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 59f27e7..e40a1b0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev)
+ 	if (ret)
+ 		goto fini;
+ 
++	if (fbcon->helper.fbdev)
++		fbcon->helper.fbdev->pixmap.buf_align = 4;
+ 	return 0;
+ 
+ fini:
+diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+index 789dc29..8f715fe 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	uint32_t fg;
+ 	uint32_t bg;
+ 	uint32_t dsize;
+-	uint32_t width;
+ 	uint32_t *data = (uint32_t *)image->data;
+ 	int ret;
+ 
+@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	if (ret)
+ 		return ret;
+ 
+-	width = ALIGN(image->width, 8);
+-	dsize = ALIGN(width * image->height, 32) >> 5;
+-
+ 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ 		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
+@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 			 ((image->dx + image->width) & 0xffff));
+ 	OUT_RING(chan, bg);
+ 	OUT_RING(chan, fg);
+-	OUT_RING(chan, (image->height << 16) | width);
++	OUT_RING(chan, (image->height << 16) | image->width);
+ 	OUT_RING(chan, (image->height << 16) | image->width);
+ 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ 
++	dsize = ALIGN(image->width * image->height, 32) >> 5;
+ 	while (dsize) {
+ 		int iter_len = dsize > 128 ? 128 : dsize;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+index e05499d..a4e259a 100644
+--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	struct nouveau_fbdev *nfbdev = info->par;
+ 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ 	struct nouveau_channel *chan = drm->channel;
+-	uint32_t width, dwords, *data = (uint32_t *)image->data;
++	uint32_t dwords, *data = (uint32_t *)image->data;
+ 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ 	uint32_t *palette = info->pseudo_palette;
+ 	int ret;
+@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	if (ret)
+ 		return ret;
+ 
+-	width = ALIGN(image->width, 32);
+-	dwords = (width * image->height) >> 5;
+-
+ 	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
+ 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING(chan, 0);
+ 	OUT_RING(chan, image->dy);
+ 
++	dwords = ALIGN(image->width * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+index c97395b..f28315e 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	struct nouveau_fbdev *nfbdev = info->par;
+ 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ 	struct nouveau_channel *chan = drm->channel;
+-	uint32_t width, dwords, *data = (uint32_t *)image->data;
++	uint32_t dwords, *data = (uint32_t *)image->data;
+ 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ 	uint32_t *palette = info->pseudo_palette;
+ 	int ret;
+@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	if (ret)
+ 		return ret;
+ 
+-	width = ALIGN(image->width, 32);
+-	dwords = (width * image->height) >> 5;
+-
+ 	BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
+ 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING  (chan, 0);
+ 	OUT_RING  (chan, image->dy);
+ 
++	dwords = ALIGN(image->width * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+index 18fab397..62ad030 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
+ 	.fini = nvkm_device_pci_fini,
+ 	.resource_addr = nvkm_device_pci_resource_addr,
+ 	.resource_size = nvkm_device_pci_resource_size,
+-	.cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
++	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+ };
+ 
+ int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+index a74c5dd..e2a64ed 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
+ nvkm-y += nvkm/engine/disp/sornv50.o
+ nvkm-y += nvkm/engine/disp/sorg94.o
+ nvkm-y += nvkm/engine/disp/sorgf119.o
++nvkm-y += nvkm/engine/disp/sorgm107.o
+ nvkm-y += nvkm/engine/disp/sorgm200.o
+ nvkm-y += nvkm/engine/disp/dport.o
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+index f031466..5dd3438 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
+ 	mask |= 0x0001 << or;
+ 	mask |= 0x0100 << head;
+ 
++
+ 	list_for_each_entry(outp, &disp->base.outp, head) {
+ 		if ((outp->info.hasht & 0xff) == type &&
+ 		    (outp->info.hashm & mask) == mask) {
+@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+ 	if (!outp)
+ 		return NULL;
+ 
++	*conf = (ctrl & 0x00000f00) >> 8;
+ 	switch (outp->info.type) {
+ 	case DCB_OUTPUT_TMDS:
+-		*conf = (ctrl & 0x00000f00) >> 8;
+ 		if (*conf == 5)
+ 			*conf |= 0x0100;
+ 		break;
+ 	case DCB_OUTPUT_LVDS:
+-		*conf = disp->sor.lvdsconf;
+-		break;
+-	case DCB_OUTPUT_DP:
+-		*conf = (ctrl & 0x00000f00) >> 8;
++		*conf |= disp->sor.lvdsconf;
+ 		break;
+-	case DCB_OUTPUT_ANALOG:
+ 	default:
+-		*conf = 0x00ff;
+ 		break;
+ 	}
+ 
+-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
++	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
++				 &ver, &hdr, &cnt, &len, &info2);
+ 	if (data && id < 0xff) {
+ 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ 		if (data) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+index b694414..f4b9cf8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+@@ -36,7 +36,7 @@ gm107_disp = {
+ 	.outp.internal.crt = nv50_dac_output_new,
+ 	.outp.internal.tmds = nv50_sor_output_new,
+ 	.outp.internal.lvds = nv50_sor_output_new,
+-	.outp.internal.dp = gf119_sor_dp_new,
++	.outp.internal.dp = gm107_sor_dp_new,
+ 	.dac.nr = 3,
+ 	.dac.power = nv50_dac_power,
+ 	.dac.sense = nv50_dac_sense,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+index 4226d21..fcb1b0c 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+ 	if (!outp)
+ 		return NULL;
+ 
++	*conf = (ctrl & 0x00000f00) >> 8;
+ 	if (outp->info.location == 0) {
+ 		switch (outp->info.type) {
+ 		case DCB_OUTPUT_TMDS:
+-			*conf = (ctrl & 0x00000f00) >> 8;
+ 			if (*conf == 5)
+ 				*conf |= 0x0100;
+ 			break;
+ 		case DCB_OUTPUT_LVDS:
+-			*conf = disp->sor.lvdsconf;
++			*conf |= disp->sor.lvdsconf;
+ 			break;
+-		case DCB_OUTPUT_DP:
+-			*conf = (ctrl & 0x00000f00) >> 8;
+-			break;
+-		case DCB_OUTPUT_ANALOG:
+ 		default:
+-			*conf = 0x00ff;
+ 			break;
+ 		}
+ 	} else {
+@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+ 		pclk = pclk / 2;
+ 	}
+ 
+-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
++	data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
++				 &ver, &hdr, &cnt, &len, &info2);
+ 	if (data && id < 0xff) {
+ 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ 		if (data) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+index e9067ba..4e983f6 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+ int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+ 		     struct nvkm_output **);
+ int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
++int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
+ 
+-int  gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+-		      struct nvkm_output **);
++int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
++		     struct nvkm_output **);
++int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
++
++int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
++		     struct nvkm_output **);
+ #endif
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+index b4b41b1..49bd5da 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+@@ -40,8 +40,8 @@ static int
+ gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+ {
+ 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+-	const u32 loff = gf119_sor_loff(outp);
+-	nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
++	const u32 soff = gf119_sor_soff(outp);
++	nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
+ 	return 0;
+ }
+ 
+@@ -64,7 +64,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+ 	return 0;
+ }
+ 
+-static int
++int
+ gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+ 		     int ln, int vs, int pe, int pc)
+ {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+new file mode 100644
+index 0000000..37790b2
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+@@ -0,0 +1,53 @@
++/*
++ * Copyright 2016 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs <bskeggs@redhat.com>
++ */
++#include "nv50.h"
++#include "outpdp.h"
++
++int
++gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
++{
++	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
++	const u32 soff = outp->base.or * 0x800;
++	const u32 data = 0x01010101 * pattern;
++	if (outp->base.info.sorconf.link & 1)
++		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
++	else
++		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
++	return 0;
++}
++
++static const struct nvkm_output_dp_func
++gm107_sor_dp_func = {
++	.pattern = gm107_sor_dp_pattern,
++	.lnk_pwr = g94_sor_dp_lnk_pwr,
++	.lnk_ctl = gf119_sor_dp_lnk_ctl,
++	.drv_ctl = gf119_sor_dp_drv_ctl,
++};
++
++int
++gm107_sor_dp_new(struct nvkm_disp *disp, int index,
++		 struct dcb_output *dcbE, struct nvkm_output **poutp)
++{
++	return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
++}
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+index 2cfbef9..c44fa7e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+@@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
+ }
+ 
+ static int
+-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+-{
+-	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+-	const u32 soff = gm200_sor_soff(outp);
+-	const u32 data = 0x01010101 * pattern;
+-	if (outp->base.info.sorconf.link & 1)
+-		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+-	else
+-		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+-	return 0;
+-}
+-
+-static int
+ gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+ {
+ 	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+ 
+ static const struct nvkm_output_dp_func
+ gm200_sor_dp_func = {
+-	.pattern = gm200_sor_dp_pattern,
++	.pattern = gm107_sor_dp_pattern,
+ 	.lnk_pwr = gm200_sor_dp_lnk_pwr,
+ 	.lnk_ctl = gf119_sor_dp_lnk_ctl,
+ 	.drv_ctl = gm200_sor_dp_drv_ctl,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index b2de290..b0c7216 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -942,22 +942,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
+ }
+ 
+ static const struct nvkm_enum gf100_mp_warp_error[] = {
+-	{ 0x00, "NO_ERROR" },
+-	{ 0x01, "STACK_MISMATCH" },
++	{ 0x01, "STACK_ERROR" },
++	{ 0x02, "API_STACK_ERROR" },
++	{ 0x03, "RET_EMPTY_STACK_ERROR" },
++	{ 0x04, "PC_WRAP" },
+ 	{ 0x05, "MISALIGNED_PC" },
+-	{ 0x08, "MISALIGNED_GPR" },
+-	{ 0x09, "INVALID_OPCODE" },
+-	{ 0x0d, "GPR_OUT_OF_BOUNDS" },
+-	{ 0x0e, "MEM_OUT_OF_BOUNDS" },
+-	{ 0x0f, "UNALIGNED_MEM_ACCESS" },
++	{ 0x06, "PC_OVERFLOW" },
++	{ 0x07, "MISALIGNED_IMMC_ADDR" },
++	{ 0x08, "MISALIGNED_REG" },
++	{ 0x09, "ILLEGAL_INSTR_ENCODING" },
++	{ 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
++	{ 0x0b, "ILLEGAL_INSTR_PARAM" },
++	{ 0x0c, "INVALID_CONST_ADDR" },
++	{ 0x0d, "OOR_REG" },
++	{ 0x0e, "OOR_ADDR" },
++	{ 0x0f, "MISALIGNED_ADDR" },
+ 	{ 0x10, "INVALID_ADDR_SPACE" },
+-	{ 0x11, "INVALID_PARAM" },
++	{ 0x11, "ILLEGAL_INSTR_PARAM2" },
++	{ 0x12, "INVALID_CONST_ADDR_LDC" },
++	{ 0x13, "GEOMETRY_SM_ERROR" },
++	{ 0x14, "DIVERGENT" },
++	{ 0x15, "WARP_EXIT" },
+ 	{}
+ };
+ 
+ static const struct nvkm_bitfield gf100_mp_global_error[] = {
++	{ 0x00000001, "SM_TO_SM_FAULT" },
++	{ 0x00000002, "L1_ERROR" },
+ 	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
+-	{ 0x00000008, "OUT_OF_STACK_SPACE" },
++	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
++	{ 0x00000010, "BPT_INT" },
++	{ 0x00000020, "BPT_PAUSE" },
++	{ 0x00000040, "SINGLE_STEP_COMPLETE" },
++	{ 0x20000000, "ECC_SEC_ERROR" },
++	{ 0x40000000, "ECC_DED_ERROR" },
++	{ 0x80000000, "TIMEOUT" },
+ 	{}
+ };
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+index a5e9213..9efb1b4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
+ {
+ 	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ 	if (data) {
+-		info->match     = nvbios_rd16(bios, data + 0x00);
++		info->proto     = nvbios_rd08(bios, data + 0x00);
++		info->flags     = nvbios_rd16(bios, data + 0x01);
+ 		info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
+ 		info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
+ 	}
+@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
+ }
+ 
+ u16
+-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
++nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
+ 		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
+ {
+ 	u16 data, idx = 0;
+ 	while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+-		if (info->match == type)
++		if ((info->proto == proto || info->proto == 0xff) &&
++		    (info->flags == flags))
+ 			break;
+ 	}
+ 	return data;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+index e292f56..389fb13 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
+ }
+ 
+ static void
+-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
++gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
+ {
+ 	struct nvkm_subdev *subdev = &ltc->subdev;
+ 	struct nvkm_device *device = subdev->device;
+-	u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
++	u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
+ 	u32 stat = nvkm_rd32(device, base + 0x00c);
+ 
+ 	if (stat) {
+@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
+ 	while (mask) {
+ 		u32 s, c = __ffs(mask);
+ 		for (s = 0; s < ltc->lts_nr; s++)
+-			gm107_ltc_lts_isr(ltc, c, s);
++			gm107_ltc_intr_lts(ltc, c, s);
+ 		mask &= ~(1 << c);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+index 2a29bfd..e18e0dc 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
+@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
+ gm200_ltc = {
+ 	.oneinit = gm200_ltc_oneinit,
+ 	.init = gm200_ltc_init,
+-	.intr = gm107_ltc_intr, /*XXX: not validated */
++	.intr = gm107_ltc_intr,
+ 	.cbc_clear = gm107_ltc_cbc_clear,
+ 	.cbc_wait = gm107_ltc_cbc_wait,
+ 	.zbc = 16,
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index d0826fb..cb29868 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ /*
+  * GPU helpers function.
+  */
++
++/**
++ * radeon_device_is_virtual - check if we are running is a virtual environment
++ *
++ * Check if the asic has been passed through to a VM (all asics).
++ * Used at driver startup.
++ * Returns true if virtual or false if not.
++ */
++static bool radeon_device_is_virtual(void)
++{
++#ifdef CONFIG_X86
++	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
++#else
++	return false;
++#endif
++}
++
+ /**
+  * radeon_card_posted - check if the hw has already been initialized
+  *
+@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ 	uint32_t reg;
+ 
++	/* for pass through, always force asic_init */
++	if (radeon_device_is_virtual())
++		return false;
++
+ 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
+ 	if (efi_enabled(EFI_BOOT) &&
+ 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index e3daafa..3e7c9ac 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1016,9 +1016,9 @@ out_unlock:
+ 	return ret;
+ }
+ 
+-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+-			      struct ttm_mem_reg *mem,
+-			      uint32_t *new_flags)
++bool ttm_bo_mem_compat(struct ttm_placement *placement,
++		       struct ttm_mem_reg *mem,
++		       uint32_t *new_flags)
+ {
+ 	int i;
+ 
+@@ -1050,6 +1050,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ 
+ 	return false;
+ }
++EXPORT_SYMBOL(ttm_bo_mem_compat);
+ 
+ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ 			struct ttm_placement *placement,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+index 299925a..eadc981 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+ {
+ 	struct ttm_buffer_object *bo = &buf->base;
+ 	int ret;
++	uint32_t new_flags;
+ 
+ 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ 	if (unlikely(ret != 0))
+@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+ 	if (unlikely(ret != 0))
+ 		goto err;
+ 
+-	ret = ttm_bo_validate(bo, placement, interruptible, false);
++	if (buf->pin_count > 0)
++		ret = ttm_bo_mem_compat(placement, &bo->mem,
++					&new_flags) == true ? 0 : -EINVAL;
++	else
++		ret = ttm_bo_validate(bo, placement, interruptible, false);
++
+ 	if (!ret)
+ 		vmw_bo_pin_reserved(buf, true);
+ 
+@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ {
+ 	struct ttm_buffer_object *bo = &buf->base;
+ 	int ret;
++	uint32_t new_flags;
+ 
+ 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+ 	if (unlikely(ret != 0))
+@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ 	if (unlikely(ret != 0))
+ 		goto err;
+ 
++	if (buf->pin_count > 0) {
++		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
++					&new_flags) == true ? 0 : -EINVAL;
++		goto out_unreserve;
++	}
++
+ 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+ 			      false);
+ 	if (likely(ret == 0) || ret == -ERESTARTSYS)
+@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ 	struct ttm_placement placement;
+ 	struct ttm_place place;
+ 	int ret = 0;
++	uint32_t new_flags;
+ 
+ 	place = vmw_vram_placement.placement[0];
+ 	place.lpfn = bo->num_pages;
+@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ 	 */
+ 	if (bo->mem.mem_type == TTM_PL_VRAM &&
+ 	    bo->mem.start < bo->num_pages &&
+-	    bo->mem.start > 0)
++	    bo->mem.start > 0 &&
++	    buf->pin_count == 0)
+ 		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ 
+-	ret = ttm_bo_validate(bo, &placement, interruptible, false);
++	if (buf->pin_count > 0)
++		ret = ttm_bo_mem_compat(&placement, &bo->mem,
++					&new_flags) == true ? 0 : -EINVAL;
++	else
++		ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ 
+ 	/* For some reason we didn't end up at the start of vram */
+ 	WARN_ON(ret == 0 && bo->offset != 0);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index f2cf923..2a50546 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -227,6 +227,7 @@ static int vmw_force_iommu;
+ static int vmw_restrict_iommu;
+ static int vmw_force_coherent;
+ static int vmw_restrict_dma_mask;
++static int vmw_assume_16bpp;
+ 
+ static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+ static void vmw_master_init(struct vmw_master *);
+@@ -243,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+ module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+ MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
+ module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
++MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
++module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
+ 
+ 
+ static void vmw_print_capabilities(uint32_t capabilities)
+@@ -653,6 +656,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
+ 	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ 
++	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
++
+ 	dev_priv->enable_fb = enable_fbdev;
+ 
+ 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+@@ -699,6 +704,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 			vmw_read(dev_priv,
+ 				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ 
++		/*
++		 * Workaround for low memory 2D VMs to compensate for the
++		 * allocation taken by fbdev
++		 */
++		if (!(dev_priv->capabilities & SVGA_CAP_3D))
++			mem_size *= 2;
++
+ 		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ 		dev_priv->prim_bb_mem =
+ 			vmw_read(dev_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 6db358a..cab0c54 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -386,6 +386,7 @@ struct vmw_private {
+ 	spinlock_t hw_lock;
+ 	spinlock_t cap_lock;
+ 	bool has_dx;
++	bool assume_16bpp;
+ 
+ 	/*
+ 	 * VGA registers.
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 679a4cb..d2d9395 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
+ 
+ 	par->set_fb = &vfb->base;
+ 
+-	if (!par->bo_ptr) {
+-		/*
+-		 * Pin before mapping. Since we don't know in what placement
+-		 * to pin, call into KMS to do it for us.
+-		 */
+-		ret = vfb->pin(vfb);
+-		if (ret) {
+-			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+-			return ret;
+-		}
+-
+-		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+-				  par->vmw_bo->base.num_pages, &par->map);
+-		if (ret) {
+-			vfb->unpin(vfb);
+-			DRM_ERROR("Could not map the fbdev framebuffer.\n");
+-			return ret;
+-		}
+-
+-		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
+ 	if (ret)
+ 		goto out_unlock;
+ 
++	if (!par->bo_ptr) {
++		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
++
++		/*
++		 * Pin before mapping. Since we don't know in what placement
++		 * to pin, call into KMS to do it for us.
++		 */
++		ret = vfb->pin(vfb);
++		if (ret) {
++			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
++			goto out_unlock;
++		}
++
++		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
++				  par->vmw_bo->base.num_pages, &par->map);
++		if (ret) {
++			vfb->unpin(vfb);
++			DRM_ERROR("Could not map the fbdev framebuffer.\n");
++			goto out_unlock;
++		}
++
++		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
++	}
++
++
+ 	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+ 			  par->set_fb->width, par->set_fb->height);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index b07543b..6ccd61d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1553,14 +1553,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ 	};
+ 	int i;
+-	u32 assumed_bpp = 2;
++	u32 assumed_bpp = 4;
+ 
+-	/*
+-	 * If using screen objects, then assume 32-bpp because that's what the
+-	 * SVGA device is assuming
+-	 */
+-	if (dev_priv->active_display_unit == vmw_du_screen_object)
+-		assumed_bpp = 4;
++	if (dev_priv->assume_16bpp)
++		assumed_bpp = 2;
+ 
+ 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ 		max_width  = min(max_width,  dev_priv->stdu_max_width);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 9ca818f..41932a7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
+ 
+ 	WARN_ON_ONCE(!stdu->defined);
+ 
+-	if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
+-	    new_fb->height == mode->vdisplay)
++	new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
++
++	if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
++	    new_vfbs->surface->base_size.height == mode->vdisplay)
+ 		new_content_type = SAME_AS_DISPLAY;
+ 	else if (vfb->dmabuf)
+ 		new_content_type = SEPARATE_DMA;
+@@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
+ 			content_srf.mip_levels[0]     = 1;
+ 			content_srf.multisample_count = 0;
+ 		} else {
+-			new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ 			content_srf = *new_vfbs->surface;
+ 		}
+ 
+@@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
+ 			return ret;
+ 		}
+ 	} else if (new_content_type == SAME_AS_DISPLAY) {
+-		new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+ 		new_display_srf = vmw_surface_reference(new_vfbs->surface);
+ 	}
+ 
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index aad8c16..0cd4f72 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
+ 	struct elo_priv *priv = hid_get_drvdata(hdev);
+ 
+ 	hid_hw_stop(hdev);
+-	flush_workqueue(wq);
++	cancel_delayed_work_sync(&priv->work);
+ 	kfree(priv);
+ }
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index c741f5e..0088979 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_ALWAYS_VALID		(1 << 4)
+ #define MT_QUIRK_VALID_IS_INRANGE	(1 << 5)
+ #define MT_QUIRK_VALID_IS_CONFIDENCE	(1 << 6)
++#define MT_QUIRK_CONFIDENCE		(1 << 7)
+ #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE	(1 << 8)
+ #define MT_QUIRK_NO_AREA		(1 << 9)
+ #define MT_QUIRK_IGNORE_DUPLICATES	(1 << 10)
+@@ -78,6 +79,7 @@ struct mt_slot {
+ 	__s32 contactid;	/* the device ContactID assigned to this slot */
+ 	bool touch_state;	/* is the touch valid? */
+ 	bool inrange_state;	/* is the finger in proximity of the sensor? */
++	bool confidence_state;  /* is the touch made by a finger? */
+ };
+ 
+ struct mt_class {
+@@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 			return 1;
+ 		case HID_DG_CONFIDENCE:
+ 			if (cls->name == MT_CLS_WIN_8 &&
+-				field->application == HID_DG_TOUCHPAD) {
+-				cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
+-				cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
+-			}
++				field->application == HID_DG_TOUCHPAD)
++				cls->quirks |= MT_QUIRK_CONFIDENCE;
+ 			mt_store_field(usage, td, hi);
+ 			return 1;
+ 		case HID_DG_TIPSWITCH:
+@@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
+ 		return;
+ 
+ 	if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
++		int active;
+ 		int slotnum = mt_compute_slot(td, input);
+ 		struct mt_slot *s = &td->curdata;
+ 		struct input_mt *mt = input->mt;
+@@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
+ 				return;
+ 		}
+ 
++		if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
++			s->confidence_state = 1;
++		active = (s->touch_state || s->inrange_state) &&
++							s->confidence_state;
++
+ 		input_mt_slot(input, slotnum);
+-		input_mt_report_slot_state(input, MT_TOOL_FINGER,
+-			s->touch_state || s->inrange_state);
+-		if (s->touch_state || s->inrange_state) {
++		input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
++		if (active) {
+ 			/* this finger is in proximity of the sensor */
+ 			int wide = (s->w > s->h);
+ 			/* divided by two to match visual scale of touch */
+@@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
+ 			td->curdata.touch_state = value;
+ 			break;
+ 		case HID_DG_CONFIDENCE:
++			if (quirks & MT_QUIRK_CONFIDENCE)
++				td->curdata.confidence_state = value;
+ 			if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+ 				td->curvalid = value;
+ 			break;
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 2f1ddca..700145b 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
+ 					goto inval;
+ 			} else if (uref->usage_index >= field->report_count)
+ 				goto inval;
+-
+-			else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+-				 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+-				  uref->usage_index + uref_multi->num_values > field->report_count))
+-				goto inval;
+ 		}
+ 
++		if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
++		    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
++		     uref->usage_index + uref_multi->num_values > field->report_count))
++			goto inval;
++
+ 		switch (cmd) {
+ 		case HIDIOCGUSAGE:
+ 			uref->value = field->value[uref->usage_index];
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index c43318d..a9356a3 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -66,11 +66,13 @@
+ 
+ static DEFINE_MUTEX(i8k_mutex);
+ static char bios_version[4];
++static char bios_machineid[16];
+ static struct device *i8k_hwmon_dev;
+ static u32 i8k_hwmon_flags;
+ static uint i8k_fan_mult = I8K_FAN_MULT;
+ static uint i8k_pwm_mult;
+ static uint i8k_fan_max = I8K_FAN_HIGH;
++static bool disallow_fan_type_call;
+ 
+ #define I8K_HWMON_HAVE_TEMP1	(1 << 0)
+ #define I8K_HWMON_HAVE_TEMP2	(1 << 1)
+@@ -94,13 +96,13 @@ module_param(ignore_dmi, bool, 0);
+ MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
+ 
+ #if IS_ENABLED(CONFIG_I8K)
+-static bool restricted;
++static bool restricted = true;
+ module_param(restricted, bool, 0);
+-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
++MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
+ 
+ static bool power_status;
+ module_param(power_status, bool, 0600);
+-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
++MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
+ #endif
+ 
+ static uint fan_mult;
+@@ -235,14 +237,28 @@ static int i8k_get_fan_speed(int fan)
+ /*
+  * Read the fan type.
+  */
+-static int i8k_get_fan_type(int fan)
++static int _i8k_get_fan_type(int fan)
+ {
+ 	struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
+ 
++	if (disallow_fan_type_call)
++		return -EINVAL;
++
+ 	regs.ebx = fan & 0xff;
+ 	return i8k_smm(&regs) ? : regs.eax & 0xff;
+ }
+ 
++static int i8k_get_fan_type(int fan)
++{
++	/* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
++	static int types[2] = { INT_MIN, INT_MIN };
++
++	if (types[fan] == INT_MIN)
++		types[fan] = _i8k_get_fan_type(fan);
++
++	return types[fan];
++}
++
+ /*
+  * Read the fan nominal rpm for specific fan speed.
+  */
+@@ -392,9 +408,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
+ 		break;
+ 
+ 	case I8K_MACHINE_ID:
+-		memset(buff, 0, 16);
+-		strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+-			sizeof(buff));
++		if (restricted && !capable(CAP_SYS_ADMIN))
++			return -EPERM;
++
++		memset(buff, 0, sizeof(buff));
++		strlcpy(buff, bios_machineid, sizeof(buff));
+ 		break;
+ 
+ 	case I8K_FN_STATUS:
+@@ -511,7 +529,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
+ 	seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
+ 		   I8K_PROC_FMT,
+ 		   bios_version,
+-		   i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
++		   (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
+ 		   cpu_temp,
+ 		   left_fan, right_fan, left_speed, right_speed,
+ 		   ac_power, fn_key);
+@@ -718,6 +736,9 @@ static struct attribute *i8k_attrs[] = {
+ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+ 			      int index)
+ {
++	if (disallow_fan_type_call &&
++	    (index == 9 || index == 12))
++		return 0;
+ 	if (index >= 0 && index <= 1 &&
+ 	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+ 		return 0;
+@@ -767,13 +788,17 @@ static int __init i8k_init_hwmon(void)
+ 	if (err >= 0)
+ 		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
+ 
+-	/* First fan attributes, if fan type is OK */
+-	err = i8k_get_fan_type(0);
++	/* First fan attributes, if fan status or type is OK */
++	err = i8k_get_fan_status(0);
++	if (err < 0)
++		err = i8k_get_fan_type(0);
+ 	if (err >= 0)
+ 		i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
+ 
+-	/* Second fan attributes, if fan type is OK */
+-	err = i8k_get_fan_type(1);
++	/* Second fan attributes, if fan status or type is OK */
++	err = i8k_get_fan_status(1);
++	if (err < 0)
++		err = i8k_get_fan_type(1);
+ 	if (err >= 0)
+ 		i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
+ 
+@@ -929,12 +954,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
+ 
+ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+ 
+-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
++/*
++ * On some machines once I8K_SMM_GET_FAN_TYPE is issued then CPU fan speed
++ * randomly going up and down due to bug in Dell SMM or BIOS. Here is blacklist
++ * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call.
++ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
++ */
++static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
+ 	{
+-		/*
+-		 * CPU fan speed going up and down on Dell Studio XPS 8000
+-		 * for unknown reasons.
+-		 */
+ 		.ident = "Dell Studio XPS 8000",
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+@@ -942,16 +969,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ 		},
+ 	},
+ 	{
+-		/*
+-		 * CPU fan speed going up and down on Dell Studio XPS 8100
+-		 * for unknown reasons.
+-		 */
+ 		.ident = "Dell Studio XPS 8100",
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+ 		},
+ 	},
++	{
++		.ident = "Dell Inspiron 580",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
++		},
++	},
+ 	{ }
+ };
+ 
+@@ -966,8 +996,7 @@ static int __init i8k_probe(void)
+ 	/*
+ 	 * Get DMI information
+ 	 */
+-	if (!dmi_check_system(i8k_dmi_table) ||
+-	    dmi_check_system(i8k_blacklist_dmi_table)) {
++	if (!dmi_check_system(i8k_dmi_table)) {
+ 		if (!ignore_dmi && !force)
+ 			return -ENODEV;
+ 
+@@ -978,8 +1007,13 @@ static int __init i8k_probe(void)
+ 			i8k_get_dmi_data(DMI_BIOS_VERSION));
+ 	}
+ 
++	if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
++		disallow_fan_type_call = true;
++
+ 	strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
+ 		sizeof(bios_version));
++	strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
++		sizeof(bios_machineid));
+ 
+ 	/*
+ 	 * Get SMM Dell signature
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 923f565..3a9f106 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
+ 
+ 	mutex_lock(&st->buf_lock);
+ 	ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+-	if (ret)
++	if (ret < 0)
+ 		goto error_ret;
+ 	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+ 	st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
+@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ 		break;
+ 	case IIO_CHAN_INFO_SCALE:
+ 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+-		if (ret)
++		if (ret < 0)
+ 			goto error_ret;
+ 		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ 		ret = IIO_VAL_INT_PLUS_MICRO;
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 21e19b6..2123f0a 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
+ 
+ 	st = iio_priv(indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vref");
+-	if (!IS_ERR_OR_NULL(st->reg)) {
++	st->reg = devm_regulator_get_optional(&spi->dev, "vref");
++	if (!IS_ERR(st->reg)) {
+ 		ret = regulator_enable(st->reg);
+ 		if (ret)
+ 			return ret;
+@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
+ 
+ 		st->vref_mv = ret / 1000;
+ 	} else {
++		/* Any other error indicates that the regulator does exist */
++		if (PTR_ERR(st->reg) != -ENODEV)
++			return PTR_ERR(st->reg);
+ 		/* Use internal reference */
+ 		st->vref_mv = 2500;
+ 	}
+diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
+index fa47676..a03832a 100644
+--- a/drivers/iio/humidity/hdc100x.c
++++ b/drivers/iio/humidity/hdc100x.c
+@@ -55,7 +55,7 @@ static const struct {
+ 	},
+ 	{ /* IIO_HUMIDITYRELATIVE channel */
+ 		.shift = 8,
+-		.mask = 2,
++		.mask = 3,
+ 	},
+ };
+ 
+@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
+ 		dev_err(&client->dev, "cannot read high byte measurement");
+ 		return ret;
+ 	}
+-	val = ret << 6;
++	val = ret << 8;
+ 
+ 	ret = i2c_smbus_read_byte(client);
+ 	if (ret < 0) {
+ 		dev_err(&client->dev, "cannot read low byte measurement");
+ 		return ret;
+ 	}
+-	val |= ret >> 2;
++	val |= ret;
+ 
+ 	return val;
+ }
+@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
+ 		return IIO_VAL_INT_PLUS_MICRO;
+ 	case IIO_CHAN_INFO_SCALE:
+ 		if (chan->type == IIO_TEMP) {
+-			*val = 165;
+-			*val2 = 65536 >> 2;
++			*val = 165000;
++			*val2 = 65536;
+ 			return IIO_VAL_FRACTIONAL;
+ 		} else {
+-			*val = 0;
+-			*val2 = 10000;
+-			return IIO_VAL_INT_PLUS_MICRO;
++			*val = 100;
++			*val2 = 65536;
++			return IIO_VAL_FRACTIONAL;
+ 		}
+ 		break;
+ 	case IIO_CHAN_INFO_OFFSET:
+-		*val = -3971;
+-		*val2 = 879096;
++		*val = -15887;
++		*val2 = 515151;
+ 		return IIO_VAL_INT_PLUS_MICRO;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index ae2806a..0c52dfe 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ 
+ 	/* Prevent the module from being removed whilst attached to a trigger */
+ 	__module_get(pf->indio_dev->info->driver_module);
++
++	/* Get irq number */
+ 	pf->irq = iio_trigger_get_irq(trig);
++	if (pf->irq < 0)
++		goto out_put_module;
++
++	/* Request irq */
+ 	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+ 				   pf->type, pf->name,
+ 				   pf);
+-	if (ret < 0) {
+-		module_put(pf->indio_dev->info->driver_module);
+-		return ret;
+-	}
++	if (ret < 0)
++		goto out_put_irq;
+ 
++	/* Enable trigger in driver */
+ 	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ 		ret = trig->ops->set_trigger_state(trig, true);
+ 		if (ret < 0)
+-			module_put(pf->indio_dev->info->driver_module);
++			goto out_free_irq;
+ 	}
+ 
+ 	return ret;
++
++out_free_irq:
++	free_irq(pf->irq, pf);
++out_put_irq:
++	iio_trigger_put_irq(trig, pf->irq);
++out_put_module:
++	module_put(pf->indio_dev->info->driver_module);
++	return ret;
+ }
+ 
+ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
+diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
+index a6af56a..6443aad 100644
+--- a/drivers/iio/light/apds9960.c
++++ b/drivers/iio/light/apds9960.c
+@@ -1006,6 +1006,7 @@ static int apds9960_probe(struct i2c_client *client,
+ 
+ 	iio_device_attach_buffer(indio_dev, buffer);
+ 
++	indio_dev->dev.parent = &client->dev;
+ 	indio_dev->info = &apds9960_info;
+ 	indio_dev->name = APDS9960_DRV_NAME;
+ 	indio_dev->channels = apds9960_channels;
+diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
+index 172393a..d3ca320 100644
+--- a/drivers/iio/pressure/st_pressure_core.c
++++ b/drivers/iio/pressure/st_pressure_core.c
+@@ -28,15 +28,21 @@
+ #include <linux/iio/common/st_sensors.h>
+ #include "st_pressure.h"
+ 
++#define MCELSIUS_PER_CELSIUS			1000
++
++/* Default pressure sensitivity */
+ #define ST_PRESS_LSB_PER_MBAR			4096UL
+ #define ST_PRESS_KPASCAL_NANO_SCALE		(100000000UL / \
+ 						 ST_PRESS_LSB_PER_MBAR)
++
++/* Default temperature sensitivity */
+ #define ST_PRESS_LSB_PER_CELSIUS		480UL
+-#define ST_PRESS_CELSIUS_NANO_SCALE		(1000000000UL / \
+-						 ST_PRESS_LSB_PER_CELSIUS)
++#define ST_PRESS_MILLI_CELSIUS_OFFSET		42500UL
++
+ #define ST_PRESS_NUMBER_DATA_CHANNELS		1
+ 
+ /* FULLSCALE */
++#define ST_PRESS_FS_AVL_1100MB			1100
+ #define ST_PRESS_FS_AVL_1260MB			1260
+ 
+ #define ST_PRESS_1_OUT_XL_ADDR			0x28
+@@ -54,9 +60,6 @@
+ #define ST_PRESS_LPS331AP_PW_MASK		0x80
+ #define ST_PRESS_LPS331AP_FS_ADDR		0x23
+ #define ST_PRESS_LPS331AP_FS_MASK		0x30
+-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL	0x00
+-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN	ST_PRESS_KPASCAL_NANO_SCALE
+-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN	ST_PRESS_CELSIUS_NANO_SCALE
+ #define ST_PRESS_LPS331AP_BDU_ADDR		0x20
+ #define ST_PRESS_LPS331AP_BDU_MASK		0x04
+ #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR		0x22
+@@ -65,9 +68,14 @@
+ #define ST_PRESS_LPS331AP_IHL_IRQ_ADDR		0x22
+ #define ST_PRESS_LPS331AP_IHL_IRQ_MASK		0x80
+ #define ST_PRESS_LPS331AP_MULTIREAD_BIT		true
+-#define ST_PRESS_LPS331AP_TEMP_OFFSET		42500
+ 
+ /* CUSTOM VALUES FOR LPS001WP SENSOR */
++
++/* LPS001WP pressure resolution */
++#define ST_PRESS_LPS001WP_LSB_PER_MBAR		16UL
++/* LPS001WP temperature resolution */
++#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS	64UL
++
+ #define ST_PRESS_LPS001WP_WAI_EXP		0xba
+ #define ST_PRESS_LPS001WP_ODR_ADDR		0x20
+ #define ST_PRESS_LPS001WP_ODR_MASK		0x30
+@@ -76,6 +84,8 @@
+ #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL	0x03
+ #define ST_PRESS_LPS001WP_PW_ADDR		0x20
+ #define ST_PRESS_LPS001WP_PW_MASK		0x40
++#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
++	(100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
+ #define ST_PRESS_LPS001WP_BDU_ADDR		0x20
+ #define ST_PRESS_LPS001WP_BDU_MASK		0x04
+ #define ST_PRESS_LPS001WP_MULTIREAD_BIT		true
+@@ -92,11 +102,6 @@
+ #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL	0x04
+ #define ST_PRESS_LPS25H_PW_ADDR			0x20
+ #define ST_PRESS_LPS25H_PW_MASK			0x80
+-#define ST_PRESS_LPS25H_FS_ADDR			0x00
+-#define ST_PRESS_LPS25H_FS_MASK			0x00
+-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL		0x00
+-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN	ST_PRESS_KPASCAL_NANO_SCALE
+-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN	ST_PRESS_CELSIUS_NANO_SCALE
+ #define ST_PRESS_LPS25H_BDU_ADDR		0x20
+ #define ST_PRESS_LPS25H_BDU_MASK		0x04
+ #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR		0x23
+@@ -105,7 +110,6 @@
+ #define ST_PRESS_LPS25H_IHL_IRQ_ADDR		0x22
+ #define ST_PRESS_LPS25H_IHL_IRQ_MASK		0x80
+ #define ST_PRESS_LPS25H_MULTIREAD_BIT		true
+-#define ST_PRESS_LPS25H_TEMP_OFFSET		42500
+ #define ST_PRESS_LPS25H_OUT_XL_ADDR		0x28
+ #define ST_TEMP_LPS25H_OUT_L_ADDR		0x2b
+ 
+@@ -157,7 +161,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
+ 			.storagebits = 16,
+ 			.endianness = IIO_LE,
+ 		},
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
++		.info_mask_separate =
++			BIT(IIO_CHAN_INFO_RAW) |
++			BIT(IIO_CHAN_INFO_SCALE),
+ 		.modified = 0,
+ 	},
+ 	{
+@@ -173,7 +179,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
+ 		},
+ 		.info_mask_separate =
+ 			BIT(IIO_CHAN_INFO_RAW) |
+-			BIT(IIO_CHAN_INFO_OFFSET),
++			BIT(IIO_CHAN_INFO_SCALE),
+ 		.modified = 0,
+ 	},
+ 	IIO_CHAN_SOFT_TIMESTAMP(1)
+@@ -208,11 +214,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ 			.addr = ST_PRESS_LPS331AP_FS_ADDR,
+ 			.mask = ST_PRESS_LPS331AP_FS_MASK,
+ 			.fs_avl = {
++				/*
++				 * Pressure and temperature sensitivity values
++				 * as defined in table 3 of LPS331AP datasheet.
++				 */
+ 				[0] = {
+ 					.num = ST_PRESS_FS_AVL_1260MB,
+-					.value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
+-					.gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
+-					.gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
++					.gain = ST_PRESS_KPASCAL_NANO_SCALE,
++					.gain2 = ST_PRESS_LSB_PER_CELSIUS,
+ 				},
+ 			},
+ 		},
+@@ -254,7 +263,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ 		},
+ 		.fs = {
+-			.addr = 0,
++			.fs_avl = {
++				/*
++				 * Pressure and temperature resolution values
++				 * as defined in table 3 of LPS001WP datasheet.
++				 */
++				[0] = {
++					.num = ST_PRESS_FS_AVL_1100MB,
++					.gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
++					.gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
++				},
++			},
+ 		},
+ 		.bdu = {
+ 			.addr = ST_PRESS_LPS001WP_BDU_ADDR,
+@@ -291,14 +310,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ 		},
+ 		.fs = {
+-			.addr = ST_PRESS_LPS25H_FS_ADDR,
+-			.mask = ST_PRESS_LPS25H_FS_MASK,
+ 			.fs_avl = {
++				/*
++				 * Pressure and temperature sensitivity values
++				 * as defined in table 3 of LPS25H datasheet.
++				 */
+ 				[0] = {
+ 					.num = ST_PRESS_FS_AVL_1260MB,
+-					.value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
+-					.gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
+-					.gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
++					.gain = ST_PRESS_KPASCAL_NANO_SCALE,
++					.gain2 = ST_PRESS_LSB_PER_CELSIUS,
+ 				},
+ 			},
+ 		},
+@@ -354,26 +374,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
+ 
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_SCALE:
+-		*val = 0;
+-
+ 		switch (ch->type) {
+ 		case IIO_PRESSURE:
++			*val = 0;
+ 			*val2 = press_data->current_fullscale->gain;
+-			break;
++			return IIO_VAL_INT_PLUS_NANO;
+ 		case IIO_TEMP:
++			*val = MCELSIUS_PER_CELSIUS;
+ 			*val2 = press_data->current_fullscale->gain2;
+-			break;
++			return IIO_VAL_FRACTIONAL;
+ 		default:
+ 			err = -EINVAL;
+ 			goto read_error;
+ 		}
+ 
+-		return IIO_VAL_INT_PLUS_NANO;
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		switch (ch->type) {
+ 		case IIO_TEMP:
+-			*val = 425;
+-			*val2 = 10;
++			*val = ST_PRESS_MILLI_CELSIUS_OFFSET *
++			       press_data->current_fullscale->gain2;
++			*val2 = MCELSIUS_PER_CELSIUS;
+ 			break;
+ 		default:
+ 			err = -EINVAL;
+diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
+index f4d29d5..e2f926c 100644
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -64,6 +64,7 @@ struct as3935_state {
+ 	struct delayed_work work;
+ 
+ 	u32 tune_cap;
++	u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
+ 	u8 buf[2] ____cacheline_aligned;
+ };
+ 
+@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
+ 		.type           = IIO_PROXIMITY,
+ 		.info_mask_separate =
+ 			BIT(IIO_CHAN_INFO_RAW) |
+-			BIT(IIO_CHAN_INFO_PROCESSED),
++			BIT(IIO_CHAN_INFO_PROCESSED) |
++			BIT(IIO_CHAN_INFO_SCALE),
+ 		.scan_index     = 0,
+ 		.scan_type = {
+ 			.sign           = 'u',
+@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
+ 		/* storm out of range */
+ 		if (*val == AS3935_DATA_MASK)
+ 			return -EINVAL;
+-		*val *= 1000;
++
++		if (m == IIO_CHAN_INFO_PROCESSED)
++			*val *= 1000;
++		break;
++	case IIO_CHAN_INFO_SCALE:
++		*val = 1000;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
+ 	ret = as3935_read(st, AS3935_DATA, &val);
+ 	if (ret)
+ 		goto err_read;
+-	val &= AS3935_DATA_MASK;
+-	val *= 1000;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
++	st->buffer[0] = val & AS3935_DATA_MASK;
++	iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
++					   pf->timestamp);
+ err_read:
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 1d92e09..c995255 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
+ 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
+ 
+ 	/* Check if the device started its remove_one */
+-	spin_lock_irq(&cm.lock);
++	spin_lock_irqsave(&cm.lock, flags);
+ 	if (!cm_dev->going_down) {
+ 		queue_delayed_work(cm.wq, &work->work, 0);
+ 	} else {
+ 		kfree(work);
+ 		ret = -ENODEV;
+ 	}
+-	spin_unlock_irq(&cm.lock);
++	spin_unlock_irqrestore(&cm.lock, flags);
+ 
+ out:
+ 	return ret;
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index 105246f..5fc6233 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ 
+ 	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ 	ah->av.ib.g_slid  = ah_attr->src_path_bits;
++	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ 	if (ah_attr->ah_flags & IB_AH_GRH) {
+ 		ah->av.ib.g_slid   |= 0x80;
+ 		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
+@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ 		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
+ 			--ah->av.ib.stat_rate;
+ 	}
+-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ 
+ 	return &ah->ibah;
+ }
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index a9e3bcc..a0ecf08 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -683,8 +683,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ 		 * initialization that is needed.
+ 		 */
+ 		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+-		if (!priv)
++		if (IS_ERR(priv)) {
++			ret = priv;
+ 			goto bail_qp;
++		}
+ 		qp->priv = priv;
+ 		qp->timeout_jiffies =
+ 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index bf4959f..94f1bf7 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(void)
+ 			break;
+ 	}
+ 
++	/*
++	 * Order is important here to make sure any unity map requirements are
++	 * fulfilled. The unity mappings are created and written to the device
++	 * table during the amd_iommu_init_api() call.
++	 *
++	 * After that we call init_device_table_dma() to make sure any
++	 * uninitialized DTE will block DMA, and in the end we flush the caches
++	 * of all IOMMUs to make sure the changes to the device table are
++	 * active.
++	 */
++	ret = amd_iommu_init_api();
++
+ 	init_device_table_dma();
+ 
+ 	for_each_iommu(iommu)
+ 		iommu_flush_all_caches(iommu);
+ 
+-	ret = amd_iommu_init_api();
+-
+ 	if (!ret)
+ 		print_iommu_info();
+ 
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 4ff73ff..3e20208 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1942,6 +1942,7 @@ static struct iommu_ops arm_smmu_ops = {
+ 	.attach_dev		= arm_smmu_attach_dev,
+ 	.map			= arm_smmu_map,
+ 	.unmap			= arm_smmu_unmap,
++	.map_sg			= default_iommu_map_sg,
+ 	.iova_to_phys		= arm_smmu_iova_to_phys,
+ 	.add_device		= arm_smmu_add_device,
+ 	.remove_device		= arm_smmu_remove_device,
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index e1852e8..ae364e0 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
+ 			}
+ 		}
+ 
+-		iommu_flush_write_buffer(iommu);
+-		iommu_set_root_entry(iommu);
+-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+-
+ 		if (!ecap_pass_through(iommu->ecap))
+ 			hw_pass_through = 0;
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+@@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
+ #endif
+ 	}
+ 
++	/*
++	 * Now that qi is enabled on all iommus, set the root entry and flush
++	 * caches. This is required on some Intel X58 chipsets, otherwise the
++	 * flush_context function will loop forever and the boot hangs.
++	 */
++	for_each_active_iommu(iommu, drhd) {
++		iommu_flush_write_buffer(iommu);
++		iommu_set_root_entry(iommu);
++		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
++		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
++	}
++
+ 	if (iommu_pass_through)
+ 		iommu_identity_mapping |= IDENTMAP_ALL;
+ 
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index 5710a06..0ea8d9a 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
+ 	dte_addr = virt_to_phys(rk_domain->dt);
+ 	for (i = 0; i < iommu->num_mmu; i++) {
+ 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+-		rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
++		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ 	}
+ 
+diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
+index 4dffccf..40fb120 100644
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -734,6 +734,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
+ 		/* verify that it doesn't conflict with an IPI irq */
+ 		if (test_bit(spec->hwirq, ipi_resrv))
+ 			return -EBUSY;
++
++		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
++
++		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
++						     &gic_level_irq_controller,
++						     NULL);
+ 	} else {
+ 		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
+ 		if (base_hwirq == gic_shared_intrs) {
+@@ -855,10 +861,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
+ 						    &gic_level_irq_controller,
+ 						    NULL);
+ 		if (ret)
+-			return ret;
++			goto error;
+ 	}
+ 
+ 	return 0;
++
++error:
++	irq_domain_free_irqs_parent(d, virq, nr_irqs);
++	return ret;
+ }
+ 
+ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index d7723ce..12690c1 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -1408,47 +1408,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
+ static long uvc_v4l2_compat_ioctl32(struct file *file,
+ 		     unsigned int cmd, unsigned long arg)
+ {
++	struct uvc_fh *handle = file->private_data;
+ 	union {
+ 		struct uvc_xu_control_mapping xmap;
+ 		struct uvc_xu_control_query xqry;
+ 	} karg;
+ 	void __user *up = compat_ptr(arg);
+-	mm_segment_t old_fs;
+ 	long ret;
+ 
+ 	switch (cmd) {
+ 	case UVCIOC_CTRL_MAP32:
+-		cmd = UVCIOC_CTRL_MAP;
+ 		ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
++		if (ret)
++			return ret;
++		ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
++		if (ret)
++			return ret;
++		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
++		if (ret)
++			return ret;
++
+ 		break;
+ 
+ 	case UVCIOC_CTRL_QUERY32:
+-		cmd = UVCIOC_CTRL_QUERY;
+ 		ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
++		if (ret)
++			return ret;
++		ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
++		if (ret)
++			return ret;
++		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
++		if (ret)
++			return ret;
+ 		break;
+ 
+ 	default:
+ 		return -ENOIOCTLCMD;
+ 	}
+ 
+-	old_fs = get_fs();
+-	set_fs(KERNEL_DS);
+-	ret = video_ioctl2(file, cmd, (unsigned long)&karg);
+-	set_fs(old_fs);
+-
+-	if (ret < 0)
+-		return ret;
+-
+-	switch (cmd) {
+-	case UVCIOC_CTRL_MAP:
+-		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+-		break;
+-
+-	case UVCIOC_CTRL_QUERY:
+-		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+-		break;
+-	}
+-
+ 	return ret;
+ }
+ #endif
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index 21825dd..859b4a1 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
+ 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+ 			   GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
+ 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+-			   GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
++			   GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
+ 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
+ 			   GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
+ 			   p->cycle2cyclesamecsen);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 96fddb0..4dd0391 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ 	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
+ 	struct ubi_volume *vol = ubi->volumes[idx];
+ 	struct ubi_vid_hdr *vid_hdr;
++	uint32_t crc;
+ 
+ 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ 	if (!vid_hdr)
+@@ -599,14 +600,8 @@ retry:
+ 		goto out_put;
+ 	}
+ 
+-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+-	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+-	if (err) {
+-		up_read(&ubi->fm_eba_sem);
+-		goto write_error;
+-	}
++	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
+ 
+-	data_size = offset + len;
+ 	mutex_lock(&ubi->buf_mutex);
+ 	memset(ubi->peb_buf + offset, 0xFF, len);
+ 
+@@ -621,6 +616,19 @@ retry:
+ 
+ 	memcpy(ubi->peb_buf + offset, buf, len);
+ 
++	data_size = offset + len;
++	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
++	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
++	vid_hdr->copy_flag = 1;
++	vid_hdr->data_size = cpu_to_be32(data_size);
++	vid_hdr->data_crc = cpu_to_be32(crc);
++	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
++	if (err) {
++		mutex_unlock(&ubi->buf_mutex);
++		up_read(&ubi->fm_eba_sem);
++		goto write_error;
++	}
++
+ 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+ 	if (err) {
+ 		mutex_unlock(&ubi->buf_mutex);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 9fcb489..c70e515 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1092,12 +1092,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+ {
++	struct geneve_dev *geneve = netdev_priv(dev);
+ 	/* The max_mtu calculation does not take account of GENEVE
+ 	 * options, to avoid excluding potentially valid
+ 	 * configurations.
+ 	 */
+-	int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
+-		- dev->hard_header_len;
++	int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
++
++	if (geneve->remote.sa.sa_family == AF_INET6)
++		max_mtu -= sizeof(struct ipv6hdr);
++	else
++		max_mtu -= sizeof(struct iphdr);
+ 
+ 	if (new_mtu < 68)
+ 		return -EINVAL;
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 9e803bb..8f3c55d 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2564,6 +2564,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ 		u64_stats_update_begin(&secy_stats->syncp);
+ 		secy_stats->stats.OutPktsUntagged++;
+ 		u64_stats_update_end(&secy_stats->syncp);
++		skb->dev = macsec->real_dev;
+ 		len = skb->len;
+ 		ret = dev_queue_xmit(skb);
+ 		count_tx(dev, ret, len);
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 2fb31ed..d4425c56 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -852,6 +852,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
+ 	if (cdc_ncm_init(dev))
+ 		goto error2;
+ 
++	/* Some firmwares need a pause here or they will silently fail
++	 * to set up the interface properly.  This value was decided
++	 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
++	 * firmware.
++	 */
++	usleep_range(10000, 20000);
++
+ 	/* configure data interface */
+ 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
+ 	if (temp) {
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index e85e073..06664ba 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2771,6 +2771,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
+ 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
+ 	    !info->attrs[HWSIM_ATTR_FLAGS] ||
+ 	    !info->attrs[HWSIM_ATTR_COOKIE] ||
++	    !info->attrs[HWSIM_ATTR_SIGNAL] ||
+ 	    !info->attrs[HWSIM_ATTR_TX_INFO])
+ 		goto out;
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
+index 0f48048..3a0faa8 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/core.c
++++ b/drivers/net/wireless/realtek/rtlwifi/core.c
+@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
+ void rtl_addr_delay(u32 addr)
+ {
+ 	if (addr == 0xfe)
+-		msleep(50);
++		mdelay(50);
+ 	else if (addr == 0xfd)
+ 		msleep(5);
+ 	else if (addr == 0xfc)
+@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+ 		rtl_addr_delay(addr);
+ 	} else {
+ 		rtl_set_rfreg(hw, rfpath, addr, mask, data);
+-		usleep_range(1, 2);
++		udelay(1);
+ 	}
+ }
+ EXPORT_SYMBOL(rtl_rfreg_delay);
+@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
+ 		rtl_addr_delay(addr);
+ 	} else {
+ 		rtl_set_bbreg(hw, addr, MASKDWORD, data);
+-		usleep_range(1, 2);
++		udelay(1);
+ 	}
+ }
+ EXPORT_SYMBOL(rtl_bb_delay);
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index e7bfc17..6ec743f 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
+ EXPORT_SYMBOL_GPL(of_irq_to_resource);
+ 
+ /**
+- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
++ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
+  * @dev: pointer to device tree node
+- * @index: zero-based index of the irq
+- *
+- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+- * is not yet created.
++ * @index: zero-based index of the IRQ
+  *
++ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
++ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
++ * of any other failure.
+  */
+ int of_irq_get(struct device_node *dev, int index)
+ {
+@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
+ EXPORT_SYMBOL_GPL(of_irq_get);
+ 
+ /**
+- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
++ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
+  * @dev: pointer to device tree node
+- * @name: irq name
++ * @name: IRQ name
+  *
+- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+- * is not yet created, or error code in case of any other failure.
++ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
++ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
++ * of any other failure.
+  */
+ int of_irq_get_byname(struct device_node *dev, const char *name)
+ {
+diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
+index dfbab61..1fa3a32 100644
+--- a/drivers/pci/vc.c
++++ b/drivers/pci/vc.c
+@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
+ 		else
+ 			pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ 					      *(u16 *)buf);
+-		buf += 2;
++		buf += 4;
+ 	}
+-	len += 2;
++	len += 4;
+ 
+ 	/*
+ 	 * If we have any Low Priority VCs and a VC Arbitration Table Offset
+diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
+index 56a17ec..6c7fe477 100644
+--- a/drivers/regulator/qcom_smd-regulator.c
++++ b/drivers/regulator/qcom_smd-regulator.c
+@@ -140,6 +140,18 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
+ 	.enable = rpm_reg_enable,
+ 	.disable = rpm_reg_disable,
+ 	.is_enabled = rpm_reg_is_enabled,
++	.list_voltage = regulator_list_voltage_linear_range,
++
++	.get_voltage = rpm_reg_get_voltage,
++	.set_voltage = rpm_reg_set_voltage,
++
++	.set_load = rpm_reg_set_load,
++};
++
++static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
++	.enable = rpm_reg_enable,
++	.disable = rpm_reg_disable,
++	.is_enabled = rpm_reg_is_enabled,
+ 
+ 	.get_voltage = rpm_reg_get_voltage,
+ 	.set_voltage = rpm_reg_set_voltage,
+@@ -247,7 +259,7 @@ static const struct regulator_desc pm8941_nldo = {
+ static const struct regulator_desc pm8941_lnldo = {
+ 	.fixed_uV = 1740000,
+ 	.n_voltages = 1,
+-	.ops = &rpm_smps_ldo_ops,
++	.ops = &rpm_smps_ldo_ops_fixed,
+ };
+ 
+ static const struct regulator_desc pm8941_switch = {
+diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
+index d4c2856..3ddc85e 100644
+--- a/drivers/scsi/53c700.c
++++ b/drivers/scsi/53c700.c
+@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
+ 		} else {
+ 			struct scsi_cmnd *SCp;
+ 
+-			SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
++			SCp = SDp->current_cmnd;
+ 			if(unlikely(SCp == NULL)) {
+ 				sdev_printk(KERN_ERR, SDp,
+ 					"no saved request for untagged cmd\n");
+@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
+ 		       slot->tag, slot);
+ 	} else {
+ 		slot->tag = SCSI_NO_TAG;
+-		/* must populate current_cmnd for scsi_host_find_tag to work */
++		/* save current command for reselection */
+ 		SCp->device->current_cmnd = SCp;
+ 	}
+ 	/* sanity check: some of the commands generated by the mid-layer
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 984ddcb..1b9c049 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1127,7 +1127,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
+  */
+ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
+ {
+-	scmd->device->host->host_failed--;
+ 	scmd->eh_eflags = 0;
+ 	list_move_tail(&scmd->eh_entry, done_q);
+ }
+@@ -2226,6 +2225,9 @@ int scsi_error_handler(void *data)
+ 		else
+ 			scsi_unjam_host(shost);
+ 
++		/* All scmds have been handled */
++		shost->host_failed = 0;
++
+ 		/*
+ 		 * Note - if the above fails completely, the action is to take
+ 		 * individual devices offline and flush the queue of any
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f52b74c..41c3a2c 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2862,10 +2862,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	if (sdkp->opt_xfer_blocks &&
+ 	    sdkp->opt_xfer_blocks <= dev_max &&
+ 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
+-	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+-		rw_max = q->limits.io_opt =
+-			sdkp->opt_xfer_blocks * sdp->sector_size;
+-	else
++	    logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
++		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
++		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
++	} else
+ 		rw_max = BLK_DEF_MAX_SECTORS;
+ 
+ 	/* Combine with controller limits */
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 654630b..765a6f1 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
+ 	return blocks << (ilog2(sdev->sector_size) - 9);
+ }
+ 
++static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
++{
++	return blocks * sdev->sector_size;
++}
++
+ /*
+  * A DIF-capable target device can be formatted with different
+  * protection schemes.  Currently 0 through 3 are defined:
+diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
+index a8f533a..ec12181 100644
+--- a/drivers/staging/iio/accel/sca3000_core.c
++++ b/drivers/staging/iio/accel/sca3000_core.c
+@@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
+ 		goto error_ret_mut;
+ 	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ 	mutex_unlock(&st->lock);
+-	if (ret)
++	if (ret < 0)
+ 		goto error_ret;
+ 	val = ret;
+ 	if (base_freq > 0)
+diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
+index 6ceac4f..5b4b47e 100644
+--- a/drivers/thermal/cpu_cooling.c
++++ b/drivers/thermal/cpu_cooling.c
+@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
+ 		goto free_power_table;
+ 	}
+ 
+-	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+-		 cpufreq_dev->id);
+-
+-	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+-						      &cpufreq_cooling_ops);
+-	if (IS_ERR(cool_dev))
+-		goto remove_idr;
+-
+ 	/* Fill freq-table in descending order of frequencies */
+ 	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+ 		freq = find_next_max(table, freq);
+@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
+ 			pr_debug("%s: freq:%u KHz\n", __func__, freq);
+ 	}
+ 
++	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
++		 cpufreq_dev->id);
++
++	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
++						      &cpufreq_cooling_ops);
++	if (IS_ERR(cool_dev))
++		goto remove_idr;
++
+ 	cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
+ 	cpufreq_dev->cool_dev = cool_dev;
+ 
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index f973bfc..1e93a37 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
+ 
+ static void do_compute_shiftstate(void)
+ {
+-	unsigned int i, j, k, sym, val;
++	unsigned int k, sym, val;
+ 
+ 	shift_state = 0;
+ 	memset(shift_down, 0, sizeof(shift_down));
+ 
+-	for (i = 0; i < ARRAY_SIZE(key_down); i++) {
+-
+-		if (!key_down[i])
++	for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
++		sym = U(key_maps[0][k]);
++		if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+ 			continue;
+ 
+-		k = i * BITS_PER_LONG;
+-
+-		for (j = 0; j < BITS_PER_LONG; j++, k++) {
+-
+-			if (!test_bit(k, key_down))
+-				continue;
++		val = KVAL(sym);
++		if (val == KVAL(K_CAPSSHIFT))
++			val = KVAL(K_SHIFT);
+ 
+-			sym = U(key_maps[0][k]);
+-			if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+-				continue;
+-
+-			val = KVAL(sym);
+-			if (val == KVAL(K_CAPSSHIFT))
+-				val = KVAL(K_SHIFT);
+-
+-			shift_down[val]++;
+-			shift_state |= (1 << val);
+-		}
++		shift_down[val]++;
++		shift_state |= BIT(val);
+ 	}
+ }
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index bd523ad..e9e29de 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
+ 	vc->vc_complement_mask = 0;
+ 	vc->vc_can_do_color = 0;
+ 	vc->vc_panic_force_write = false;
++	vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
+ 	vc->vc_sw->con_init(vc, init);
+ 	if (!vc->vc_complement_mask)
+ 		vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
+diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
+index 504708f..6c6040c 100644
+--- a/drivers/usb/common/usb-otg-fsm.c
++++ b/drivers/usb/common/usb-otg-fsm.c
+@@ -21,6 +21,7 @@
+  * 675 Mass Ave, Cambridge, MA 02139, USA.
+  */
+ 
++#include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/mutex.h>
+@@ -452,3 +453,4 @@ int otg_statemachine(struct otg_fsm *fsm)
+ 	return state_changed;
+ }
+ EXPORT_SYMBOL_GPL(otg_statemachine);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 980fc57..2d107d0 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2597,26 +2597,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
+  * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
+  * deallocated.
+  *
+- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
+- * freed.  When hcd_release() is called for either hcd in a peer set
+- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
+- * block new peering attempts
++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
++ * freed.  When hcd_release() is called for either hcd in a peer set,
++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
+  */
+ static void hcd_release(struct kref *kref)
+ {
+ 	struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
+ 
+ 	mutex_lock(&usb_port_peer_mutex);
+-	if (usb_hcd_is_primary_hcd(hcd)) {
+-		kfree(hcd->address0_mutex);
+-		kfree(hcd->bandwidth_mutex);
+-	}
+ 	if (hcd->shared_hcd) {
+ 		struct usb_hcd *peer = hcd->shared_hcd;
+ 
+ 		peer->shared_hcd = NULL;
+-		if (peer->primary_hcd == hcd)
+-			peer->primary_hcd = NULL;
++		peer->primary_hcd = NULL;
++	} else {
++		kfree(hcd->address0_mutex);
++		kfree(hcd->bandwidth_mutex);
+ 	}
+ 	mutex_unlock(&usb_port_peer_mutex);
+ 	kfree(hcd);
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index 3c58d63..dec0b21 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -64,6 +64,17 @@
+ 	DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt),		\
+ 				dev_name(hsotg->dev), ##__VA_ARGS__)
+ 
++#ifdef CONFIG_MIPS
++/*
++ * There are some MIPS machines that can run in either big-endian
++ * or little-endian mode and that use the dwc2 register without
++ * a byteswap in both ways.
++ * Unlike other architectures, MIPS apparently does not require a
++ * barrier before the __raw_writel() to synchronize with DMA but does
++ * require the barrier after the __raw_writel() to serialize a set of
++ * writes. This set of operations was added specifically for MIPS and
++ * should only be used there.
++ */
+ static inline u32 dwc2_readl(const void __iomem *addr)
+ {
+ 	u32 value = __raw_readl(addr);
+@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
+ 	pr_info("INFO:: wrote %08x to %p\n", value, addr);
+ #endif
+ }
++#else
++/* Normal architectures just use readl/write */
++static inline u32 dwc2_readl(const void __iomem *addr)
++{
++	return readl(addr);
++}
++
++static inline void dwc2_writel(u32 value, void __iomem *addr)
++{
++	writel(value, addr);
++
++#ifdef DWC2_LOG_WRITES
++	pr_info("info:: wrote %08x to %p\n", value, addr);
++#endif
++}
++#endif
+ 
+ /* Maximum number of Endpoints/HostChannels */
+ #define MAX_EPS_CHANNELS	16
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 7b6d74f..476c0e3 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -75,7 +75,7 @@ struct virtio_balloon {
+ 
+ 	/* The array of pfns we tell the Host about. */
+ 	unsigned int num_pfns;
+-	u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
++	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
+ 
+ 	/* Memory statistics */
+ 	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
+@@ -127,14 +127,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
+ 
+ }
+ 
+-static void set_page_pfns(u32 pfns[], struct page *page)
++static void set_page_pfns(struct virtio_balloon *vb,
++			  __virtio32 pfns[], struct page *page)
+ {
+ 	unsigned int i;
+ 
+ 	/* Set balloon pfns pointing at this page.
+ 	 * Note that the first pfn points at start of the page. */
+ 	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
+-		pfns[i] = page_to_balloon_pfn(page) + i;
++		pfns[i] = cpu_to_virtio32(vb->vdev,
++					  page_to_balloon_pfn(page) + i);
+ }
+ 
+ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+@@ -158,7 +160,7 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+ 			msleep(200);
+ 			break;
+ 		}
+-		set_page_pfns(vb->pfns + vb->num_pfns, page);
++		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 		if (!virtio_has_feature(vb->vdev,
+ 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+@@ -177,10 +179,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
+ static void release_pages_balloon(struct virtio_balloon *vb)
+ {
+ 	unsigned int i;
++	struct page *page;
+ 
+ 	/* Find pfns pointing at start of each page, get pages and free them. */
+ 	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+-		struct page *page = balloon_pfn_to_page(vb->pfns[i]);
++		page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
++							   vb->pfns[i]));
+ 		if (!virtio_has_feature(vb->vdev,
+ 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ 			adjust_managed_page_count(page, 1);
+@@ -203,7 +207,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ 		page = balloon_page_dequeue(vb_dev_info);
+ 		if (!page)
+ 			break;
+-		set_page_pfns(vb->pfns + vb->num_pfns, page);
++		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 	}
+ 
+@@ -471,13 +475,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
+ 	__count_vm_event(BALLOON_MIGRATE);
+ 	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
+ 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
+-	set_page_pfns(vb->pfns, newpage);
++	set_page_pfns(vb, vb->pfns, newpage);
+ 	tell_host(vb, vb->inflate_vq);
+ 
+ 	/* balloon's page migration 2nd step -- deflate "page" */
+ 	balloon_page_delete(page);
+ 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
+-	set_page_pfns(vb->pfns, page);
++	set_page_pfns(vb, vb->pfns, page);
+ 	tell_host(vb, vb->deflate_vq);
+ 
+ 	mutex_unlock(&vb->balloon_lock);
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index d46839f..e4db19e 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
+ static void balloon_process(struct work_struct *work);
+ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+ 
+-static void release_memory_resource(struct resource *resource);
+-
+ /* When ballooning out (allocating memory to return to Xen) we don't really
+    want the kernel to try too hard since that can trigger the oom killer. */
+ #define GFP_BALLOON \
+@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
+ }
+ 
+ #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
++static void release_memory_resource(struct resource *resource)
++{
++	if (!resource)
++		return;
++
++	/*
++	 * No need to reset region to identity mapped since we now
++	 * know that no I/O can be in this region
++	 */
++	release_resource(resource);
++	kfree(resource);
++}
++
+ static struct resource *additional_memory_resource(phys_addr_t size)
+ {
+ 	struct resource *res;
+@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
+ 	return res;
+ }
+ 
+-static void release_memory_resource(struct resource *resource)
+-{
+-	if (!resource)
+-		return;
+-
+-	/*
+-	 * No need to reset region to identity mapped since we now
+-	 * know that no I/O can be in this region
+-	 */
+-	release_resource(resource);
+-	kfree(resource);
+-}
+-
+ static enum bp_state reserve_additional_memory(void)
+ {
+ 	long credit;
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 076970a..4ce10bc 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -423,36 +423,7 @@ upload:
+ 
+ 	return 0;
+ }
+-static int __init check_prereq(void)
+-{
+-	struct cpuinfo_x86 *c = &cpu_data(0);
+-
+-	if (!xen_initial_domain())
+-		return -ENODEV;
+-
+-	if (!acpi_gbl_FADT.smi_command)
+-		return -ENODEV;
+-
+-	if (c->x86_vendor == X86_VENDOR_INTEL) {
+-		if (!cpu_has(c, X86_FEATURE_EST))
+-			return -ENODEV;
+ 
+-		return 0;
+-	}
+-	if (c->x86_vendor == X86_VENDOR_AMD) {
+-		/* Copied from powernow-k8.h, can't include ../cpufreq/powernow
+-		 * as we get compile warnings for the static functions.
+-		 */
+-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
+-#define USE_HW_PSTATE                   0x00000080
+-		u32 eax, ebx, ecx, edx;
+-		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
+-		if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
+-			return -ENODEV;
+-		return 0;
+-	}
+-	return -ENODEV;
+-}
+ /* acpi_perf_data is a pointer to percpu data. */
+ static struct acpi_processor_performance __percpu *acpi_perf_data;
+ 
+@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
+ static int __init xen_acpi_processor_init(void)
+ {
+ 	unsigned int i;
+-	int rc = check_prereq();
++	int rc;
+ 
+-	if (rc)
+-		return rc;
++	if (!xen_initial_domain())
++		return -ENODEV;
+ 
+ 	nr_acpi_bits = get_max_acpi_id() + 1;
+ 	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index ec7928a..234707c 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1552,6 +1552,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 		       trans->transid, root->fs_info->generation);
+ 
+ 	if (!should_cow_block(trans, root, buf)) {
++		trans->dirty = true;
+ 		*cow_ret = buf;
+ 		return 0;
+ 	}
+@@ -2773,8 +2774,10 @@ again:
+ 			 * then we don't want to set the path blocking,
+ 			 * so we test it here
+ 			 */
+-			if (!should_cow_block(trans, root, b))
++			if (!should_cow_block(trans, root, b)) {
++				trans->dirty = true;
+ 				goto cow_done;
++			}
+ 
+ 			/*
+ 			 * must have write locks on this node and the
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 84e060e..78f1b57 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -7929,7 +7929,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+ 			 buf->start + buf->len - 1, GFP_NOFS);
+ 	}
+-	trans->blocks_used++;
++	trans->dirty = true;
+ 	/* this returns a buffer locked for blocking */
+ 	return buf;
+ }
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 00b8f37..d7c138f 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -239,7 +239,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ 	trans->aborted = errno;
+ 	/* Nothing used. The other threads that have joined this
+ 	 * transaction may be able to continue. */
+-	if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
++	if (!trans->dirty && list_empty(&trans->new_bgs)) {
+ 		const char *errstr;
+ 
+ 		errstr = btrfs_decode_error(errno);
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 72be51f..c0b501a 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
+ 	u64 chunk_bytes_reserved;
+ 	unsigned long use_count;
+ 	unsigned long blocks_reserved;
+-	unsigned long blocks_used;
+ 	unsigned long delayed_ref_updates;
+ 	struct btrfs_transaction *transaction;
+ 	struct btrfs_block_rsv *block_rsv;
+@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
+ 	bool can_flush_pending_bgs;
+ 	bool reloc_reserved;
+ 	bool sync;
++	bool dirty;
+ 	unsigned int type;
+ 	/*
+ 	 * this root is only needed to validate that the root passed to
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 5a53ac6..02b071bf 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
+ 	case SFM_SLASH:
+ 		*target = '\\';
+ 		break;
++	case SFM_SPACE:
++		*target = ' ';
++		break;
++	case SFM_PERIOD:
++		*target = '.';
++		break;
+ 	default:
+ 		return false;
+ 	}
+@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
+ 	return dest_char;
+ }
+ 
+-static __le16 convert_to_sfm_char(char src_char)
++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
+ {
+ 	__le16 dest_char;
+ 
+@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
+ 	case '|':
+ 		dest_char = cpu_to_le16(SFM_PIPE);
+ 		break;
++	case '.':
++		if (end_of_string)
++			dest_char = cpu_to_le16(SFM_PERIOD);
++		else
++			dest_char = 0;
++		break;
++	case ' ':
++		if (end_of_string)
++			dest_char = cpu_to_le16(SFM_SPACE);
++		else
++			dest_char = 0;
++		break;
+ 	default:
+ 		dest_char = 0;
+ 	}
+@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 		/* see if we must remap this char */
+ 		if (map_chars == SFU_MAP_UNI_RSVD)
+ 			dst_char = convert_to_sfu_char(src_char);
+-		else if (map_chars == SFM_MAP_UNI_RSVD)
+-			dst_char = convert_to_sfm_char(src_char);
+-		else
++		else if (map_chars == SFM_MAP_UNI_RSVD) {
++			bool end_of_string;
++
++			if (i == srclen - 1)
++				end_of_string = true;
++			else
++				end_of_string = false;
++
++			dst_char = convert_to_sfm_char(src_char, end_of_string);
++		} else
+ 			dst_char = 0;
+ 		/*
+ 		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+index bdc52cb..479bc0a 100644
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -64,6 +64,8 @@
+ #define SFM_LESSTHAN    ((__u16) 0xF023)
+ #define SFM_PIPE        ((__u16) 0xF027)
+ #define SFM_SLASH       ((__u16) 0xF026)
++#define SFM_PERIOD	((__u16) 0xF028)
++#define SFM_SPACE	((__u16) 0xF029)
+ 
+ /*
+  * Mapping mechanism to use when one of the seven reserved characters is
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 6f62ac8..34cbc58 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -428,7 +428,9 @@ cifs_echo_request(struct work_struct *work)
+ 	 * server->ops->need_neg() == true. Also, no need to ping if
+ 	 * we got a response recently.
+ 	 */
+-	if (!server->ops->need_neg || server->ops->need_neg(server) ||
++
++	if (server->tcpStatus == CifsNeedReconnect ||
++	    server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+ 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
+ 	    time_before(jiffies, server->lstrp + echo_interval - HZ))
+ 		goto requeue_echo;
+diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
+index 848249f..3079b38 100644
+--- a/fs/cifs/ntlmssp.h
++++ b/fs/cifs/ntlmssp.h
+@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
+ 
+ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
+ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
+-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+ 			struct cifs_ses *ses,
+ 			const struct nls_table *nls_cp);
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index af0ec2d..e88ffe1 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
+ 	sec_blob->DomainName.MaximumLength = 0;
+ }
+ 
+-/* We do not malloc the blob, it is passed in pbuffer, because its
+-   maximum possible size is fixed and small, making this approach cleaner.
+-   This function returns the length of the data in the blob */
+-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
++static int size_of_ntlmssp_blob(struct cifs_ses *ses)
++{
++	int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
++		- CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
++
++	if (ses->domainName)
++		sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++	else
++		sz += 2;
++
++	if (ses->user_name)
++		sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
++	else
++		sz += 2;
++
++	return sz;
++}
++
++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+ 					u16 *buflen,
+ 				   struct cifs_ses *ses,
+ 				   const struct nls_table *nls_cp)
+ {
+ 	int rc;
+-	AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
++	AUTHENTICATE_MESSAGE *sec_blob;
+ 	__u32 flags;
+ 	unsigned char *tmp;
+ 
++	rc = setup_ntlmv2_rsp(ses, nls_cp);
++	if (rc) {
++		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++		*buflen = 0;
++		goto setup_ntlmv2_ret;
++	}
++	*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
++	sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
++
+ 	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+ 	sec_blob->MessageType = NtLmAuthenticate;
+ 
+@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 			flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ 	}
+ 
+-	tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
++	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+ 
+ 	sec_blob->LmChallengeResponse.BufferOffset =
+@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	sec_blob->LmChallengeResponse.Length = 0;
+ 	sec_blob->LmChallengeResponse.MaximumLength = 0;
+ 
+-	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
++	sec_blob->NtChallengeResponse.BufferOffset =
++				cpu_to_le32(tmp - *pbuffer);
+ 	if (ses->user_name != NULL) {
+-		rc = setup_ntlmv2_rsp(ses, nls_cp);
+-		if (rc) {
+-			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-			goto setup_ntlmv2_ret;
+-		}
+ 		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ 				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ 		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+@@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	}
+ 
+ 	if (ses->domainName == NULL) {
+-		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->DomainName.Length = 0;
+ 		sec_blob->DomainName.MaximumLength = 0;
+ 		tmp += 2;
+@@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 		len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
+ 				      CIFS_MAX_USERNAME_LEN, nls_cp);
+ 		len *= 2; /* unicode is 2 bytes each */
+-		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->DomainName.Length = cpu_to_le16(len);
+ 		sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
+ 		tmp += len;
+ 	}
+ 
+ 	if (ses->user_name == NULL) {
+-		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->UserName.Length = 0;
+ 		sec_blob->UserName.MaximumLength = 0;
+ 		tmp += 2;
+@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 		len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
+ 				      CIFS_MAX_USERNAME_LEN, nls_cp);
+ 		len *= 2; /* unicode is 2 bytes each */
+-		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->UserName.Length = cpu_to_le16(len);
+ 		sec_blob->UserName.MaximumLength = cpu_to_le16(len);
+ 		tmp += len;
+ 	}
+ 
+-	sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++	sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 	sec_blob->WorkstationName.Length = 0;
+ 	sec_blob->WorkstationName.MaximumLength = 0;
+ 	tmp += 2;
+@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 		(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
+ 			&& !calc_seckey(ses)) {
+ 		memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
+-		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
+ 		sec_blob->SessionKey.MaximumLength =
+ 				cpu_to_le16(CIFS_CPHTXT_SIZE);
+ 		tmp += CIFS_CPHTXT_SIZE;
+ 	} else {
+-		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->SessionKey.Length = 0;
+ 		sec_blob->SessionKey.MaximumLength = 0;
+ 	}
+ 
++	*buflen = tmp - *pbuffer;
+ setup_ntlmv2_ret:
+-	*buflen = tmp - pbuffer;
+ 	return rc;
+ }
+ 
+@@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
+ 	struct cifs_ses *ses = sess_data->ses;
+ 	__u16 bytes_remaining;
+ 	char *bcc_ptr;
+-	char *ntlmsspblob = NULL;
++	unsigned char *ntlmsspblob = NULL;
+ 	u16 blob_len;
+ 
+ 	cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
+@@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
+ 	/* Build security blob before we assemble the request */
+ 	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ 	smb_buf = (struct smb_hdr *)pSMB;
+-	/*
+-	 * 5 is an empirical value, large enough to hold
+-	 * authenticate message plus max 10 of av paris,
+-	 * domain, user, workstation names, flags, etc.
+-	 */
+-	ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
+-				GFP_KERNEL);
+-	if (!ntlmsspblob) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	rc = build_ntlmssp_auth_blob(ntlmsspblob,
++	rc = build_ntlmssp_auth_blob(&ntlmsspblob,
+ 					&blob_len, ses, sess_data->nls_cp);
+ 	if (rc)
+ 		goto out_free_ntlmsspblob;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 8f38e33..29e06db 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -588,7 +588,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	u16 blob_length = 0;
+ 	struct key *spnego_key = NULL;
+ 	char *security_blob = NULL;
+-	char *ntlmssp_blob = NULL;
++	unsigned char *ntlmssp_blob = NULL;
+ 	bool use_spnego = false; /* else use raw ntlmssp */
+ 
+ 	cifs_dbg(FYI, "Session Setup\n");
+@@ -713,13 +713,7 @@ ssetup_ntlmssp_authenticate:
+ 		iov[1].iov_len = blob_length;
+ 	} else if (phase == NtLmAuthenticate) {
+ 		req->hdr.SessionId = ses->Suid;
+-		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
+-				       GFP_KERNEL);
+-		if (ntlmssp_blob == NULL) {
+-			rc = -ENOMEM;
+-			goto ssetup_exit;
+-		}
+-		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
++		rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
+ 					     nls_cp);
+ 		if (rc) {
+ 			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
+@@ -1818,6 +1812,33 @@ SMB2_echo(struct TCP_Server_Info *server)
+ 
+ 	cifs_dbg(FYI, "In echo request\n");
+ 
++	if (server->tcpStatus == CifsNeedNegotiate) {
++		struct list_head *tmp, *tmp2;
++		struct cifs_ses *ses;
++		struct cifs_tcon *tcon;
++
++		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
++		spin_lock(&cifs_tcp_ses_lock);
++		list_for_each(tmp, &server->smb_ses_list) {
++			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
++			list_for_each(tmp2, &ses->tcon_list) {
++				tcon = list_entry(tmp2, struct cifs_tcon,
++						  tcon_list);
++				/* add check for persistent handle reconnect */
++				if (tcon && tcon->need_reconnect) {
++					spin_unlock(&cifs_tcp_ses_lock);
++					rc = smb2_reconnect(SMB2_ECHO, tcon);
++					spin_lock(&cifs_tcp_ses_lock);
++				}
++			}
++		}
++		spin_unlock(&cifs_tcp_ses_lock);
++	}
++
++	/* if no session, renegotiate failed above */
++	if (server->tcpStatus == CifsNeedNegotiate)
++		return -EIO;
++
+ 	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ 	if (rc)
+ 		return rc;
+diff --git a/fs/namei.c b/fs/namei.c
+index 30145f8..aaa3b69 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3173,6 +3173,10 @@ retry_lookup:
+ 		got_write = false;
+ 	}
+ 
++	error = follow_managed(&path, nd);
++	if (unlikely(error < 0))
++		return error;
++
+ 	if (unlikely(d_is_negative(path.dentry))) {
+ 		path_to_nameidata(&path, nd);
+ 		return -ENOENT;
+@@ -3188,10 +3192,6 @@ retry_lookup:
+ 		return -EEXIST;
+ 	}
+ 
+-	error = follow_managed(&path, nd);
+-	if (unlikely(error < 0))
+-		return error;
+-
+ 	seq = 0;	/* out of RCU mode, so the value doesn't matter */
+ 	inode = d_backing_inode(path.dentry);
+ finish_lookup:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 4fb1691..783004a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
+ 			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
+ 		}
+ 		if (type->fs_flags & FS_USERNS_VISIBLE) {
+-			if (!fs_fully_visible(type, &mnt_flags))
++			if (!fs_fully_visible(type, &mnt_flags)) {
++				put_filesystem(type);
+ 				return -EPERM;
++			}
+ 		}
+ 	}
+ 
+@@ -3245,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
+ 		if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
+ 			mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
+ 
++		/* Don't miss readonly hidden in the superblock flags */
++		if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
++			mnt_flags |= MNT_LOCK_READONLY;
++
+ 		/* Verify the mount flags are equal to or more permissive
+ 		 * than the proposed new mount.
+ 		 */
+@@ -3271,7 +3277,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
+ 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ 			struct inode *inode = child->mnt_mountpoint->d_inode;
+ 			/* Only worry about locked mounts */
+-			if (!(mnt_flags & MNT_LOCKED))
++			if (!(child->mnt.mnt_flags & MNT_LOCKED))
+ 				continue;
+ 			/* Is the directory permanetly empty? */
+ 			if (!is_empty_dir_inode(inode))
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 33eb817..a7dd1fe 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1527,9 +1527,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ 		err = PTR_ERR(inode);
+ 		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
+ 		put_nfs_open_context(ctx);
++		d_drop(dentry);
+ 		switch (err) {
+ 		case -ENOENT:
+-			d_drop(dentry);
+ 			d_add(dentry, NULL);
+ 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 			break;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 327b8c3..de2523f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2860,12 +2860,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 			call_close |= is_wronly;
+ 		else if (is_wronly)
+ 			calldata->arg.fmode |= FMODE_WRITE;
++		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
++			call_close |= is_rdwr;
+ 	} else if (is_rdwr)
+ 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
+ 
+-	if (calldata->arg.fmode == 0)
+-		call_close |= is_rdwr;
+-
+ 	if (!nfs4_valid_open_stateid(state))
+ 		call_close = 0;
+ 	spin_unlock(&state->owner->so_lock);
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 776dccb..dcb7000 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -247,7 +247,11 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
+ }
+ 
+ /* Helper function for pnfs_generic_commit_pagelist to catch an empty
+- * page list. This can happen when two commits race. */
++ * page list. This can happen when two commits race.
++ *
++ * This must be called instead of nfs_init_commit - call one or the other, but
++ * not both!
++ */
+ static bool
+ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
+ 					  struct nfs_commit_data *data,
+@@ -256,7 +260,11 @@ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
+ 	if (list_empty(pages)) {
+ 		if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
+ 			wake_up_atomic_t(&cinfo->mds->rpcs_out);
+-		nfs_commitdata_release(data);
++		/* don't call nfs_commitdata_release - it tries to put
++		 * the open_context which is not acquired until nfs_init_commit
++		 * which has not been called on @data */
++		WARN_ON_ONCE(data->context);
++		nfs_commit_free(data);
+ 		return true;
+ 	}
+ 
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 6776d7a..572e5b3 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -367,13 +367,13 @@ readpage_async_filler(void *data, struct page *page)
+ 		nfs_list_remove_request(new);
+ 		nfs_readpage_release(new);
+ 		error = desc->pgio->pg_error;
+-		goto out_unlock;
++		goto out;
+ 	}
+ 	return 0;
+ out_error:
+ 	error = PTR_ERR(new);
+-out_unlock:
+ 	unlock_page(page);
++out:
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index 1580ea6..d08cd88 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
+ 		goto out;
+ 
+ 	inode = d_inode(fh->fh_dentry);
+-	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
+-		error = -EOPNOTSUPP;
+-		goto out_errno;
+-	}
+ 
+ 	error = fh_want_write(fh);
+ 	if (error)
+ 		goto out_errno;
+ 
+-	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
++	fh_lock(fh);
++
++	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
+ 	if (error)
+-		goto out_drop_write;
+-	error = inode->i_op->set_acl(inode, argp->acl_default,
+-				     ACL_TYPE_DEFAULT);
++		goto out_drop_lock;
++	error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
+ 	if (error)
+-		goto out_drop_write;
++		goto out_drop_lock;
++
++	fh_unlock(fh);
+ 
+ 	fh_drop_write(fh);
+ 
+@@ -131,7 +130,8 @@ out:
+ 	posix_acl_release(argp->acl_access);
+ 	posix_acl_release(argp->acl_default);
+ 	return nfserr;
+-out_drop_write:
++out_drop_lock:
++	fh_unlock(fh);
+ 	fh_drop_write(fh);
+ out_errno:
+ 	nfserr = nfserrno(error);
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 01df4cd..0c89034 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
+ 		goto out;
+ 
+ 	inode = d_inode(fh->fh_dentry);
+-	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
+-		error = -EOPNOTSUPP;
+-		goto out_errno;
+-	}
+ 
+ 	error = fh_want_write(fh);
+ 	if (error)
+ 		goto out_errno;
+ 
+-	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
++	fh_lock(fh);
++
++	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
+ 	if (error)
+-		goto out_drop_write;
+-	error = inode->i_op->set_acl(inode, argp->acl_default,
+-				     ACL_TYPE_DEFAULT);
++		goto out_drop_lock;
++	error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
+ 
+-out_drop_write:
++out_drop_lock:
++	fh_unlock(fh);
+ 	fh_drop_write(fh);
+ out_errno:
+ 	nfserr = nfserrno(error);
+diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
+index 6adabd6..71292a0 100644
+--- a/fs/nfsd/nfs4acl.c
++++ b/fs/nfsd/nfs4acl.c
+@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	dentry = fhp->fh_dentry;
+ 	inode = d_inode(dentry);
+ 
+-	if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
+-		return nfserr_attrnotsupp;
+-
+ 	if (S_ISDIR(inode->i_mode))
+ 		flags = NFS4_ACL_DIR;
+ 
+@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (host_error < 0)
+ 		goto out_nfserr;
+ 
+-	host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
++	fh_lock(fhp);
++
++	host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
+ 	if (host_error < 0)
+-		goto out_release;
++		goto out_drop_lock;
+ 
+ 	if (S_ISDIR(inode->i_mode)) {
+-		host_error = inode->i_op->set_acl(inode, dpacl,
+-						  ACL_TYPE_DEFAULT);
++		host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
+ 	}
+ 
+-out_release:
++out_drop_lock:
++	fh_unlock(fhp);
++
+ 	posix_acl_release(pacl);
+ 	posix_acl_release(dpacl);
+ out_nfserr:
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 7389cb1..04c68d9 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
+ 	}
+ }
+ 
+-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
+-{
+-	struct rpc_xprt *xprt;
+-
+-	if (args->protocol != XPRT_TRANSPORT_BC_TCP)
+-		return rpc_create(args);
+-
+-	xprt = args->bc_xprt->xpt_bc_xprt;
+-	if (xprt) {
+-		xprt_get(xprt);
+-		return rpc_create_xprt(args, xprt);
+-	}
+-
+-	return rpc_create(args);
+-}
+-
+ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
+ {
+ 	int maxtime = max_cb_time(clp->net);
+@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 		args.authflavor = ses->se_cb_sec.flavor;
+ 	}
+ 	/* Create RPC client */
+-	client = create_backchannel_client(&args);
++	client = rpc_create(&args);
+ 	if (IS_ERR(client)) {
+ 		dprintk("NFSD: couldn't create callback client: %ld\n",
+ 			PTR_ERR(client));
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 0462eed..9e04e49 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3487,6 +3487,10 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
+ 	struct nfs4_openowner *oo = open->op_openowner;
+ 	struct nfs4_ol_stateid *retstp = NULL;
+ 
++	/* We are moving these outside of the spinlocks to avoid the warnings */
++	mutex_init(&stp->st_mutex);
++	mutex_lock(&stp->st_mutex);
++
+ 	spin_lock(&oo->oo_owner.so_client->cl_lock);
+ 	spin_lock(&fp->fi_lock);
+ 
+@@ -3502,13 +3506,17 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
+ 	stp->st_access_bmap = 0;
+ 	stp->st_deny_bmap = 0;
+ 	stp->st_openstp = NULL;
+-	init_rwsem(&stp->st_rwsem);
+ 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
+ 	list_add(&stp->st_perfile, &fp->fi_stateids);
+ 
+ out_unlock:
+ 	spin_unlock(&fp->fi_lock);
+ 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
++	if (retstp) {
++		mutex_lock(&retstp->st_mutex);
++		/* Not that we need to, just for neatness */
++		mutex_unlock(&stp->st_mutex);
++	}
+ 	return retstp;
+ }
+ 
+@@ -4335,32 +4343,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 	 */
+ 	if (stp) {
+ 		/* Stateid was found, this is an OPEN upgrade */
+-		down_read(&stp->st_rwsem);
++		mutex_lock(&stp->st_mutex);
+ 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
+ 		if (status) {
+-			up_read(&stp->st_rwsem);
++			mutex_unlock(&stp->st_mutex);
+ 			goto out;
+ 		}
+ 	} else {
+ 		stp = open->op_stp;
+ 		open->op_stp = NULL;
++		/*
++		 * init_open_stateid() either returns a locked stateid
++		 * it found, or initializes and locks the new one we passed in
++		 */
+ 		swapstp = init_open_stateid(stp, fp, open);
+ 		if (swapstp) {
+ 			nfs4_put_stid(&stp->st_stid);
+ 			stp = swapstp;
+-			down_read(&stp->st_rwsem);
+ 			status = nfs4_upgrade_open(rqstp, fp, current_fh,
+ 						stp, open);
+ 			if (status) {
+-				up_read(&stp->st_rwsem);
++				mutex_unlock(&stp->st_mutex);
+ 				goto out;
+ 			}
+ 			goto upgrade_out;
+ 		}
+-		down_read(&stp->st_rwsem);
+ 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
+ 		if (status) {
+-			up_read(&stp->st_rwsem);
++			mutex_unlock(&stp->st_mutex);
+ 			release_open_stateid(stp);
+ 			goto out;
+ 		}
+@@ -4372,7 +4382,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 	}
+ upgrade_out:
+ 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
+-	up_read(&stp->st_rwsem);
++	mutex_unlock(&stp->st_mutex);
+ 
+ 	if (nfsd4_has_session(&resp->cstate)) {
+ 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
+@@ -4983,12 +4993,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
+ 		 * revoked delegations are kept only for free_stateid.
+ 		 */
+ 		return nfserr_bad_stateid;
+-	down_write(&stp->st_rwsem);
++	mutex_lock(&stp->st_mutex);
+ 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+ 	if (status == nfs_ok)
+ 		status = nfs4_check_fh(current_fh, &stp->st_stid);
+ 	if (status != nfs_ok)
+-		up_write(&stp->st_rwsem);
++		mutex_unlock(&stp->st_mutex);
+ 	return status;
+ }
+ 
+@@ -5036,7 +5046,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+ 		return status;
+ 	oo = openowner(stp->st_stateowner);
+ 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
+-		up_write(&stp->st_rwsem);
++		mutex_unlock(&stp->st_mutex);
+ 		nfs4_put_stid(&stp->st_stid);
+ 		return nfserr_bad_stateid;
+ 	}
+@@ -5068,12 +5078,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	oo = openowner(stp->st_stateowner);
+ 	status = nfserr_bad_stateid;
+ 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
+-		up_write(&stp->st_rwsem);
++		mutex_unlock(&stp->st_mutex);
+ 		goto put_stateid;
+ 	}
+ 	oo->oo_flags |= NFS4_OO_CONFIRMED;
+ 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
+-	up_write(&stp->st_rwsem);
++	mutex_unlock(&stp->st_mutex);
+ 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
+ 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
+ 
+@@ -5149,7 +5159,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
+ 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
+ 	status = nfs_ok;
+ put_stateid:
+-	up_write(&stp->st_rwsem);
++	mutex_unlock(&stp->st_mutex);
+ 	nfs4_put_stid(&stp->st_stid);
+ out:
+ 	nfsd4_bump_seqid(cstate, status);
+@@ -5202,7 +5212,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	if (status)
+ 		goto out; 
+ 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
+-	up_write(&stp->st_rwsem);
++	mutex_unlock(&stp->st_mutex);
+ 
+ 	nfsd4_close_open_stateid(stp);
+ 
+@@ -5428,7 +5438,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ 	stp->st_access_bmap = 0;
+ 	stp->st_deny_bmap = open_stp->st_deny_bmap;
+ 	stp->st_openstp = open_stp;
+-	init_rwsem(&stp->st_rwsem);
++	mutex_init(&stp->st_mutex);
+ 	list_add(&stp->st_locks, &open_stp->st_locks);
+ 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ 	spin_lock(&fp->fi_lock);
+@@ -5597,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 					&open_stp, nn);
+ 		if (status)
+ 			goto out;
+-		up_write(&open_stp->st_rwsem);
++		mutex_unlock(&open_stp->st_mutex);
+ 		open_sop = openowner(open_stp->st_stateowner);
+ 		status = nfserr_bad_stateid;
+ 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
+@@ -5606,7 +5616,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
+ 							&lock_stp, &new);
+ 		if (status == nfs_ok)
+-			down_write(&lock_stp->st_rwsem);
++			mutex_lock(&lock_stp->st_mutex);
+ 	} else {
+ 		status = nfs4_preprocess_seqid_op(cstate,
+ 				       lock->lk_old_lock_seqid,
+@@ -5710,7 +5720,7 @@ out:
+ 		    seqid_mutating_err(ntohl(status)))
+ 			lock_sop->lo_owner.so_seqid++;
+ 
+-		up_write(&lock_stp->st_rwsem);
++		mutex_unlock(&lock_stp->st_mutex);
+ 
+ 		/*
+ 		 * If this is a new, never-before-used stateid, and we are
+@@ -5880,7 +5890,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ fput:
+ 	fput(filp);
+ put_stateid:
+-	up_write(&stp->st_rwsem);
++	mutex_unlock(&stp->st_mutex);
+ 	nfs4_put_stid(&stp->st_stid);
+ out:
+ 	nfsd4_bump_seqid(cstate, status);
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index c050c53..c89d7b5 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
+ 	unsigned char			st_access_bmap;
+ 	unsigned char			st_deny_bmap;
+ 	struct nfs4_ol_stateid		*st_openstp;
+-	struct rw_semaphore		st_rwsem;
++	struct mutex			st_mutex;
+ };
+ 
+ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index b3fc0a3..fb35aa2 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
+ 	struct dentry *upper;
+ 	struct dentry *opaquedir = NULL;
+ 	int err;
++	int flags = 0;
+ 
+ 	if (WARN_ON(!workdir))
+ 		return -EROFS;
+@@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
+ 	if (err)
+ 		goto out_dput;
+ 
+-	whiteout = ovl_whiteout(workdir, dentry);
+-	err = PTR_ERR(whiteout);
+-	if (IS_ERR(whiteout))
++	upper = lookup_one_len(dentry->d_name.name, upperdir,
++			       dentry->d_name.len);
++	err = PTR_ERR(upper);
++	if (IS_ERR(upper))
+ 		goto out_unlock;
+ 
+-	upper = ovl_dentry_upper(dentry);
+-	if (!upper) {
+-		upper = lookup_one_len(dentry->d_name.name, upperdir,
+-				       dentry->d_name.len);
+-		err = PTR_ERR(upper);
+-		if (IS_ERR(upper))
+-			goto kill_whiteout;
+-
+-		err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
+-		dput(upper);
+-		if (err)
+-			goto kill_whiteout;
+-	} else {
+-		int flags = 0;
++	err = -ESTALE;
++	if ((opaquedir && upper != opaquedir) ||
++	    (!opaquedir && ovl_dentry_upper(dentry) &&
++	     upper != ovl_dentry_upper(dentry))) {
++		goto out_dput_upper;
++	}
+ 
+-		if (opaquedir)
+-			upper = opaquedir;
+-		err = -ESTALE;
+-		if (upper->d_parent != upperdir)
+-			goto kill_whiteout;
++	whiteout = ovl_whiteout(workdir, dentry);
++	err = PTR_ERR(whiteout);
++	if (IS_ERR(whiteout))
++		goto out_dput_upper;
+ 
+-		if (is_dir)
+-			flags |= RENAME_EXCHANGE;
++	if (d_is_dir(upper))
++		flags = RENAME_EXCHANGE;
+ 
+-		err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+-		if (err)
+-			goto kill_whiteout;
++	err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
++	if (err)
++		goto kill_whiteout;
++	if (flags)
++		ovl_cleanup(wdir, upper);
+ 
+-		if (is_dir)
+-			ovl_cleanup(wdir, upper);
+-	}
+ 	ovl_dentry_version_inc(dentry->d_parent);
+ out_d_drop:
+ 	d_drop(dentry);
+ 	dput(whiteout);
++out_dput_upper:
++	dput(upper);
+ out_unlock:
+ 	unlock_rename(workdir, upperdir);
+ out_dput:
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index a4ff5d0..d46fa60 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -59,16 +59,40 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (err)
+ 		goto out;
+ 
++	if (attr->ia_valid & ATTR_SIZE) {
++		struct inode *realinode = d_inode(ovl_dentry_real(dentry));
++
++		err = -ETXTBSY;
++		if (atomic_read(&realinode->i_writecount) < 0)
++			goto out_drop_write;
++	}
++
+ 	err = ovl_copy_up(dentry);
+ 	if (!err) {
++		struct inode *winode = NULL;
++
+ 		upperdentry = ovl_dentry_upper(dentry);
+ 
++		if (attr->ia_valid & ATTR_SIZE) {
++			winode = d_inode(upperdentry);
++			err = get_write_access(winode);
++			if (err)
++				goto out_drop_write;
++		}
++
++		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++			attr->ia_valid &= ~ATTR_MODE;
++
+ 		inode_lock(upperdentry->d_inode);
+ 		err = notify_change(upperdentry, attr, NULL);
+ 		if (!err)
+ 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+ 		inode_unlock(upperdentry->d_inode);
++
++		if (winode)
++			put_write_access(winode);
+ 	}
++out_drop_write:
+ 	ovl_drop_write(dentry);
+ out:
+ 	return err;
+@@ -121,16 +145,18 @@ int ovl_permission(struct inode *inode, int mask)
+ 
+ 		err = vfs_getattr(&realpath, &stat);
+ 		if (err)
+-			return err;
++			goto out_dput;
+ 
++		err = -ESTALE;
+ 		if ((stat.mode ^ inode->i_mode) & S_IFMT)
+-			return -ESTALE;
++			goto out_dput;
+ 
+ 		inode->i_mode = stat.mode;
+ 		inode->i_uid = stat.uid;
+ 		inode->i_gid = stat.gid;
+ 
+-		return generic_permission(inode, mask);
++		err = generic_permission(inode, mask);
++		goto out_dput;
+ 	}
+ 
+ 	/* Careful in RCU walk mode */
+@@ -400,12 +426,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
+ 	if (!inode)
+ 		return NULL;
+ 
+-	mode &= S_IFMT;
+-
+ 	inode->i_ino = get_next_ino();
+ 	inode->i_mode = mode;
+ 	inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ 
++	mode &= S_IFMT;
+ 	switch (mode) {
+ 	case S_IFDIR:
+ 		inode->i_private = oe;
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 6a7090f..294ccc0 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -185,6 +185,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
+ {
+ 	to->i_uid = from->i_uid;
+ 	to->i_gid = from->i_gid;
++	to->i_mode = from->i_mode;
+ }
+ 
+ /* dir.c */
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 791235e..7952a50f 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1064,16 +1064,21 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 		/*
+ 		 * Upper should support d_type, else whiteouts are visible.
+ 		 * Given workdir and upper are on same fs, we can do
+-		 * iterate_dir() on workdir.
++		 * iterate_dir() on workdir. This check requires successful
++		 * creation of workdir in previous step.
+ 		 */
+-		err = ovl_check_d_type_supported(&workpath);
+-		if (err < 0)
+-			goto out_put_workdir;
++		if (ufs->workdir) {
++			err = ovl_check_d_type_supported(&workpath);
++			if (err < 0)
++				goto out_put_workdir;
+ 
+-		if (!err) {
+-			pr_err("overlayfs: upper fs needs to support d_type.\n");
+-			err = -EINVAL;
+-			goto out_put_workdir;
++			/*
++			 * We allowed this configuration and don't want to
++			 * break users over kernel upgrade. So warn instead
++			 * of erroring out.
++			 */
++			if (!err)
++				pr_warn("overlayfs: upper fs needs to support d_type.\n");
+ 		}
+ 	}
+ 
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index 711dd51..e11ea5f 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -786,39 +786,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
+ 	return error;
+ }
+ 
+-static int
+-posix_acl_xattr_set(const struct xattr_handler *handler,
+-		    struct dentry *dentry, const char *name,
+-		    const void *value, size_t size, int flags)
++int
++set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
+ {
+-	struct inode *inode = d_backing_inode(dentry);
+-	struct posix_acl *acl = NULL;
+-	int ret;
+-
+ 	if (!IS_POSIXACL(inode))
+ 		return -EOPNOTSUPP;
+ 	if (!inode->i_op->set_acl)
+ 		return -EOPNOTSUPP;
+ 
+-	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+-		return value ? -EACCES : 0;
++	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
++		return acl ? -EACCES : 0;
+ 	if (!inode_owner_or_capable(inode))
+ 		return -EPERM;
+ 
++	if (acl) {
++		int ret = posix_acl_valid(acl);
++		if (ret)
++			return ret;
++	}
++	return inode->i_op->set_acl(inode, acl, type);
++}
++EXPORT_SYMBOL(set_posix_acl);
++
++static int
++posix_acl_xattr_set(const struct xattr_handler *handler,
++		    struct dentry *dentry, const char *name,
++		    const void *value, size_t size, int flags)
++{
++	struct inode *inode = d_backing_inode(dentry);
++	struct posix_acl *acl = NULL;
++	int ret;
++
+ 	if (value) {
+ 		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ 		if (IS_ERR(acl))
+ 			return PTR_ERR(acl);
+-
+-		if (acl) {
+-			ret = posix_acl_valid(acl);
+-			if (ret)
+-				goto out;
+-		}
+ 	}
+-
+-	ret = inode->i_op->set_acl(inode, acl, handler->flags);
+-out:
++	ret = set_posix_acl(inode, handler->flags, acl);
+ 	posix_acl_release(acl);
+ 	return ret;
+ }
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 446753d..5b5ec8d 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -52,6 +52,7 @@
+ #include "ubifs.h"
+ #include <linux/mount.h>
+ #include <linux/slab.h>
++#include <linux/migrate.h>
+ 
+ static int read_block(struct inode *inode, void *addr, unsigned int block,
+ 		      struct ubifs_data_node *dn)
+@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_MIGRATION
++static int ubifs_migrate_page(struct address_space *mapping,
++		struct page *newpage, struct page *page, enum migrate_mode mode)
++{
++	int rc;
++
++	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
++	if (rc != MIGRATEPAGE_SUCCESS)
++		return rc;
++
++	if (PagePrivate(page)) {
++		ClearPagePrivate(page);
++		SetPagePrivate(newpage);
++	}
++
++	migrate_page_copy(newpage, page);
++	return MIGRATEPAGE_SUCCESS;
++}
++#endif
++
+ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+ {
+ 	/*
+@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
+ 	.write_end      = ubifs_write_end,
+ 	.invalidatepage = ubifs_invalidatepage,
+ 	.set_page_dirty = ubifs_set_page_dirty,
++#ifdef CONFIG_MIGRATION
++	.migratepage	= ubifs_migrate_page,
++#endif
+ 	.releasepage    = ubifs_releasepage,
+ };
+ 
+diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
+index 6bd0570..05f05f1 100644
+--- a/include/asm-generic/qspinlock.h
++++ b/include/asm-generic/qspinlock.h
+@@ -22,37 +22,33 @@
+ #include <asm-generic/qspinlock_types.h>
+ 
+ /**
++ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
++ * @lock : Pointer to queued spinlock structure
++ *
++ * There is a very slight possibility of live-lock if the lockers keep coming
++ * and the waiter is just unfortunate enough to not see any unlock state.
++ */
++#ifndef queued_spin_unlock_wait
++extern void queued_spin_unlock_wait(struct qspinlock *lock);
++#endif
++
++/**
+  * queued_spin_is_locked - is the spinlock locked?
+  * @lock: Pointer to queued spinlock structure
+  * Return: 1 if it is locked, 0 otherwise
+  */
++#ifndef queued_spin_is_locked
+ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+ {
+ 	/*
+-	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
+-	 * issuing the unordered store that sets _Q_LOCKED_VAL.
+-	 *
+-	 * See both smp_cond_acquire() sites for more detail.
+-	 *
+-	 * This however means that in code like:
+-	 *
+-	 *   spin_lock(A)		spin_lock(B)
+-	 *   spin_unlock_wait(B)	spin_is_locked(A)
+-	 *   do_something()		do_something()
+-	 *
+-	 * Both CPUs can end up running do_something() because the store
+-	 * setting _Q_LOCKED_VAL will pass through the loads in
+-	 * spin_unlock_wait() and/or spin_is_locked().
++	 * See queued_spin_unlock_wait().
+ 	 *
+-	 * Avoid this by issuing a full memory barrier between the spin_lock()
+-	 * and the loads in spin_unlock_wait() and spin_is_locked().
+-	 *
+-	 * Note that regular mutual exclusion doesn't care about this
+-	 * delayed store.
++	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
++	 * isn't immediately observable.
+ 	 */
+-	smp_mb();
+-	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
++	return atomic_read(&lock->val);
+ }
++#endif
+ 
+ /**
+  * queued_spin_value_unlocked - is the spinlock structure unlocked?
+@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+ }
+ #endif
+ 
+-/**
+- * queued_spin_unlock_wait - wait until current lock holder releases the lock
+- * @lock : Pointer to queued spinlock structure
+- *
+- * There is a very slight possibility of live-lock if the lockers keep coming
+- * and the waiter is just unfortunate enough to not see any unlock state.
+- */
+-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+-{
+-	/* See queued_spin_is_locked() */
+-	smp_mb();
+-	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+-		cpu_relax();
+-}
+-
+ #ifndef virt_spin_lock
+ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
+ {
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index 055a08d..a74c49d 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
+  */
+ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+ 		       bool interruptible, bool no_wait);
++
++/**
++ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
++ *
++ * @placement:  The struct ttm_placement to check against
++ * @mem:  The struct ttm_mem_reg indicating the region where the bo resides
++ * @new_flags: Describes compatible placement found
++ *
++ * Returns true if the placement is compatible
++ */
++extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
++			      struct ttm_mem_reg *mem,
++			      uint32_t *new_flags);
++
+ /**
+  * ttm_bo_validate
+  *
+diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
+index 786ad32..07b83d3 100644
+--- a/include/linux/cpuidle.h
++++ b/include/linux/cpuidle.h
+@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
+ extern int cpuidle_play_dead(void);
+ 
+ extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
++static inline struct cpuidle_device *cpuidle_get_device(void)
++{return __this_cpu_read(cpuidle_devices); }
+ #else
+ static inline void disable_cpuidle(void) { }
+ static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
+ static inline int cpuidle_play_dead(void) {return -ENODEV; }
+ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+ 	struct cpuidle_device *dev) {return NULL; }
++static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
+ #endif
+ 
+ #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 7e9422c..ad5d582 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -576,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
+ 	return inode;
+ }
+ 
++/**
++ * d_real_inode - Return the real inode
++ * @dentry: The dentry to query
++ *
++ * If dentry is on an union/overlay, then return the underlying, real inode.
++ * Otherwise return d_inode().
++ */
++static inline struct inode *d_real_inode(struct dentry *dentry)
++{
++	return d_backing_inode(d_real(dentry));
++}
++
+ 
+ #endif	/* __LINUX_DCACHE_H */
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index 0536524..6890446 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -117,13 +117,18 @@ struct module;
+ 
+ #include <linux/atomic.h>
+ 
++#ifdef HAVE_JUMP_LABEL
++
+ static inline int static_key_count(struct static_key *key)
+ {
+-	return atomic_read(&key->enabled);
++	/*
++	 * -1 means the first static_key_slow_inc() is in progress.
++	 *  static_key_enabled() must return true, so return 1 here.
++	 */
++	int n = atomic_read(&key->enabled);
++	return n >= 0 ? n : 1;
+ }
+ 
+-#ifdef HAVE_JUMP_LABEL
+-
+ #define JUMP_TYPE_FALSE	0UL
+ #define JUMP_TYPE_TRUE	1UL
+ #define JUMP_TYPE_MASK	1UL
+@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
+ 
+ #else  /* !HAVE_JUMP_LABEL */
+ 
++static inline int static_key_count(struct static_key *key)
++{
++	return atomic_read(&key->enabled);
++}
++
+ static __always_inline void jump_label_init(void)
+ {
+ 	static_key_initialized = true;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 15d0df9..794b924 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+ }
+ 
+ void __skb_get_hash(struct sk_buff *skb);
++u32 __skb_get_hash_symmetric(struct sk_buff *skb);
+ u32 skb_get_poff(const struct sk_buff *skb);
+ u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ 		   const struct flow_keys *keys, int hlen);
+@@ -2860,6 +2861,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
+ }
+ 
+ /**
++ *	skb_push_rcsum - push skb and update receive checksum
++ *	@skb: buffer to update
++ *	@len: length of data pushed
++ *
++ *	This function performs an skb_push on the packet and updates
++ *	the CHECKSUM_COMPLETE checksum.  It should be used on
++ *	receive path processing instead of skb_push unless you know
++ *	that the checksum difference is zero (e.g., a valid IP header)
++ *	or you are setting ip_summed to CHECKSUM_NONE.
++ */
++static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
++					    unsigned int len)
++{
++	skb_push(skb, len);
++	skb_postpush_rcsum(skb, skb->data, len);
++	return skb->data;
++}
++
++/**
+  *	pskb_trim_rcsum - trim received skb and update checksum
+  *	@skb: buffer to trim
+  *	@len: new length
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 9a7ddba..14d70f5 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -137,8 +137,6 @@ struct rpc_create_args {
+ #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT	(1UL << 9)
+ 
+ struct rpc_clnt *rpc_create(struct rpc_create_args *args);
+-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+-					struct rpc_xprt *xprt);
+ struct rpc_clnt	*rpc_bind_new_program(struct rpc_clnt *,
+ 				const struct rpc_program *, u32);
+ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index b7dabc4..79ba508 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -84,6 +84,7 @@ struct svc_xprt {
+ 
+ 	struct net		*xpt_net;
+ 	struct rpc_xprt		*xpt_bc_xprt;	/* NFSv4.1 backchannel */
++	struct rpc_xprt_switch	*xpt_bc_xps;	/* NFSv4.1 backchannel */
+ };
+ 
+ static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index fb0d212..9f51e1d 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -296,6 +296,7 @@ struct xprt_create {
+ 	size_t			addrlen;
+ 	const char		*servername;
+ 	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
++	struct rpc_xprt_switch	*bc_xps;
+ 	unsigned int		flags;
+ };
+ 
+diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
+index 966889a..e479033 100644
+--- a/include/linux/usb/ehci_def.h
++++ b/include/linux/usb/ehci_def.h
+@@ -180,11 +180,11 @@ struct ehci_regs {
+  * PORTSCx
+  */
+ 	/* HOSTPC: offset 0x84 */
+-	u32		hostpc[1];	/* HOSTPC extension */
++	u32		hostpc[0];	/* HOSTPC extension */
+ #define HOSTPC_PHCD	(1<<22)		/* Phy clock disable */
+ #define HOSTPC_PSPD	(3<<25)		/* Port speed detection */
+ 
+-	u32		reserved5[16];
++	u32		reserved5[17];
+ 
+ 	/* USBMODE_EX: offset 0xc8 */
+ 	u32		usbmode_ex;	/* USB Device mode extension */
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index fb2cef4..b8334a6 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -217,7 +217,7 @@ enum ib_device_cap_flags {
+ 	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
+ 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
+ 	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
+-	IB_DEVICE_ON_DEMAND_PAGING		= (1 << 31),
++	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
+ 	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
+ 	IB_DEVICE_VIRTUAL_FUNCTION		= ((u64)1 << 33),
+ };
+diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
+index a869655..6ee9d97 100644
+--- a/include/rdma/rdma_vt.h
++++ b/include/rdma/rdma_vt.h
+@@ -203,7 +203,9 @@ struct rvt_driver_provided {
+ 
+ 	/*
+ 	 * Allocate a private queue pair data structure for driver specific
+-	 * information which is opaque to rdmavt.
++	 * information which is opaque to rdmavt.  Errors are returned via
++	 * ERR_PTR(err).  The driver is free to return NULL or a valid
++	 * pointer.
+ 	 */
+ 	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ 				gfp_t gfp);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index c20f06f..6555d54 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ {
+ 	unsigned long address = (unsigned long)uaddr;
+ 	struct mm_struct *mm = current->mm;
+-	struct page *page;
++	struct page *page, *tail;
+ 	struct address_space *mapping;
+ 	int err, ro = 0;
+ 
+@@ -530,7 +530,15 @@ again:
+ 	 * considered here and page lock forces unnecessarily serialization
+ 	 * From this point on, mapping will be re-verified if necessary and
+ 	 * page lock will be acquired only if it is unavoidable
+-	 */
++	 *
++	 * Mapping checks require the head page for any compound page so the
++	 * head page and mapping is looked up now. For anonymous pages, it
++	 * does not matter if the page splits in the future as the key is
++	 * based on the address. For filesystem-backed pages, the tail is
++	 * required as the index of the page determines the key. For
++	 * base pages, there is no tail page and tail == page.
++	 */
++	tail = page;
+ 	page = compound_head(page);
+ 	mapping = READ_ONCE(page->mapping);
+ 
+@@ -654,7 +662,7 @@ again:
+ 
+ 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ 		key->shared.inode = inode;
+-		key->shared.pgoff = basepage_index(page);
++		key->shared.pgoff = basepage_index(tail);
+ 		rcu_read_unlock();
+ 	}
+ 
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 05254ee..4b353e0 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
+ 
+ void static_key_slow_inc(struct static_key *key)
+ {
++	int v, v1;
++
+ 	STATIC_KEY_CHECK_USE();
+-	if (atomic_inc_not_zero(&key->enabled))
+-		return;
++
++	/*
++	 * Careful if we get concurrent static_key_slow_inc() calls;
++	 * later calls must wait for the first one to _finish_ the
++	 * jump_label_update() process.  At the same time, however,
++	 * the jump_label_update() call below wants to see
++	 * static_key_enabled(&key) for jumps to be updated properly.
++	 *
++	 * So give a special meaning to negative key->enabled: it sends
++	 * static_key_slow_inc() down the slow path, and it is non-zero
++	 * so it counts as "enabled" in jump_label_update().  Note that
++	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
++	 */
++	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
++		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
++		if (likely(v1 == v))
++			return;
++	}
+ 
+ 	jump_label_lock();
+-	if (atomic_inc_return(&key->enabled) == 1)
++	if (atomic_read(&key->enabled) == 0) {
++		atomic_set(&key->enabled, -1);
+ 		jump_label_update(key);
++		atomic_set(&key->enabled, 1);
++	} else {
++		atomic_inc(&key->enabled);
++	}
+ 	jump_label_unlock();
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+ static void __static_key_slow_dec(struct static_key *key,
+ 		unsigned long rate_limit, struct delayed_work *work)
+ {
++	/*
++	 * The negative count check is valid even when a negative
++	 * key->enabled is in use by static_key_slow_inc(); a
++	 * __static_key_slow_dec() before the first static_key_slow_inc()
++	 * returns is unbalanced, because all other static_key_slow_inc()
++	 * instances block while the update is in progress.
++	 */
+ 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
+ 		WARN(atomic_read(&key->enabled) < 0,
+ 		     "jump label: negative count!\n");
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index e364b42..79d2d76 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+ 	if (!hold_ctx)
+ 		return 0;
+ 
+-	if (unlikely(ctx == hold_ctx))
+-		return -EALREADY;
+-
+ 	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+ 	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+ #ifdef CONFIG_DEBUG_MUTEXES
+@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	if (use_ww_ctx) {
++		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
++		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
++			return -EALREADY;
++	}
++
+ 	preempt_disable();
+ 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+ 
+diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
+index ce2f75e..5fc8c31 100644
+--- a/kernel/locking/qspinlock.c
++++ b/kernel/locking/qspinlock.c
+@@ -267,6 +267,66 @@ static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
+ #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
+ #endif
+ 
++/*
++ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
++ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
++ *
++ * This means that the store can be delayed, but no later than the
++ * store-release from the unlock. This means that simply observing
++ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
++ *
++ * There are two paths that can issue the unordered store:
++ *
++ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
++ *
++ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
++ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
++ *
++ * However, in both cases we have other !0 state we've set before to queue
++ * ourseves:
++ *
++ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
++ * load is constrained by that ACQUIRE to not pass before that, and thus must
++ * observe the store.
++ *
++ * For (2) we have a more interesting scenario. We enqueue ourselves using
++ * xchg_tail(), which ends up being a RELEASE. This in itself is not
++ * sufficient, however that is followed by an smp_cond_acquire() on the same
++ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
++ * guarantees we must observe that store.
++ *
++ * Therefore both cases have other !0 state that is observable before the
++ * unordered locked byte store comes through. This means we can use that to
++ * wait for the lock store, and then wait for an unlock.
++ */
++#ifndef queued_spin_unlock_wait
++void queued_spin_unlock_wait(struct qspinlock *lock)
++{
++	u32 val;
++
++	for (;;) {
++		val = atomic_read(&lock->val);
++
++		if (!val) /* not locked, we're done */
++			goto done;
++
++		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
++			break;
++
++		/* not locked, but pending, wait until we observe the lock */
++		cpu_relax();
++	}
++
++	/* any unlock is good */
++	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
++		cpu_relax();
++
++done:
++	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
++}
++EXPORT_SYMBOL(queued_spin_unlock_wait);
++#endif
++
+ #endif /* _GEN_PV_LOCK_SLOWPATH */
+ 
+ /**
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index e7dd0ec..eeaf920 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2821,6 +2821,23 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+ 
+ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+ 
++/*
++ * Unsigned subtract and clamp on underflow.
++ *
++ * Explicitly do a load-store to ensure the intermediate value never hits
++ * memory. This allows lockless observations without ever seeing the negative
++ * values.
++ */
++#define sub_positive(_ptr, _val) do {				\
++	typeof(_ptr) ptr = (_ptr);				\
++	typeof(*ptr) val = (_val);				\
++	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
++	res = var - val;					\
++	if (res > var)						\
++		res = 0;					\
++	WRITE_ONCE(*ptr, res);					\
++} while (0)
++
+ /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
+ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -2829,15 +2846,15 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ 
+ 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
+ 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+-		sa->load_avg = max_t(long, sa->load_avg - r, 0);
+-		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
++		sub_positive(&sa->load_avg, r);
++		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+ 		removed = 1;
+ 	}
+ 
+ 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
+ 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
+-		sa->util_avg = max_t(long, sa->util_avg - r, 0);
+-		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
++		sub_positive(&sa->util_avg, r);
++		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+ 	}
+ 
+ 	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+@@ -2927,10 +2944,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ 			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
+ 			  cfs_rq->curr == se, NULL);
+ 
+-	cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+-	cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+-	cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+-	cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
++	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
++	sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
++	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
++	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ }
+ 
+ /* Add the load generated by se into cfs_rq's load average */
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index bd12c6c..c5aeedf 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+  */
+ static void cpuidle_idle_call(void)
+ {
+-	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
++	struct cpuidle_device *dev = cpuidle_get_device();
+ 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ 	int next_state, entered_state;
+ 
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index f96f038..ad1d616 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
+ static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
+ {
+ 	struct trace_bprintk_fmt *pos;
++
++	if (!fmt)
++		return ERR_PTR(-EINVAL);
++
+ 	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
+ 		if (!strcmp(pos->fmt, fmt))
+ 			return pos;
+@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
+ 	for (iter = start; iter < end; iter++) {
+ 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
+ 		if (tb_fmt) {
+-			*iter = tb_fmt->fmt;
++			if (!IS_ERR(tb_fmt))
++				*iter = tb_fmt->fmt;
+ 			continue;
+ 		}
+ 
+diff --git a/mm/migrate.c b/mm/migrate.c
+index f9dfb18..bdf3410 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 
+ 	return MIGRATEPAGE_SUCCESS;
+ }
++EXPORT_SYMBOL(migrate_page_move_mapping);
+ 
+ /*
+  * The expected number of remaining references is the same as that
+@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
+ 
+ 	mem_cgroup_migrate(page, newpage);
+ }
++EXPORT_SYMBOL(migrate_page_copy);
+ 
+ /************************************************************
+  *                    Migration functions
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index bc5149d..e389f0a 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -369,8 +369,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+ 	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
+ 	unsigned long bytes = vm_dirty_bytes;
+ 	unsigned long bg_bytes = dirty_background_bytes;
+-	unsigned long ratio = vm_dirty_ratio;
+-	unsigned long bg_ratio = dirty_background_ratio;
++	/* convert ratios to per-PAGE_SIZE for higher precision */
++	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
++	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
+ 	unsigned long thresh;
+ 	unsigned long bg_thresh;
+ 	struct task_struct *tsk;
+@@ -382,26 +383,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+ 		/*
+ 		 * The byte settings can't be applied directly to memcg
+ 		 * domains.  Convert them to ratios by scaling against
+-		 * globally available memory.
++		 * globally available memory.  As the ratios are in
++		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
++		 * number of pages.
+ 		 */
+ 		if (bytes)
+-			ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
+-				    global_avail, 100UL);
++			ratio = min(DIV_ROUND_UP(bytes, global_avail),
++				    PAGE_SIZE);
+ 		if (bg_bytes)
+-			bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
+-				       global_avail, 100UL);
++			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
++				       PAGE_SIZE);
+ 		bytes = bg_bytes = 0;
+ 	}
+ 
+ 	if (bytes)
+ 		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
+ 	else
+-		thresh = (ratio * available_memory) / 100;
++		thresh = (ratio * available_memory) / PAGE_SIZE;
+ 
+ 	if (bg_bytes)
+ 		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
+ 	else
+-		bg_thresh = (bg_ratio * available_memory) / 100;
++		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
+ 
+ 	if (bg_thresh >= thresh)
+ 		bg_thresh = thresh / 2;
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 0c59684..9903830 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -112,7 +112,7 @@ struct pcpu_chunk {
+ 	int			map_used;	/* # of map entries used before the sentry */
+ 	int			map_alloc;	/* # of map entries allocated */
+ 	int			*map;		/* allocation map */
+-	struct work_struct	map_extend_work;/* async ->map[] extension */
++	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
+ 
+ 	void			*data;		/* chunk data */
+ 	int			first_free;	/* no free below this */
+@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
+ static int pcpu_reserved_chunk_limit;
+ 
+ static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
+-static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
++static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
+ 
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+ 
++/* chunks which need their map areas extended, protected by pcpu_lock */
++static LIST_HEAD(pcpu_map_extend_chunks);
++
+ /*
+  * The number of empty populated pages, protected by pcpu_lock.  The
+  * reserved chunk doesn't contribute to the count.
+@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
+ {
+ 	int margin, new_alloc;
+ 
++	lockdep_assert_held(&pcpu_lock);
++
+ 	if (is_atomic) {
+ 		margin = 3;
+ 
+ 		if (chunk->map_alloc <
+-		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
+-		    pcpu_async_enabled)
+-			schedule_work(&chunk->map_extend_work);
++		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
++			if (list_empty(&chunk->map_extend_list)) {
++				list_add_tail(&chunk->map_extend_list,
++					      &pcpu_map_extend_chunks);
++				pcpu_schedule_balance_work();
++			}
++		}
+ 	} else {
+ 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
+ 	}
+@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+ 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+ 	unsigned long flags;
+ 
++	lockdep_assert_held(&pcpu_alloc_mutex);
++
+ 	new = pcpu_mem_zalloc(new_size);
+ 	if (!new)
+ 		return -ENOMEM;
+@@ -467,20 +478,6 @@ out_unlock:
+ 	return 0;
+ }
+ 
+-static void pcpu_map_extend_workfn(struct work_struct *work)
+-{
+-	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
+-						map_extend_work);
+-	int new_alloc;
+-
+-	spin_lock_irq(&pcpu_lock);
+-	new_alloc = pcpu_need_to_extend(chunk, false);
+-	spin_unlock_irq(&pcpu_lock);
+-
+-	if (new_alloc)
+-		pcpu_extend_area_map(chunk, new_alloc);
+-}
+-
+ /**
+  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
+  * @chunk: chunk the candidate area belongs to
+@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
+ 	chunk->map_used = 1;
+ 
+ 	INIT_LIST_HEAD(&chunk->list);
+-	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
++	INIT_LIST_HEAD(&chunk->map_extend_list);
+ 	chunk->free_size = pcpu_unit_size;
+ 	chunk->contig_hint = pcpu_unit_size;
+ 
+@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ 		return NULL;
+ 	}
+ 
++	if (!is_atomic)
++		mutex_lock(&pcpu_alloc_mutex);
++
+ 	spin_lock_irqsave(&pcpu_lock, flags);
+ 
+ 	/* serve reserved allocations from the reserved chunk if available */
+@@ -967,12 +967,9 @@ restart:
+ 	if (is_atomic)
+ 		goto fail;
+ 
+-	mutex_lock(&pcpu_alloc_mutex);
+-
+ 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+ 		chunk = pcpu_create_chunk();
+ 		if (!chunk) {
+-			mutex_unlock(&pcpu_alloc_mutex);
+ 			err = "failed to allocate new chunk";
+ 			goto fail;
+ 		}
+@@ -983,7 +980,6 @@ restart:
+ 		spin_lock_irqsave(&pcpu_lock, flags);
+ 	}
+ 
+-	mutex_unlock(&pcpu_alloc_mutex);
+ 	goto restart;
+ 
+ area_found:
+@@ -993,8 +989,6 @@ area_found:
+ 	if (!is_atomic) {
+ 		int page_start, page_end, rs, re;
+ 
+-		mutex_lock(&pcpu_alloc_mutex);
+-
+ 		page_start = PFN_DOWN(off);
+ 		page_end = PFN_UP(off + size);
+ 
+@@ -1005,7 +999,6 @@ area_found:
+ 
+ 			spin_lock_irqsave(&pcpu_lock, flags);
+ 			if (ret) {
+-				mutex_unlock(&pcpu_alloc_mutex);
+ 				pcpu_free_area(chunk, off, &occ_pages);
+ 				err = "failed to populate";
+ 				goto fail_unlock;
+@@ -1045,6 +1038,8 @@ fail:
+ 		/* see the flag handling in pcpu_blance_workfn() */
+ 		pcpu_atomic_alloc_failed = true;
+ 		pcpu_schedule_balance_work();
++	} else {
++		mutex_unlock(&pcpu_alloc_mutex);
+ 	}
+ 	return NULL;
+ }
+@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
+ 			continue;
+ 
++		list_del_init(&chunk->map_extend_list);
+ 		list_move(&chunk->list, &to_free);
+ 	}
+ 
+@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ 		pcpu_destroy_chunk(chunk);
+ 	}
+ 
++	/* service chunks which requested async area map extension */
++	do {
++		int new_alloc = 0;
++
++		spin_lock_irq(&pcpu_lock);
++
++		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
++					struct pcpu_chunk, map_extend_list);
++		if (chunk) {
++			list_del_init(&chunk->map_extend_list);
++			new_alloc = pcpu_need_to_extend(chunk, false);
++		}
++
++		spin_unlock_irq(&pcpu_lock);
++
++		if (new_alloc)
++			pcpu_extend_area_map(chunk, new_alloc);
++	} while (chunk);
++
+ 	/*
+ 	 * Ensure there are certain number of free populated pages for
+ 	 * atomic allocs.  Fill up from the most packed so that atomic
+@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 	 */
+ 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ 	INIT_LIST_HEAD(&schunk->list);
+-	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
++	INIT_LIST_HEAD(&schunk->map_extend_list);
+ 	schunk->base_addr = base_addr;
+ 	schunk->map = smap;
+ 	schunk->map_alloc = ARRAY_SIZE(smap);
+@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 	if (dyn_size) {
+ 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ 		INIT_LIST_HEAD(&dchunk->list);
+-		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
++		INIT_LIST_HEAD(&dchunk->map_extend_list);
+ 		dchunk->base_addr = base_addr;
+ 		dchunk->map = dmap;
+ 		dchunk->map_alloc = ARRAY_SIZE(dmap);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 719bd6b..9ca09f5 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2236,9 +2236,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 									NULL);
+ 		if (error) {
+ 			/* Remove the !PageUptodate pages we added */
+-			shmem_undo_range(inode,
+-				(loff_t)start << PAGE_SHIFT,
+-				(loff_t)index << PAGE_SHIFT, true);
++			if (index > start) {
++				shmem_undo_range(inode,
++				    (loff_t)start << PAGE_SHIFT,
++				    ((loff_t)index << PAGE_SHIFT) - 1, true);
++			}
+ 			goto undone;
+ 		}
+ 
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index a669dea..61ad43f 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
+ }
+ EXPORT_SYMBOL(make_flow_keys_digest);
+ 
++static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
++
++u32 __skb_get_hash_symmetric(struct sk_buff *skb)
++{
++	struct flow_keys keys;
++
++	__flow_hash_secret_init();
++
++	memset(&keys, 0, sizeof(keys));
++	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
++			   NULL, 0, 0, 0,
++			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
++
++	return __flow_hash_from_keys(&keys, hashrnd);
++}
++EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
++
+ /**
+  * __skb_get_hash: calculate a flow hash
+  * @skb: sk_buff to calculate flow hash from
+@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
+ 	},
+ };
+ 
++static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
++	{
++		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
++		.offset = offsetof(struct flow_keys, control),
++	},
++	{
++		.key_id = FLOW_DISSECTOR_KEY_BASIC,
++		.offset = offsetof(struct flow_keys, basic),
++	},
++	{
++		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
++		.offset = offsetof(struct flow_keys, addrs.v4addrs),
++	},
++	{
++		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
++		.offset = offsetof(struct flow_keys, addrs.v6addrs),
++	},
++	{
++		.key_id = FLOW_DISSECTOR_KEY_PORTS,
++		.offset = offsetof(struct flow_keys, ports),
++	},
++};
++
+ static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
+ 	{
+ 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
+@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
+ 	skb_flow_dissector_init(&flow_keys_dissector,
+ 				flow_keys_dissector_keys,
+ 				ARRAY_SIZE(flow_keys_dissector_keys));
++	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
++				flow_keys_dissector_symmetric_keys,
++				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
+ 	skb_flow_dissector_init(&flow_keys_buf_dissector,
+ 				flow_keys_buf_dissector_keys,
+ 				ARRAY_SIZE(flow_keys_buf_dissector_keys));
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e561f9f..59bf4d7 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3016,24 +3016,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
+ 
+ /**
+- *	skb_push_rcsum - push skb and update receive checksum
+- *	@skb: buffer to update
+- *	@len: length of data pulled
+- *
+- *	This function performs an skb_push on the packet and updates
+- *	the CHECKSUM_COMPLETE checksum.  It should be used on
+- *	receive path processing instead of skb_push unless you know
+- *	that the checksum difference is zero (e.g., a valid IP header)
+- *	or you are setting ip_summed to CHECKSUM_NONE.
+- */
+-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
+-{
+-	skb_push(skb, len);
+-	skb_postpush_rcsum(skb, skb->data, len);
+-	return skb->data;
+-}
+-
+-/**
+  *	skb_pull_rcsum - pull skb and update receive checksum
+  *	@skb: buffer to update
+  *	@len: length of data pulled
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index ea071fa..c26fac2 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
+ 		}
+ 	}
+ 
++	free_percpu(non_pcpu_rt->rt6i_pcpu);
+ 	non_pcpu_rt->rt6i_pcpu = NULL;
+ }
+ 
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index d32cefc..34a5712 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -150,19 +150,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
+ void mesh_sta_cleanup(struct sta_info *sta)
+ {
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	u32 changed;
++	u32 changed = 0;
+ 
+ 	/*
+ 	 * maybe userspace handles peer allocation and peering, but in either
+ 	 * case the beacon is still generated by the kernel and we might need
+ 	 * an update.
+ 	 */
+-	changed = mesh_accept_plinks_update(sdata);
++	if (sdata->u.mesh.user_mpm &&
++	    sta->mesh->plink_state == NL80211_PLINK_ESTAB)
++		changed |= mesh_plink_dec_estab_count(sdata);
++	changed |= mesh_accept_plinks_update(sdata);
+ 	if (!sdata->u.mesh.user_mpm) {
+ 		changed |= mesh_plink_deactivate(sta);
+ 		del_timer_sync(&sta->mesh->plink_timer);
+ 	}
+ 
++	/* make sure no readers can access nexthop sta from here on */
++	mesh_path_flush_by_nexthop(sta);
++	synchronize_net();
++
+ 	if (changed)
+ 		ieee80211_mbss_info_change_notify(sdata, changed);
+ }
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 62193f4..ba7ce53 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -275,7 +275,7 @@ struct ieee80211_fast_tx {
+ 	u8 sa_offs, da_offs, pn_offs;
+ 	u8 band;
+ 	u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
+-	       sizeof(rfc1042_header)];
++	       sizeof(rfc1042_header)] __aligned(2);
+ 
+ 	struct rcu_head rcu_head;
+ };
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 18d0bec..8012f67 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1340,7 +1340,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
+ 				      struct sk_buff *skb,
+ 				      unsigned int num)
+ {
+-	return reciprocal_scale(skb_get_hash(skb), num);
++	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
+ }
+ 
+ static unsigned int fanout_demux_lb(struct packet_fanout *f,
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 8f3948d..934336e 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -180,7 +180,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
+ 
+ 	if (!(at & AT_EGRESS)) {
+ 		if (m->tcfm_ok_push)
+-			skb_push(skb2, skb->mac_len);
++			skb_push_rcsum(skb2, skb->mac_len);
+ 	}
+ 
+ 	/* mirror is always swallowed */
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 7e0c9bf..837dd91 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -446,16 +446,27 @@ out_no_rpciod:
+ 	return ERR_PTR(err);
+ }
+ 
+-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
++static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+ 					struct rpc_xprt *xprt)
+ {
+ 	struct rpc_clnt *clnt = NULL;
+ 	struct rpc_xprt_switch *xps;
+ 
+-	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+-	if (xps == NULL)
+-		return ERR_PTR(-ENOMEM);
+-
++	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
++		WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
++		xps = args->bc_xprt->xpt_bc_xps;
++		xprt_switch_get(xps);
++	} else {
++		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
++		if (xps == NULL) {
++			xprt_put(xprt);
++			return ERR_PTR(-ENOMEM);
++		}
++		if (xprt->bc_xprt) {
++			xprt_switch_get(xps);
++			xprt->bc_xprt->xpt_bc_xps = xps;
++		}
++	}
+ 	clnt = rpc_new_client(args, xps, xprt, NULL);
+ 	if (IS_ERR(clnt))
+ 		return clnt;
+@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+ 
+ 	return clnt;
+ }
+-EXPORT_SYMBOL_GPL(rpc_create_xprt);
+ 
+ /**
+  * rpc_create - create an RPC client and transport with one call
+@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
+ 	};
+ 	char servername[48];
+ 
++	if (args->bc_xprt) {
++		WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
++		xprt = args->bc_xprt->xpt_bc_xprt;
++		if (xprt) {
++			xprt_get(xprt);
++			return rpc_create_xprt(args, xprt);
++		}
++	}
++
+ 	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
+ 		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
+ 	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 7422f28..7231cb4 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
+ 	/* See comment on corresponding get in xs_setup_bc_tcp(): */
+ 	if (xprt->xpt_bc_xprt)
+ 		xprt_put(xprt->xpt_bc_xprt);
++	if (xprt->xpt_bc_xps)
++		xprt_switch_put(xprt->xpt_bc_xps);
+ 	xprt->xpt_ops->xpo_free(xprt);
+ 	module_put(owner);
+ }
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 65e7595..e9e5dd0 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -3050,6 +3050,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
+ 		return xprt;
+ 
+ 	args->bc_xprt->xpt_bc_xprt = NULL;
++	args->bc_xprt->xpt_bc_xps = NULL;
+ 	xprt_put(xprt);
+ 	ret = ERR_PTR(-EINVAL);
+ out_err:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 8269da7..7748199 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
+ 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
+ 		struct dentry *dentry = unix_sk(s)->path.dentry;
+ 
+-		if (dentry && d_backing_inode(dentry) == i) {
++		if (dentry && d_real_inode(dentry) == i) {
+ 			sock_hold(s);
+ 			goto found;
+ 		}
+@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
+ 		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
+ 		if (err)
+ 			goto fail;
+-		inode = d_backing_inode(path.dentry);
++		inode = d_real_inode(path.dentry);
+ 		err = inode_permission(inode, MAY_WRITE);
+ 		if (err)
+ 			goto put_fail;
+@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 			goto out_up;
+ 		}
+ 		addr->hash = UNIX_HASH_SIZE;
+-		hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
++		hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ 		spin_lock(&unix_table_lock);
+ 		u->path = u_path;
+ 		list = &unix_socket_table[hash];
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 9f1c4aa..c878045 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -360,8 +360,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ 	WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
+ 	WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
+ 	WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
+-	WARN_ON(ops->set_tx_power && !ops->get_tx_power);
+-	WARN_ON(ops->set_antenna && !ops->get_antenna);
+ 
+ 	alloc_size = sizeof(*rdev) + sizeof_priv;
+ 
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 9f440a9..47b9178 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
+ 		 * replace EtherType */
+ 		hdrlen += ETH_ALEN + 2;
+ 	else
+-		tmp.h_proto = htons(skb->len);
++		tmp.h_proto = htons(skb->len - hdrlen);
+ 
+ 	pskb_pull(skb, hdrlen);
+ 
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index a915507..fec7578 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
+ 	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+ 		      (*type)[0] ? *type : "*");
+ 
+-	if (compatible[0])
++	if ((*compatible)[0])
+ 		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+ 			*compatible);
+ 
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index dec607c..5ee8201 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -523,34 +523,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
+ {
+ 	struct common_audit_data sa;
+ 	struct apparmor_audit_data aad = {0,};
+-	char *command, *args = value;
++	char *command, *largs = NULL, *args = value;
+ 	size_t arg_size;
+ 	int error;
+ 
+ 	if (size == 0)
+ 		return -EINVAL;
+-	/* args points to a PAGE_SIZE buffer, AppArmor requires that
+-	 * the buffer must be null terminated or have size <= PAGE_SIZE -1
+-	 * so that AppArmor can null terminate them
+-	 */
+-	if (args[size - 1] != '\0') {
+-		if (size == PAGE_SIZE)
+-			return -EINVAL;
+-		args[size] = '\0';
+-	}
+-
+ 	/* task can only write its own attributes */
+ 	if (current != task)
+ 		return -EACCES;
+ 
+-	args = value;
++	/* AppArmor requires that the buffer must be null terminated atm */
++	if (args[size - 1] != '\0') {
++		/* null terminate */
++		largs = args = kmalloc(size + 1, GFP_KERNEL);
++		if (!args)
++			return -ENOMEM;
++		memcpy(args, value, size);
++		args[size] = '\0';
++	}
++
++	error = -EINVAL;
+ 	args = strim(args);
+ 	command = strsep(&args, " ");
+ 	if (!args)
+-		return -EINVAL;
++		goto out;
+ 	args = skip_spaces(args);
+ 	if (!*args)
+-		return -EINVAL;
++		goto out;
+ 
+ 	arg_size = size - (args - (char *) value);
+ 	if (strcmp(name, "current") == 0) {
+@@ -576,10 +576,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
+ 			goto fail;
+ 	} else
+ 		/* only support the "current" and "exec" process attributes */
+-		return -EINVAL;
++		goto fail;
+ 
+ 	if (!error)
+ 		error = size;
++out:
++	kfree(largs);
+ 	return error;
+ 
+ fail:
+@@ -588,9 +590,9 @@ fail:
+ 	aad.profile = aa_current_profile();
+ 	aad.op = OP_SETPROCATTR;
+ 	aad.info = name;
+-	aad.error = -EINVAL;
++	aad.error = error = -EINVAL;
+ 	aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+-	return -EINVAL;
++	goto out;
+ }
+ 
+ static int apparmor_task_setrlimit(struct task_struct *task,
+diff --git a/security/keys/key.c b/security/keys/key.c
+index b287551..af7f682 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -584,7 +584,7 @@ int key_reject_and_link(struct key *key,
+ 
+ 	mutex_unlock(&key_construction_mutex);
+ 
+-	if (keyring)
++	if (keyring && link_ret == 0)
+ 		__key_link_end(keyring, &key->index_key, edit);
+ 
+ 	/* wake up anyone waiting for a key to be constructed */
+diff --git a/sound/core/control.c b/sound/core/control.c
+index a85d455..b4fe9b0 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
+ 	
+ 	if (snd_BUG_ON(!card || !id))
+ 		return;
++	if (card->shutdown)
++		return;
+ 	read_lock(&card->ctl_files_rwlock);
+ #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
+ 	card->mixer_oss_change_count++;
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 308c9ec..8e980aa 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
+ }
+ EXPORT_SYMBOL(snd_pcm_new_internal);
+ 
++static void free_chmap(struct snd_pcm_str *pstr)
++{
++	if (pstr->chmap_kctl) {
++		snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
++		pstr->chmap_kctl = NULL;
++	}
++}
++
+ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
+ {
+ 	struct snd_pcm_substream *substream, *substream_next;
+@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
+ 		kfree(setup);
+ 	}
+ #endif
++	free_chmap(pstr);
+ 	if (pstr->substream_count)
+ 		put_device(&pstr->dev);
+ }
+@@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ 	for (cidx = 0; cidx < 2; cidx++) {
+ 		if (!pcm->internal)
+ 			snd_unregister_device(&pcm->streams[cidx].dev);
+-		if (pcm->streams[cidx].chmap_kctl) {
+-			snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
+-			pcm->streams[cidx].chmap_kctl = NULL;
+-		}
++		free_chmap(&pcm->streams[cidx]);
+ 	}
+ 	mutex_unlock(&pcm->open_mutex);
+ 	mutex_unlock(&register_mutex);
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 6469bed..23b73f6 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1954,6 +1954,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 
+ 		qhead = tu->qhead++;
+ 		tu->qhead %= tu->queue_size;
++		tu->qused--;
+ 		spin_unlock_irq(&tu->qlock);
+ 
+ 		if (tu->tread) {
+@@ -1967,7 +1968,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 		}
+ 
+ 		spin_lock_irq(&tu->qlock);
+-		tu->qused--;
+ 		if (err < 0)
+ 			goto _error;
+ 		result += unit;
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index c0f8f61..172dacd 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
+ 
+ static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
+ {
++	hrtimer_cancel(&dpcm->timer);
+ 	tasklet_kill(&dpcm->tasklet);
+ }
+ 
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index 87041dd..47a358f 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ 	err = reg_raw_write(codec, reg, val);
+ 	if (err == -EAGAIN) {
+ 		err = snd_hdac_power_up_pm(codec);
+-		if (!err)
++		if (err >= 0)
+ 			err = reg_raw_write(codec, reg, val);
+ 		snd_hdac_power_down_pm(codec);
+ 	}
+@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
+ 	err = reg_raw_read(codec, reg, val, uncached);
+ 	if (err == -EAGAIN) {
+ 		err = snd_hdac_power_up_pm(codec);
+-		if (!err)
++		if (err >= 0)
+ 			err = reg_raw_read(codec, reg, val, uncached);
+ 		snd_hdac_power_down_pm(codec);
+ 	}
+diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
+index 4667c32..7417718 100644
+--- a/sound/pci/au88x0/au88x0_core.c
++++ b/sound/pci/au88x0/au88x0_core.c
+@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
+ 	int page, p, pp, delta, i;
+ 
+ 	page =
+-	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
+-	     WT_SUBBUF_MASK)
+-	    >> WT_SUBBUF_SHIFT;
++	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
++	     >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
+ 	if (dma->nr_periods >= 4)
+ 		delta = (page - dma->period_real) & 3;
+ 	else {
+diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
+index 1cb85ae..286f5e3 100644
+--- a/sound/pci/echoaudio/echoaudio.c
++++ b/sound/pci/echoaudio/echoaudio.c
+@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
+ 	u32 pipe_alloc_mask;
+ 	int err;
+ 
+-	commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
++	commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
+ 	if (commpage_bak == NULL)
+ 		return -ENOMEM;
+ 	commpage = chip->comm_page;
+-	memcpy(commpage_bak, commpage, sizeof(struct comm_page));
++	memcpy(commpage_bak, commpage, sizeof(*commpage));
+ 
+ 	err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
+ 	if (err < 0) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index dfaf1a9..d77cc76 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
+ 
+ 	for (n = 0; n < spec->paths.used; n++) {
+ 		path = snd_array_elem(&spec->paths, n);
++		if (!path->depth)
++			continue;
+ 		if (path->path[0] == nid ||
+ 		    path->path[path->depth - 1] == nid) {
+ 			bool pin_old = path->pin_enabled;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 94089fc..6f8ea13 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -367,9 +367,10 @@ enum {
+ #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+ #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+ #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
++#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+ #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
+-			IS_KBL(pci) || IS_KBL_LP(pci)
++			IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
+ 
+ static char *driver_short_names[] = {
+ 	[AZX_DRIVER_ICH] = "HDA Intel",
+@@ -1217,8 +1218,10 @@ static int azx_free(struct azx *chip)
+ 	if (use_vga_switcheroo(hda)) {
+ 		if (chip->disabled && hda->probe_continued)
+ 			snd_hda_unlock_devices(&chip->bus);
+-		if (hda->vga_switcheroo_registered)
++		if (hda->vga_switcheroo_registered) {
+ 			vga_switcheroo_unregister_client(chip->pci);
++			vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
++		}
+ 	}
+ 
+ 	if (bus->chip_init) {
+@@ -2190,6 +2193,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Kabylake-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9d71),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
++	/* Kabylake-H */
++	{ PCI_DEVICE(0x8086, 0xa2f0),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ 	/* Broxton-P(Apollolake) */
+ 	{ PCI_DEVICE(0x8086, 0x5a98),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+@@ -2263,6 +2269,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x157a),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++	{ PCI_DEVICE(0x1002, 0x15b3),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x793b),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0x7919),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0fe18ed..abcb5a6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5650,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++	SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
++	SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
++	SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5735,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+-	{0x12, 0xb7a60130}, \
+ 	{0x21, 0x04211020}
+ 
+ #define ALC256_STANDARD_PINS \
+@@ -5760,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60130},
+ 		{0x14, 0x901701a0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60130},
+ 		{0x14, 0x901701b0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60150},
++		{0x14, 0x901701a0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60150},
++		{0x14, 0x901701b0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60130},
++		{0x1b, 0x90170110}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		{0x14, 0x90170110},
+ 		{0x21, 0x02211020}),
+@@ -5832,6 +5848,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x14, 0x90170120},
+ 		{0x21, 0x02211030}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60170},
++		{0x14, 0x90170120},
++		{0x21, 0x02211030}),
++	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC256_STANDARD_PINS),
+ 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ 		{0x12, 0x90a60130},
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 3fc6358..2d49350 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -552,7 +552,6 @@ static int usb_audio_probe(struct usb_interface *intf,
+ 				goto __error;
+ 			}
+ 			chip = usb_chip[i];
+-			dev_set_drvdata(&dev->dev, chip);
+ 			atomic_inc(&chip->active); /* avoid autopm */
+ 			break;
+ 		}
+@@ -578,6 +577,7 @@ static int usb_audio_probe(struct usb_interface *intf,
+ 			goto __error;
+ 		}
+ 	}
++	dev_set_drvdata(&dev->dev, chip);
+ 
+ 	/*
+ 	 * For devices with more than one control interface, we assume the
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4fd482f..7cb1224 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2868,7 +2868,7 @@ static long kvm_vm_ioctl(struct file *filp,
+ 		if (copy_from_user(&routing, argp, sizeof(routing)))
+ 			goto out;
+ 		r = -EINVAL;
+-		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
++		if (routing.nr > KVM_MAX_IRQ_ROUTES)
+ 			goto out;
+ 		if (routing.flags)
+ 			goto out;

diff --git a/4.6.4/4420_grsecurity-3.1-4.6.4-201607242014.patch b/4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch
similarity index 99%
rename from 4.6.4/4420_grsecurity-3.1-4.6.4-201607242014.patch
rename to 4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch
index f7868ce..927b9ba 100644
--- a/4.6.4/4420_grsecurity-3.1-4.6.4-201607242014.patch
+++ b/4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch
@@ -420,7 +420,7 @@ index fcddfd5..71afd6b 100644
  
  A toggle value indicating if modules are allowed to be loaded
 diff --git a/Makefile b/Makefile
-index cd37442..4c8e887 100644
+index 7d693a8..28a594e 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -1913,7 +1913,7 @@ index d0131ee..23a0939 100644
  #define PTE_EXT_AP0		(_AT(pteval_t, 1) << 4)
  #define PTE_EXT_AP1		(_AT(pteval_t, 2) << 4)
 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
-index aeddd28..207745c 100644
+index 92fd2c8..061dae1 100644
 --- a/arch/arm/include/asm/pgtable-2level.h
 +++ b/arch/arm/include/asm/pgtable-2level.h
 @@ -127,6 +127,9 @@
@@ -1927,7 +1927,7 @@ index aeddd28..207745c 100644
   * These are the memory types, defined to be compatible with
   * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
-index dc46398..70dab92 100644
+index 7411466..c57e55a 100644
 --- a/arch/arm/include/asm/pgtable-3level.h
 +++ b/arch/arm/include/asm/pgtable-3level.h
 @@ -80,6 +80,7 @@
@@ -1952,7 +1952,7 @@ index dc46398..70dab92 100644
  #define L_PTE_DIRTY_HIGH	(1 << (55 - 32))
  
 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
-index 348caab..306b62d 100644
+index d622040..fcebd79 100644
 --- a/arch/arm/include/asm/pgtable.h
 +++ b/arch/arm/include/asm/pgtable.h
 @@ -33,6 +33,9 @@
@@ -2035,7 +2035,7 @@ index 348caab..306b62d 100644
   */
  #define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
  
-@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
+@@ -306,7 +354,7 @@ static inline pte_t pte_mknexec(pte_t pte)
  static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  {
  	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
@@ -3226,7 +3226,7 @@ index e2c6da0..6155a88 100644
  	. = ALIGN(1<<SECTION_SHIFT);
  #else
 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index dded1b7..6fa4373 100644
+index 72b11d9..505a0f5 100644
 --- a/arch/arm/kvm/arm.c
 +++ b/arch/arm/kvm/arm.c
 @@ -59,7 +59,7 @@ static unsigned long hyp_default_vectors;
@@ -3238,7 +3238,7 @@ index dded1b7..6fa4373 100644
  static u32 kvm_next_vmid;
  static unsigned int kvm_vmid_bits __read_mostly;
  static DEFINE_SPINLOCK(kvm_vmid_lock);
-@@ -392,7 +392,7 @@ void force_vm_exit(const cpumask_t *mask)
+@@ -393,7 +393,7 @@ void force_vm_exit(const cpumask_t *mask)
   */
  static bool need_new_vmid_gen(struct kvm *kvm)
  {
@@ -3247,7 +3247,7 @@ index dded1b7..6fa4373 100644
  }
  
  /**
-@@ -425,7 +425,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -426,7 +426,7 @@ static void update_vttbr(struct kvm *kvm)
  
  	/* First user of a new VMID generation? */
  	if (unlikely(kvm_next_vmid == 0)) {
@@ -3256,7 +3256,7 @@ index dded1b7..6fa4373 100644
  		kvm_next_vmid = 1;
  
  		/*
-@@ -442,7 +442,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -443,7 +443,7 @@ static void update_vttbr(struct kvm *kvm)
  		kvm_call_hyp(__kvm_flush_vm_context);
  	}
  
@@ -3375,27 +3375,27 @@ index 1ccbba9..7a95c29 100644
  }
  
 diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
-index 7e989d6..614bf8b 100644
+index 474abff..0d6a05a 100644
 --- a/arch/arm/mach-mvebu/coherency.c
 +++ b/arch/arm/mach-mvebu/coherency.c
 @@ -163,7 +163,7 @@ exit:
  
  /*
-  * This ioremap hook is used on Armada 375/38x to ensure that PCIe
-- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
-+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
-  * is needed as a workaround for a deadlock issue between the PCIe
-  * interface and the cache controller.
-  */
-@@ -176,7 +176,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
- 	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
- 
- 	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
--		mtype = MT_UNCACHED;
-+		mtype = MT_UNCACHED_RW;
- 
+  * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+- * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
++ * areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This is
+  * needed for the HW I/O coherency mechanism to work properly without
+  * deadlock.
+  */
+@@ -171,7 +171,7 @@ static void __iomem *
+ armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ 			 unsigned int mtype, void *caller)
+ {
+-	mtype = MT_UNCACHED;
++	mtype = MT_UNCACHED_RW;
  	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
  }
+ 
 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
 index b6443a4..20a0b74 100644
 --- a/arch/arm/mach-omap2/board-n8x0.c
@@ -8974,7 +8974,7 @@ index 5a7a78f..c0e4207 100644
  					 sechdrs, module);
  	if (!module->arch.tramp)
 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index b8500b4..2873781 100644
+index bec85055..8a6b0c2b 100644
 --- a/arch/powerpc/kernel/process.c
 +++ b/arch/powerpc/kernel/process.c
 @@ -1318,8 +1318,8 @@ void show_regs(struct pt_regs * regs)
@@ -8988,7 +8988,7 @@ index b8500b4..2873781 100644
  #endif
  	show_stack(current, (unsigned long *) regs->gpr[1]);
  	if (!user_mode(regs))
-@@ -1829,10 +1829,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1839,10 +1839,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
  		newsp = stack[0];
  		ip = stack[STACK_FRAME_LR_SAVE];
  		if (!firstframe || ip != lr) {
@@ -9001,7 +9001,7 @@ index b8500b4..2873781 100644
  				       (void *)current->ret_stack[curr_frame].ret);
  				curr_frame--;
  			}
-@@ -1852,7 +1852,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1862,7 +1862,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
  			struct pt_regs *regs = (struct pt_regs *)
  				(sp + STACK_FRAME_OVERHEAD);
  			lr = regs->link;
@@ -9010,7 +9010,7 @@ index b8500b4..2873781 100644
  			       regs->trap, (void *)regs->nip, (void *)lr);
  			firstframe = 1;
  		}
-@@ -1889,13 +1889,6 @@ void notrace __ppc64_runlatch_off(void)
+@@ -1899,13 +1899,6 @@ void notrace __ppc64_runlatch_off(void)
  }
  #endif /* CONFIG_PPC64 */
  
@@ -16778,7 +16778,7 @@ index 10868aa..e645e1d 100644
 +ENDPROC(async_page_fault)
  #endif
 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index 858b555..d5a0c9b 100644
+index 858b555..9e9c957 100644
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
 @@ -36,6 +36,8 @@
@@ -16790,7 +16790,7 @@ index 858b555..d5a0c9b 100644
  
  /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
  #include <linux/elf-em.h>
-@@ -53,6 +55,402 @@ ENTRY(native_usergs_sysret64)
+@@ -53,6 +55,395 @@ ENTRY(native_usergs_sysret64)
  ENDPROC(native_usergs_sysret64)
  #endif /* CONFIG_PARAVIRT */
  
@@ -16974,8 +16974,6 @@ index 858b555..d5a0c9b 100644
 +	sub	phys_base(%rip),%rbx
 +
 +#ifdef CONFIG_PARAVIRT
-+	cmpl	$0, pv_info+PARAVIRT_enabled
-+	jz	1f
 +	pushq %rdi
 +	i = 0
 +	.rept USER_PGD_PTRS
@@ -16986,17 +16984,15 @@ index 858b555..d5a0c9b 100644
 +	i = i + 1
 +	.endr
 +	popq	%rdi
-+	jmp	2f
-+1:
-+#endif
-+
++#else
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	movb	$0,i*8(%rbx)
 +	i = i + 1
 +	.endr
++#endif
 +
-+2:	SET_RDI_INTO_CR3
++	SET_RDI_INTO_CR3
 +
 +#ifdef CONFIG_PAX_KERNEXEC
 +	GET_CR0_INTO_RDI
@@ -17048,8 +17044,6 @@ index 858b555..d5a0c9b 100644
 +	sub	phys_base(%rip),%rbx
 +
 +#ifdef CONFIG_PARAVIRT
-+	cmpl	$0, pv_info+PARAVIRT_enabled
-+	jz	1f
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	mov	i*8(%rbx),%rsi
@@ -17058,15 +17052,14 @@ index 858b555..d5a0c9b 100644
 +	call	PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
 +	i = i + 1
 +	.endr
-+	jmp	2f
-+1:
-+#endif
-+
++#else
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	movb	$0x67,i*8(%rbx)
 +	i = i + 1
 +	.endr
++#endif
++
 +2:
 +
 +#ifdef CONFIG_PARAVIRT
@@ -17193,7 +17186,7 @@ index 858b555..d5a0c9b 100644
  .macro TRACE_IRQS_IRETQ
  #ifdef CONFIG_TRACE_IRQFLAGS
  	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
-@@ -88,7 +486,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -88,7 +479,7 @@ ENDPROC(native_usergs_sysret64)
  .endm
  
  .macro TRACE_IRQS_IRETQ_DEBUG
@@ -17202,7 +17195,7 @@ index 858b555..d5a0c9b 100644
  	jnc	1f
  	TRACE_IRQS_ON_DEBUG
  1:
-@@ -175,11 +573,22 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+@@ -175,11 +566,22 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
  	pushq	%r11				/* pt_regs->r11 */
  	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
  
@@ -17226,7 +17219,7 @@ index 858b555..d5a0c9b 100644
  	jnz	entry_SYSCALL64_slow_path
  
  entry_SYSCALL_64_fastpath:
-@@ -217,9 +626,13 @@ entry_SYSCALL_64_fastpath:
+@@ -217,9 +619,13 @@ entry_SYSCALL_64_fastpath:
  	 */
  	DISABLE_INTERRUPTS(CLBR_NONE)
  	TRACE_IRQS_OFF
@@ -17241,7 +17234,7 @@ index 858b555..d5a0c9b 100644
  	LOCKDEP_SYS_EXIT
  	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
  	movq	RIP(%rsp), %rcx
-@@ -248,6 +661,9 @@ entry_SYSCALL64_slow_path:
+@@ -248,6 +654,9 @@ entry_SYSCALL64_slow_path:
  	call	do_syscall_64		/* returns with IRQs disabled */
  
  return_from_SYSCALL_64:
@@ -17251,7 +17244,7 @@ index 858b555..d5a0c9b 100644
  	RESTORE_EXTRA_REGS
  	TRACE_IRQS_IRETQ		/* we're about to change IF */
  
-@@ -322,7 +738,7 @@ syscall_return_via_sysret:
+@@ -322,7 +731,7 @@ syscall_return_via_sysret:
  opportunistic_sysret_failed:
  	SWAPGS
  	jmp	restore_c_regs_and_iret
@@ -17260,7 +17253,7 @@ index 858b555..d5a0c9b 100644
  
  ENTRY(stub_ptregs_64)
  	/*
-@@ -349,13 +765,13 @@ ENTRY(stub_ptregs_64)
+@@ -349,13 +758,13 @@ ENTRY(stub_ptregs_64)
  1:
  	/* Called from C */
  	jmp	*%rax				/* called from C */
@@ -17276,7 +17269,7 @@ index 858b555..d5a0c9b 100644
  .endm
  
  /* Instantiate ptregs_stub for each ptregs-using syscall */
-@@ -400,10 +816,12 @@ ENTRY(ret_from_fork)
+@@ -400,10 +809,12 @@ ENTRY(ret_from_fork)
  1:
  	movq	%rsp, %rdi
  	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -17290,7 +17283,7 @@ index 858b555..d5a0c9b 100644
  
  /*
   * Build the entry stubs with some assembler magic.
-@@ -418,7 +836,7 @@ ENTRY(irq_entries_start)
+@@ -418,7 +829,7 @@ ENTRY(irq_entries_start)
  	jmp	common_interrupt
  	.align	8
      .endr
@@ -17299,7 +17292,7 @@ index 858b555..d5a0c9b 100644
  
  /*
   * Interrupt entry/exit.
-@@ -444,6 +862,12 @@ END(irq_entries_start)
+@@ -444,6 +855,12 @@ END(irq_entries_start)
  	 */
  	SWAPGS
  
@@ -17312,7 +17305,7 @@ index 858b555..d5a0c9b 100644
  	/*
  	 * We need to tell lockdep that IRQs are off.  We can't do this until
  	 * we fix gsbase, and we should do it before enter_from_user_mode
-@@ -456,7 +880,9 @@ END(irq_entries_start)
+@@ -456,7 +873,9 @@ END(irq_entries_start)
  
  	CALL_enter_from_user_mode
  
@@ -17323,7 +17316,7 @@ index 858b555..d5a0c9b 100644
  	/*
  	 * Save previous stack pointer, optionally switch to interrupt stack.
  	 * irq_count is used to check if a CPU is already on an interrupt stack
-@@ -468,6 +894,7 @@ END(irq_entries_start)
+@@ -468,6 +887,7 @@ END(irq_entries_start)
  	incl	PER_CPU_VAR(irq_count)
  	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
  	pushq	%rdi
@@ -17331,7 +17324,7 @@ index 858b555..d5a0c9b 100644
  	/* We entered an interrupt context - irqs are off: */
  	TRACE_IRQS_OFF
  
-@@ -499,6 +926,8 @@ ret_from_intr:
+@@ -499,6 +919,8 @@ ret_from_intr:
  GLOBAL(retint_user)
  	mov	%rsp,%rdi
  	call	prepare_exit_to_usermode
@@ -17340,7 +17333,7 @@ index 858b555..d5a0c9b 100644
  	TRACE_IRQS_IRETQ
  	SWAPGS
  	jmp	restore_regs_and_iret
-@@ -516,6 +945,21 @@ retint_kernel:
+@@ -516,6 +938,21 @@ retint_kernel:
  	jmp	0b
  1:
  #endif
@@ -17362,7 +17355,7 @@ index 858b555..d5a0c9b 100644
  	/*
  	 * The iretq could re-enable interrupts:
  	 */
-@@ -559,15 +1003,15 @@ native_irq_return_ldt:
+@@ -559,15 +996,15 @@ native_irq_return_ldt:
  	SWAPGS
  	movq	PER_CPU_VAR(espfix_waddr), %rdi
  	movq	%rax, (0*8)(%rdi)		/* RAX */
@@ -17383,7 +17376,7 @@ index 858b555..d5a0c9b 100644
  	movq	%rax, (4*8)(%rdi)
  	andl	$0xffff0000, %eax
  	popq	%rdi
-@@ -577,7 +1021,7 @@ native_irq_return_ldt:
+@@ -577,7 +1014,7 @@ native_irq_return_ldt:
  	popq	%rax
  	jmp	native_irq_return_iret
  #endif
@@ -17392,7 +17385,7 @@ index 858b555..d5a0c9b 100644
  
  /*
   * APIC interrupts.
-@@ -589,7 +1033,7 @@ ENTRY(\sym)
+@@ -589,7 +1026,7 @@ ENTRY(\sym)
  .Lcommon_\sym:
  	interrupt \do_sym
  	jmp	ret_from_intr
@@ -17401,7 +17394,7 @@ index 858b555..d5a0c9b 100644
  .endm
  
  #ifdef CONFIG_TRACING
-@@ -654,7 +1098,7 @@ apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
+@@ -654,7 +1091,7 @@ apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
  /*
   * Exception entry points.
   */
@@ -17410,7 +17403,7 @@ index 858b555..d5a0c9b 100644
  
  .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
  ENTRY(\sym)
-@@ -701,6 +1145,12 @@ ENTRY(\sym)
+@@ -701,6 +1138,12 @@ ENTRY(\sym)
  	.endif
  
  	.if \shift_ist != -1
@@ -17423,7 +17416,7 @@ index 858b555..d5a0c9b 100644
  	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
  	.endif
  
-@@ -744,7 +1194,7 @@ ENTRY(\sym)
+@@ -744,7 +1187,7 @@ ENTRY(\sym)
  
  	jmp	error_exit			/* %ebx: no swapgs flag */
  	.endif
@@ -17432,7 +17425,7 @@ index 858b555..d5a0c9b 100644
  .endm
  
  #ifdef CONFIG_TRACING
-@@ -786,8 +1236,9 @@ gs_change:
+@@ -786,8 +1229,9 @@ gs_change:
  2:	mfence					/* workaround */
  	SWAPGS
  	popfq
@@ -17443,7 +17436,7 @@ index 858b555..d5a0c9b 100644
  
  	_ASM_EXTABLE(gs_change, bad_gs)
  	.section .fixup, "ax"
-@@ -809,8 +1260,9 @@ ENTRY(do_softirq_own_stack)
+@@ -809,8 +1253,9 @@ ENTRY(do_softirq_own_stack)
  	call	__do_softirq
  	leaveq
  	decl	PER_CPU_VAR(irq_count)
@@ -17454,7 +17447,7 @@ index 858b555..d5a0c9b 100644
  
  #ifdef CONFIG_XEN
  idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-@@ -846,7 +1298,7 @@ ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */
+@@ -846,7 +1291,7 @@ ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */
  	call	xen_maybe_preempt_hcall
  #endif
  	jmp	error_exit
@@ -17463,7 +17456,7 @@ index 858b555..d5a0c9b 100644
  
  /*
   * Hypervisor uses this for application faults while it executes.
-@@ -891,7 +1343,7 @@ ENTRY(xen_failsafe_callback)
+@@ -891,7 +1336,7 @@ ENTRY(xen_failsafe_callback)
  	SAVE_C_REGS
  	SAVE_EXTRA_REGS
  	jmp	error_exit
@@ -17472,7 +17465,7 @@ index 858b555..d5a0c9b 100644
  
  apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
  	xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -903,7 +1355,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+@@ -903,7 +1348,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
  	hyperv_callback_vector hyperv_vector_handler
  #endif /* CONFIG_HYPERV */
  
@@ -17481,7 +17474,7 @@ index 858b555..d5a0c9b 100644
  idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
  idtentry stack_segment		do_stack_segment	has_error_code=1
  
-@@ -940,8 +1392,34 @@ ENTRY(paranoid_entry)
+@@ -940,8 +1385,34 @@ ENTRY(paranoid_entry)
  	js	1f				/* negative -> in kernel */
  	SWAPGS
  	xorl	%ebx, %ebx
@@ -17518,7 +17511,7 @@ index 858b555..d5a0c9b 100644
  
  /*
   * "Paranoid" exit path from exception stack.  This is invoked
-@@ -958,19 +1436,26 @@ END(paranoid_entry)
+@@ -958,19 +1429,26 @@ END(paranoid_entry)
  ENTRY(paranoid_exit)
  	DISABLE_INTERRUPTS(CLBR_NONE)
  	TRACE_IRQS_OFF_DEBUG
@@ -17547,7 +17540,7 @@ index 858b555..d5a0c9b 100644
  
  /*
   * Save all registers in pt_regs, and switch gs if needed.
-@@ -984,13 +1469,18 @@ ENTRY(error_entry)
+@@ -984,13 +1462,18 @@ ENTRY(error_entry)
  	testb	$3, CS+8(%rsp)
  	jz	.Lerror_kernelspace
  
@@ -17567,7 +17560,7 @@ index 858b555..d5a0c9b 100644
  .Lerror_entry_from_usermode_after_swapgs:
  	/*
  	 * We need to tell lockdep that IRQs are off.  We can't do this until
-@@ -999,10 +1489,12 @@ ENTRY(error_entry)
+@@ -999,10 +1482,12 @@ ENTRY(error_entry)
  	 */
  	TRACE_IRQS_OFF
  	CALL_enter_from_user_mode
@@ -17580,7 +17573,7 @@ index 858b555..d5a0c9b 100644
  	ret
  
  	/*
-@@ -1020,14 +1512,16 @@ ENTRY(error_entry)
+@@ -1020,14 +1505,16 @@ ENTRY(error_entry)
  	cmpq	%rax, RIP+8(%rsp)
  	je	.Lbstep_iret
  	cmpq	$gs_change, RIP+8(%rsp)
@@ -17599,7 +17592,7 @@ index 858b555..d5a0c9b 100644
  
  .Lbstep_iret:
  	/* Fix truncated RIP */
-@@ -1041,6 +1535,12 @@ ENTRY(error_entry)
+@@ -1041,6 +1528,12 @@ ENTRY(error_entry)
  	 */
  	SWAPGS
  
@@ -17612,7 +17605,7 @@ index 858b555..d5a0c9b 100644
  	/*
  	 * Pretend that the exception came from user mode: set up pt_regs
  	 * as if we faulted immediately after IRET and clear EBX so that
-@@ -1051,11 +1551,11 @@ ENTRY(error_entry)
+@@ -1051,11 +1544,11 @@ ENTRY(error_entry)
  	mov	%rax, %rsp
  	decl	%ebx
  	jmp	.Lerror_entry_from_usermode_after_swapgs
@@ -17626,7 +17619,7 @@ index 858b555..d5a0c9b 100644
   *   1: already in kernel mode, don't need SWAPGS
   *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
   */
-@@ -1063,10 +1563,10 @@ ENTRY(error_exit)
+@@ -1063,10 +1556,10 @@ ENTRY(error_exit)
  	movl	%ebx, %eax
  	DISABLE_INTERRUPTS(CLBR_NONE)
  	TRACE_IRQS_OFF
@@ -17639,7 +17632,7 @@ index 858b555..d5a0c9b 100644
  
  /* Runs on exception stack */
  ENTRY(nmi)
-@@ -1120,6 +1620,8 @@ ENTRY(nmi)
+@@ -1120,6 +1613,8 @@ ENTRY(nmi)
  	 * other IST entries.
  	 */
  
@@ -17648,7 +17641,7 @@ index 858b555..d5a0c9b 100644
  	/* Use %rdx as our temp variable throughout */
  	pushq	%rdx
  
-@@ -1163,6 +1665,12 @@ ENTRY(nmi)
+@@ -1163,6 +1658,12 @@ ENTRY(nmi)
  	pushq	%r14		/* pt_regs->r14 */
  	pushq	%r15		/* pt_regs->r15 */
  
@@ -17661,7 +17654,7 @@ index 858b555..d5a0c9b 100644
  	/*
  	 * At this point we no longer need to worry about stack damage
  	 * due to nesting -- we're on the normal thread stack and we're
-@@ -1173,12 +1681,19 @@ ENTRY(nmi)
+@@ -1173,12 +1674,19 @@ ENTRY(nmi)
  	movq	$-1, %rsi
  	call	do_nmi
  
@@ -17681,7 +17674,7 @@ index 858b555..d5a0c9b 100644
  	jmp	restore_c_regs_and_iret
  
  .Lnmi_from_kernel:
-@@ -1300,6 +1815,7 @@ nested_nmi_out:
+@@ -1300,6 +1808,7 @@ nested_nmi_out:
  	popq	%rdx
  
  	/* We are returning to kernel mode, so this cannot result in a fault. */
@@ -17689,7 +17682,7 @@ index 858b555..d5a0c9b 100644
  	INTERRUPT_RETURN
  
  first_nmi:
-@@ -1328,7 +1844,7 @@ first_nmi:
+@@ -1328,7 +1837,7 @@ first_nmi:
  	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
  	addq	$8, (%rsp)	/* Fix up RSP */
  	pushfq			/* RFLAGS */
@@ -17698,7 +17691,7 @@ index 858b555..d5a0c9b 100644
  	pushq	$1f		/* RIP */
  	INTERRUPT_RETURN	/* continues at repeat_nmi below */
  1:
-@@ -1373,20 +1889,22 @@ end_repeat_nmi:
+@@ -1373,20 +1882,22 @@ end_repeat_nmi:
  	ALLOC_PT_GPREGS_ON_STACK
  
  	/*
@@ -17724,7 +17717,7 @@ index 858b555..d5a0c9b 100644
  	jnz	nmi_restore
  nmi_swapgs:
  	SWAPGS_UNSAFE_STACK
-@@ -1397,6 +1915,8 @@ nmi_restore:
+@@ -1397,6 +1908,8 @@ nmi_restore:
  	/* Point RSP at the "iret" frame. */
  	REMOVE_PT_GPREGS_FROM_STACK 6*8
  
@@ -17733,7 +17726,7 @@ index 858b555..d5a0c9b 100644
  	/*
  	 * Clear "NMI executing".  Set DF first so that we can easily
  	 * distinguish the remaining code between here and IRET from
-@@ -1414,9 +1934,9 @@ nmi_restore:
+@@ -1414,9 +1927,9 @@ nmi_restore:
  	 * mode, so this cannot result in a fault.
  	 */
  	INTERRUPT_RETURN
@@ -18249,7 +18242,7 @@ index 6011a57..311bea0 100644
  
  	while (amd_iommu_v2_event_descs[i].attr.attr.name)
 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index 041e442..3ed82386 100644
+index 7eb806c..13eb73d 100644
 --- a/arch/x86/events/core.c
 +++ b/arch/x86/events/core.c
 @@ -1535,7 +1535,7 @@ static void __init pmu_check_apic(void)
@@ -18279,7 +18272,7 @@ index 041e442..3ed82386 100644
  	}
  
  	return get_desc_base(desc);
-@@ -2356,7 +2356,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+@@ -2357,7 +2357,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
  			break;
  
  		perf_callchain_store(entry, frame.return_address);
@@ -18645,7 +18638,7 @@ index 7377814..a128ad7 100644
  	pt_config_stop(event);
  
 diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
-index 1705c9d..4204f6f 100644
+index 78ee9eb..1aae264 100644
 --- a/arch/x86/events/intel/rapl.c
 +++ b/arch/x86/events/intel/rapl.c
 @@ -100,14 +100,14 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
@@ -19416,7 +19409,7 @@ index 3e86742..1b19554 100644
   * @v: pointer to type int
   *
 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index a984111..d1b3a88 100644
+index a984111..7a258c7 100644
 --- a/arch/x86/include/asm/atomic64_32.h
 +++ b/arch/x86/include/asm/atomic64_32.h
 @@ -11,6 +11,14 @@ typedef struct {
@@ -19616,6 +19609,25 @@ index a984111..d1b3a88 100644
   * atomic64_sub - subtract the atomic64 variable
   * @i: integer value to subtract
   * @v: pointer to type atomic64_t
+@@ -222,6 +341,18 @@ static inline void atomic64_inc(atomic64_t *v)
+ }
+ 
+ /**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++	__alternative_atomic64(inc_unchecked, inc_return_unchecked, /* no output */,
++			       "S" (v) : "memory", "eax", "ecx", "edx");
++}
++
++/**
+  * atomic64_dec - decrement atomic64 variable
+  * @v: pointer to type atomic64_t
+  *
 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
 index 0373510..3619c56 100644
 --- a/arch/x86/include/asm/atomic64_64.h
@@ -25521,7 +25533,7 @@ index 9307f18..a43f175 100644
  	proc_create("apm", 0, NULL, &apm_file_ops);
  
 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
-index 5c04246..7492f2f 100644
+index 5c04246..e280385 100644
 --- a/arch/x86/kernel/asm-offsets.c
 +++ b/arch/x86/kernel/asm-offsets.c
 @@ -32,6 +32,8 @@ void common(void) {
@@ -25533,15 +25545,7 @@ index 5c04246..7492f2f 100644
  
  	BLANK();
  	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -59,6 +61,7 @@ void common(void) {
- 
- #ifdef CONFIG_PARAVIRT
- 	BLANK();
-+	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
- 	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
- 	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
- 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
-@@ -66,8 +69,26 @@ void common(void) {
+@@ -66,8 +68,26 @@ void common(void) {
  	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
  	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
  	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
@@ -25568,7 +25572,7 @@ index 5c04246..7492f2f 100644
  #ifdef CONFIG_XEN
  	BLANK();
  	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
-@@ -85,4 +106,5 @@ void common(void) {
+@@ -85,4 +105,5 @@ void common(void) {
  
  	BLANK();
  	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
@@ -28547,7 +28551,7 @@ index 2da6ee9..fc0ca78 100644
  }
  
 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
-index ae703ac..d510d8a 100644
+index 44bcd57..044428a 100644
 --- a/arch/x86/kernel/kprobes/core.c
 +++ b/arch/x86/kernel/kprobes/core.c
 @@ -121,9 +121,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
@@ -32012,7 +32016,7 @@ index 31346a3..038711e 100644
  	.disabled_by_bios = is_disabled,
  	.hardware_setup = svm_hardware_setup,
 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index faf52bac..fdc4818 100644
+index c4217a2..964fb8e 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -1589,14 +1589,14 @@ static __always_inline void vmcs_writel(unsigned long field, unsigned long value
@@ -32044,7 +32048,7 @@ index faf52bac..fdc4818 100644
  	load_TR_desc();
  }
  
-@@ -2158,6 +2162,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+@@ -2159,6 +2163,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
  		vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
  
@@ -32055,7 +32059,7 @@ index faf52bac..fdc4818 100644
  		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
  		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
  
-@@ -2481,7 +2489,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
+@@ -2483,7 +2491,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
   * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
   * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
   */
@@ -32064,7 +32068,7 @@ index faf52bac..fdc4818 100644
  {
  	u64 host_tsc, tsc_offset;
  
-@@ -4722,7 +4730,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4724,7 +4732,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
  	unsigned long cr4;
  
  	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
@@ -32075,7 +32079,7 @@ index faf52bac..fdc4818 100644
  
  	/* Save the most likely value for this task's CR4 in the VMCS. */
  	cr4 = cr4_read_shadow();
-@@ -4749,7 +4760,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4751,7 +4762,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
  	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
  	vmx->host_idt_base = dt.address;
  
@@ -32084,7 +32088,7 @@ index faf52bac..fdc4818 100644
  
  	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
  	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6297,11 +6308,17 @@ static __init int hardware_setup(void)
+@@ -6299,11 +6310,17 @@ static __init int hardware_setup(void)
  	 * page upon invalidation.  No need to do anything if not
  	 * using the APIC_ACCESS_ADDR VMCS field.
  	 */
@@ -32104,7 +32108,7 @@ index faf52bac..fdc4818 100644
  
  	if (enable_ept && !cpu_has_vmx_ept_2m_page())
  		kvm_disable_largepages();
-@@ -6371,10 +6388,12 @@ static __init int hardware_setup(void)
+@@ -6373,10 +6390,12 @@ static __init int hardware_setup(void)
  		enable_pml = 0;
  
  	if (!enable_pml) {
@@ -32117,7 +32121,7 @@ index faf52bac..fdc4818 100644
  	}
  
  	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
-@@ -8705,6 +8724,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8706,6 +8725,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  		"jmp 2f \n\t"
  		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
  		"2: "
@@ -32130,7 +32134,7 @@ index faf52bac..fdc4818 100644
  		/* Save guest registers, load host registers, keep flags */
  		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
  		"pop %0 \n\t"
-@@ -8757,6 +8782,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8758,6 +8783,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  #endif
  		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
  		[wordsize]"i"(sizeof(ulong))
@@ -32142,7 +32146,7 @@ index faf52bac..fdc4818 100644
  	      : "cc", "memory"
  #ifdef CONFIG_X86_64
  		, "rax", "rbx", "rdi", "rsi"
-@@ -8770,7 +8800,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8771,7 +8801,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	if (debugctlmsr)
  		update_debugctlmsr(debugctlmsr);
  
@@ -32151,7 +32155,7 @@ index faf52bac..fdc4818 100644
  	/*
  	 * The sysexit path does not restore ds/es, so we must set them to
  	 * a reasonable value ourselves.
-@@ -8779,8 +8809,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8780,8 +8810,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	 * may be executed in interrupt context, which saves and restore segments
  	 * around it, nullifying its effect.
  	 */
@@ -32172,7 +32176,7 @@ index faf52bac..fdc4818 100644
  #endif
  
  	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-@@ -10889,7 +10929,7 @@ out:
+@@ -10893,7 +10933,7 @@ out:
  	return ret;
  }
  
@@ -36183,7 +36187,7 @@ index 9d56f27..0d15fff 100644
  			(unsigned long)(&__init_begin),
  			(unsigned long)(&__init_end));
 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index bd7a9b9..94d80a5 100644
+index bd7a9b9..f1dad0b 100644
 --- a/arch/x86/mm/init_32.c
 +++ b/arch/x86/mm/init_32.c
 @@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
@@ -36458,7 +36462,7 @@ index bd7a9b9..94d80a5 100644
 +	struct desc_struct d;
 +	int cpu;
  
-+	limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++	limit = get_kernel_rpl() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
 +	limit = (limit - 1UL) >> PAGE_SHIFT;
 +
 +	memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
@@ -36476,7 +36480,7 @@ index bd7a9b9..94d80a5 100644
 +	start = ktla_ktva(start);
 +#ifdef CONFIG_PAX_KERNEXEC
 +	/* PaX: make KERNEL_CS read-only */
-+	if (!paravirt_enabled()) {
++	if (!get_kernel_rpl()) {
 +#endif
  	kernel_set_to_readonly = 1;
  
@@ -42843,7 +42847,7 @@ index be54e53..50272fe 100644
  {
  	struct hpet_timer __iomem *timer;
 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
-index 94fb407..603dd43 100644
+index 44b1bd6..6877066 100644
 --- a/drivers/char/ipmi/ipmi_msghandler.c
 +++ b/drivers/char/ipmi/ipmi_msghandler.c
 @@ -436,7 +436,7 @@ struct ipmi_smi {
@@ -45022,7 +45026,7 @@ index ac8deb0..f3caa10 100644
  	return -EINVAL;
  }
 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index cf3e712..2d532c3 100644
+index 996a733..742b84f 100644
 --- a/drivers/gpio/gpiolib.c
 +++ b/drivers/gpio/gpiolib.c
 @@ -1031,8 +1031,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
@@ -45087,7 +45091,7 @@ index 35a1248..fd2510a 100644
  	if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
  		return VGA_SWITCHEROO_IGD;
 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
-index 6043dc7..517c964 100644
+index 3e21732..21fc524 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
 @@ -1076,49 +1076,49 @@ int amdgpu_cgs_call_acpi_method(void *cgs_device,
@@ -45285,10 +45289,10 @@ index f1e17d6..e7d750a 100644
  	amdgpu_sync_fini();
  }
 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
-index b04337d..7db2712 100644
+index d78739d..64027ae 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
-@@ -755,4 +755,4 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
+@@ -756,4 +756,4 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
  };
@@ -46110,10 +46114,10 @@ index 96926f0..69097ba 100644
  {
  	struct bochs_device *bochs =
 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index f30de80..7893e10 100644
+index 691a1b9..8d05d29 100644
 --- a/drivers/gpu/drm/drm_crtc.c
 +++ b/drivers/gpu/drm/drm_crtc.c
-@@ -4258,7 +4258,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -4256,7 +4256,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
  					goto done;
  				}
  
@@ -47008,10 +47012,10 @@ index d1a46ef..4999f42 100644
  
  /**
 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 7741efb..483be63 100644
+index e5db9e1..e07f0de 100644
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -15120,13 +15120,13 @@ struct intel_quirk {
+@@ -15140,13 +15140,13 @@ struct intel_quirk {
  	int subsystem_vendor;
  	int subsystem_device;
  	void (*hook)(struct drm_device *dev);
@@ -47028,7 +47032,7 @@ index 7741efb..483be63 100644
  
  static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  {
-@@ -15134,18 +15134,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -15154,18 +15154,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  	return 1;
  }
  
@@ -47059,7 +47063,7 @@ index 7741efb..483be63 100644
  		.hook = quirk_invert_brightness,
  	},
  };
-@@ -15228,7 +15230,7 @@ static void intel_init_quirks(struct drm_device *dev)
+@@ -15248,7 +15250,7 @@ static void intel_init_quirks(struct drm_device *dev)
  			q->hook(dev);
  	}
  	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
@@ -47206,10 +47210,10 @@ index 792f924..aeb1334 100644
 -int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
 +const int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
 diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
-index 14e64e0..620b163 100644
+index d347dca..887b809 100644
 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
 +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
-@@ -1565,7 +1565,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
+@@ -1573,7 +1573,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
  
  #define MODE_BANDWIDTH	MODE_BAD
  
@@ -48006,10 +48010,10 @@ index 81a63d7..5c7f8e7 100644
  {
  	struct drm_device *dev = connector->dev;
 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index d0826fb..b47e793 100644
+index cb29868..33523a2 100644
 --- a/drivers/gpu/drm/radeon/radeon_device.c
 +++ b/drivers/gpu/drm/radeon/radeon_device.c
-@@ -1253,7 +1253,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+@@ -1274,7 +1274,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
  	 * locking inversion with the driver load path. And the access here is
  	 * completely racy anyway. So don't bother with locking for now.
  	 */
@@ -48989,10 +48993,10 @@ index 9fd924c..c64b065 100644
  
  static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-index 6db358a..6401339 100644
+index cab0c54..c03f271 100644
 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-@@ -437,7 +437,7 @@ struct vmw_private {
+@@ -438,7 +438,7 @@ struct vmw_private {
  	 * Fencing and IRQs.
  	 */
  
@@ -49178,29 +49182,6 @@ index c13fb5b..55a3802 100644
  		return -EFAULT;
  
  	*off += size;
-diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
-index 2f1ddca..700145b 100644
---- a/drivers/hid/usbhid/hiddev.c
-+++ b/drivers/hid/usbhid/hiddev.c
-@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
- 					goto inval;
- 			} else if (uref->usage_index >= field->report_count)
- 				goto inval;
--
--			else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
--				 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
--				  uref->usage_index + uref_multi->num_values > field->report_count))
--				goto inval;
- 		}
- 
-+		if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
-+		    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
-+		     uref->usage_index + uref_multi->num_values > field->report_count))
-+			goto inval;
-+
- 		switch (cmd) {
- 		case HIDIOCGUSAGE:
- 			uref->value = field->value[uref->usage_index];
 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
 index 56dd261..493d7e0 100644
 --- a/drivers/hv/channel.c
@@ -49488,10 +49469,10 @@ index 6a27eb2..349ed23 100644
  };
  
 diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
-index c43318d..24bfd03 100644
+index a9356a3..0785f99 100644
 --- a/drivers/hwmon/dell-smm-hwmon.c
 +++ b/drivers/hwmon/dell-smm-hwmon.c
-@@ -819,7 +819,7 @@ static const struct i8k_config_data i8k_config_data[] = {
+@@ -844,7 +844,7 @@ static const struct i8k_config_data i8k_config_data[] = {
  	},
  };
  
@@ -49500,15 +49481,20 @@ index c43318d..24bfd03 100644
  	{
  		.ident = "Dell Inspiron",
  		.matches = {
-@@ -929,7 +929,7 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
- 
- MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
- 
--static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
-+static const struct dmi_system_id i8k_blacklist_dmi_table[] __initconst = {
+@@ -960,8 +960,12 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+  * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call.
+  * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+  */
+-static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
++static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst = {
  	{
- 		/*
- 		 * CPU fan speed going up and down on Dell Studio XPS 8000
++		/*
++		 * CPU fan speed going up and down on Dell Studio XPS 8000
++		 * for unknown reasons.
++		 */
+ 		.ident = "Dell Studio XPS 8000",
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
 index 1f64378..2b6e615 100644
 --- a/drivers/hwmon/ibmaem.c
@@ -49830,7 +49816,7 @@ index 0e931a9..f7cba63 100644
  
  	if (chipset >= AK_MAX_TYPE) {
 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index 1d92e09..215593e 100644
+index c995255..7de0b49 100644
 --- a/drivers/infiniband/core/cm.c
 +++ b/drivers/infiniband/core/cm.c
 @@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
@@ -51368,7 +51354,7 @@ index 5efadad..d1b358e 100644
  	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
  }
 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
-index 4ff73ff..6f1ffa8 100644
+index 3e20208..e0a3c34 100644
 --- a/drivers/iommu/arm-smmu-v3.c
 +++ b/drivers/iommu/arm-smmu-v3.c
 @@ -1562,7 +1562,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
@@ -57959,7 +57945,7 @@ index 170dd68..19b339d 100644
  			err = -EFAULT;
  		goto out_array_args;
 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
-index 21825dd..6e29aa3 100644
+index 859b4a1..a6f983f 100644
 --- a/drivers/memory/omap-gpmc.c
 +++ b/drivers/memory/omap-gpmc.c
 @@ -232,7 +232,6 @@ struct omap3_gpmc_regs {
@@ -62979,10 +62965,10 @@ index 4684644..6687c7f 100644
  	u32 ii;
  	u32 num_frag;
 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
-index 9fcb489..8f58614 100644
+index c70e515..4abd90a 100644
 --- a/drivers/net/geneve.c
 +++ b/drivers/net/geneve.c
-@@ -1508,7 +1508,7 @@ nla_put_failure:
+@@ -1513,7 +1513,7 @@ nla_put_failure:
  	return -EMSGSIZE;
  }
  
@@ -63234,10 +63220,10 @@ index a400288..0c59bcd 100644
         .init = loopback_net_init,
  };
 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
-index 9e803bb..fb251d2 100644
+index 8f3c55d..223d73b 100644
 --- a/drivers/net/macsec.c
 +++ b/drivers/net/macsec.c
-@@ -3217,7 +3217,7 @@ nla_put_failure:
+@@ -3218,7 +3218,7 @@ nla_put_failure:
  	return -EMSGSIZE;
  }
  
@@ -67468,10 +67454,10 @@ index 48e8a97..3499ec8 100644
  
  const struct iw_handler_def prism54_handler_def = {
 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
-index e85e073..5242113 100644
+index 06664ba..974b505 100644
 --- a/drivers/net/wireless/mac80211_hwsim.c
 +++ b/drivers/net/wireless/mac80211_hwsim.c
-@@ -3218,20 +3218,20 @@ static int __init init_mac80211_hwsim(void)
+@@ -3219,20 +3219,20 @@ static int __init init_mac80211_hwsim(void)
  	if (channels < 1)
  		return -EINVAL;
  
@@ -73037,6 +73023,125 @@ index 1deb6ad..3057db5 100644
  		drv = scsi_cmd_to_driver(cmd);
  		if (drv->done)
  			good_bytes = drv->done(cmd);
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index f3d69a98..40f8223 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -559,9 +559,9 @@ static bool scsi_debug_host_lock = DEF_HOST_LOCK;
+ static bool scsi_debug_strict = DEF_STRICT;
+ static bool sdebug_any_injecting_opt;
+ 
+-static atomic_t sdebug_cmnd_count;
+-static atomic_t sdebug_completions;
+-static atomic_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
++static atomic_unchecked_t sdebug_cmnd_count;
++static atomic_unchecked_t sdebug_completions;
++static atomic_unchecked_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
+ 
+ #define DEV_READONLY(TGT)      (0)
+ 
+@@ -3447,7 +3447,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
+ 	struct scsi_cmnd *scp;
+ 	struct sdebug_dev_info *devip;
+ 
+-	atomic_inc(&sdebug_completions);
++	atomic_inc_unchecked(&sdebug_completions);
+ 	qa_indx = indx;
+ 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
+ 		pr_err("wild qa_indx=%d\n", qa_indx);
+@@ -3507,7 +3507,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
+ 	struct scsi_cmnd *scp;
+ 	struct sdebug_dev_info *devip;
+ 
+-	atomic_inc(&sdebug_completions);
++	atomic_inc_unchecked(&sdebug_completions);
+ 	qa_indx = sd_hrtp->qa_indx;
+ 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
+ 		pr_err("wild qa_indx=%d\n", qa_indx);
+@@ -3976,9 +3976,9 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+ 		   (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
+ 		   (scsi_result == 0)) {
+ 		if ((num_in_q == (qdepth - 1)) &&
+-		    (atomic_inc_return(&sdebug_a_tsf) >=
++		    (atomic_inc_return_unchecked(&sdebug_a_tsf) >=
+ 		     abs(scsi_debug_every_nth))) {
+-			atomic_set(&sdebug_a_tsf, 0);
++			atomic_set_unchecked(&sdebug_a_tsf, 0);
+ 			inject = 1;
+ 			scsi_result = device_qfull_result;
+ 		}
+@@ -4182,7 +4182,7 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt
+ 		return -EINVAL;
+ 	scsi_debug_opts = opts;
+ 	if (scsi_debug_every_nth != 0)
+-		atomic_set(&sdebug_cmnd_count, 0);
++		atomic_set_unchecked(&sdebug_cmnd_count, 0);
+ 	return length;
+ }
+ 
+@@ -4197,8 +4197,8 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
+ 	if (scsi_debug_every_nth > 0)
+ 		snprintf(b, sizeof(b), " (curr:%d)",
+ 			 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
+-				atomic_read(&sdebug_a_tsf) :
+-				atomic_read(&sdebug_cmnd_count)));
++				atomic_read_unchecked(&sdebug_a_tsf) :
++				atomic_read_unchecked(&sdebug_cmnd_count)));
+ 	else
+ 		b[0] = '\0';
+ 
+@@ -4213,7 +4213,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
+ 		SCSI_DEBUG_VERSION, scsi_debug_version_date,
+ 		scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
+ 		scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
+-		scsi_debug_max_luns, atomic_read(&sdebug_completions),
++		scsi_debug_max_luns, atomic_read_unchecked(&sdebug_completions),
+ 		scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+ 		sdebug_sectors_per, num_aborts, num_dev_resets,
+ 		num_target_resets, num_bus_resets, num_host_resets,
+@@ -4328,8 +4328,8 @@ opts_done:
+ 		sdebug_any_injecting_opt = true;
+ 	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
+ 		sdebug_any_injecting_opt = true;
+-	atomic_set(&sdebug_cmnd_count, 0);
+-	atomic_set(&sdebug_a_tsf, 0);
++	atomic_set_unchecked(&sdebug_cmnd_count, 0);
++	atomic_set_unchecked(&sdebug_a_tsf, 0);
+ 	return count;
+ }
+ static DRIVER_ATTR_RW(opts);
+@@ -4459,7 +4459,7 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
+ 
+ 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
+ 		scsi_debug_every_nth = nth;
+-		atomic_set(&sdebug_cmnd_count, 0);
++		atomic_set_unchecked(&sdebug_cmnd_count, 0);
+ 		return count;
+ 	}
+ 	return -EINVAL;
+@@ -4783,8 +4783,8 @@ static int __init scsi_debug_init(void)
+ 	int k;
+ 	int ret;
+ 
+-	atomic_set(&sdebug_cmnd_count, 0);
+-	atomic_set(&sdebug_completions, 0);
++	atomic_set_unchecked(&sdebug_cmnd_count, 0);
++	atomic_set_unchecked(&sdebug_completions, 0);
+ 	atomic_set(&retired_max_queue, 0);
+ 
+ 	if (scsi_debug_ndelay >= 1000000000) {
+@@ -5106,9 +5106,9 @@ check_inject(struct scsi_cmnd *scp)
+ 
+ 	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
+ 
+-	if (atomic_inc_return(&sdebug_cmnd_count) >=
++	if (atomic_inc_return_unchecked(&sdebug_cmnd_count) >=
+ 	    abs(scsi_debug_every_nth)) {
+-		atomic_set(&sdebug_cmnd_count, 0);
++		atomic_set_unchecked(&sdebug_cmnd_count, 0);
+ 		if (scsi_debug_every_nth < -1)
+ 			scsi_debug_every_nth = -1;
+ 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
 index f704d02..18cad07 100644
 --- a/drivers/scsi/scsi_lib.c
@@ -73209,7 +73314,7 @@ index e3cd3ec..97ab643 100644
  
  	transport_setup_device(&rport->dev);
 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index f52b74c..527a5d7 100644
+index 41c3a2c..9aa3d32 100644
 --- a/drivers/scsi/sd.c
 +++ b/drivers/scsi/sd.c
 @@ -112,7 +112,7 @@ static int sd_resume(struct device *);
@@ -75004,10 +75109,10 @@ index 13965f2..0ea64d0 100644
  void rtw_os_xmit_schedule(struct adapter *padapter);
  
 diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
-index 0fea338..e6f2263 100644
+index 0fea338..acc951f 100644
 --- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
 +++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
-@@ -814,7 +814,7 @@ void usb_write_port_cancel(struct adapter *padapter)
+@@ -814,10 +814,10 @@ void usb_write_port_cancel(struct adapter *padapter)
  	}
  }
  
@@ -75015,8 +75120,12 @@ index 0fea338..e6f2263 100644
 +void rtl8188eu_recv_tasklet(unsigned long priv)
  {
  	struct sk_buff *pskb;
- 	struct adapter *adapt = priv;
-@@ -833,7 +833,7 @@ void rtl8188eu_recv_tasklet(void *priv)
+-	struct adapter *adapt = priv;
++	struct adapter *adapt = (struct adapter *)priv;
+ 	struct recv_priv *precvpriv = &adapt->recvpriv;
+ 
+ 	while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
+@@ -833,10 +833,10 @@ void rtl8188eu_recv_tasklet(void *priv)
  	}
  }
  
@@ -75024,7 +75133,11 @@ index 0fea338..e6f2263 100644
 +void rtl8188eu_xmit_tasklet(unsigned long priv)
  {
  	int ret = false;
- 	struct adapter *adapt = priv;
+-	struct adapter *adapt = priv;
++	struct adapter *adapt = (struct adapter *)priv;
+ 	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+ 
+ 	if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
 diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
 index 1593e28..a4345f9 100644
 --- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -76272,7 +76385,7 @@ index c57e788..24d15dd 100644
  	login->tgt_agt = sbp_target_agent_register(login);
  	if (IS_ERR(login->tgt_agt)) {
 diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
-index 6ceac4f..f8059ccd 100644
+index 5b4b47e..94fac45 100644
 --- a/drivers/thermal/cpu_cooling.c
 +++ b/drivers/thermal/cpu_cooling.c
 @@ -838,10 +838,11 @@ __cpufreq_cooling_register(struct device_node *np,
@@ -77898,10 +78011,10 @@ index dbcca30..7549b6f 100644
  	tty_port_tty_set(port, tty);
  
 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
-index f973bfc..cc47958 100644
+index 1e93a37..81c499f 100644
 --- a/drivers/tty/vt/keyboard.c
 +++ b/drivers/tty/vt/keyboard.c
-@@ -642,6 +642,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
+@@ -630,6 +630,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
  	     kbd->kbdmode == VC_OFF) &&
  	     value != KVAL(K_SAK))
  		return;		/* SAK is allowed even in raw mode */
@@ -77918,7 +78031,7 @@ index f973bfc..cc47958 100644
  	fn_handler[value](vc);
  }
  
-@@ -1876,9 +1886,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+@@ -1864,9 +1874,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
  	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
  		return -EFAULT;
  
@@ -77928,7 +78041,7 @@ index f973bfc..cc47958 100644
  	switch (cmd) {
  	case KDGKBENT:
  		/* Ensure another thread doesn't free it under us */
-@@ -1893,6 +1900,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
+@@ -1881,6 +1888,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
  		spin_unlock_irqrestore(&kbd_event_lock, flags);
  		return put_user(val, &user_kbe->kb_value);
  	case KDSKBENT:
@@ -77938,7 +78051,7 @@ index f973bfc..cc47958 100644
  		if (!perm)
  			return -EPERM;
  		if (!i && v == K_NOSUCHMAP) {
-@@ -1983,9 +1993,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+@@ -1971,9 +1981,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
  	int i, j, k;
  	int ret;
  
@@ -77948,7 +78061,7 @@ index f973bfc..cc47958 100644
  	kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
  	if (!kbs) {
  		ret = -ENOMEM;
-@@ -2019,6 +2026,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+@@ -2007,6 +2014,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
  		kfree(kbs);
  		return ((p && *p) ? -EOVERFLOW : 0);
  	case KDSKBSENT:
@@ -78245,7 +78358,7 @@ index 52c4461..adf74f5 100644
  			(uurb->endpoint & USB_DIR_IN);
  
 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index 980fc57..e36e99a 100644
+index 2d107d0..9489679 100644
 --- a/drivers/usb/core/hcd.c
 +++ b/drivers/usb/core/hcd.c
 @@ -1629,7 +1629,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
@@ -96575,7 +96688,7 @@ index 20a2c02..5daa230 100644
  	else if (whole->bd_holder != NULL)
  		return false;	 /* is a partition of a held device */
 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index ec7928a..89f482a 100644
+index 234707c..fa73ff27 100644
 --- a/fs/btrfs/ctree.c
 +++ b/fs/btrfs/ctree.c
 @@ -358,7 +358,7 @@ static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
@@ -96604,9 +96717,20 @@ index ec7928a..89f482a 100644
  
  		WARN_ON(trans->transid != btrfs_header_generation(parent));
 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
-index 208d199..0a5afe1 100644
+index 208d199..c8a54d0 100644
 --- a/fs/btrfs/ctree.h
 +++ b/fs/btrfs/ctree.h
+@@ -979,8 +979,8 @@ struct btrfs_dev_replace {
+ 	u64 replace_state;	/* see #define above */
+ 	u64 time_started;	/* seconds since 1-Jan-1970 */
+ 	u64 time_stopped;	/* seconds since 1-Jan-1970 */
+-	atomic64_t num_write_errors;
+-	atomic64_t num_uncorrectable_read_errors;
++	atomic64_unchecked_t num_write_errors;
++	atomic64_unchecked_t num_uncorrectable_read_errors;
+ 
+ 	u64 cursor_left;
+ 	u64 committed_cursor_left;
 @@ -1615,7 +1615,7 @@ struct btrfs_fs_info {
  
  	/* this protects tree_mod_seq_list */
@@ -96700,6 +96824,83 @@ index 430b368..85f12e1 100644
  
  	/* first set the basic ref node struct up */
  	atomic_set(&ref->refs, 1);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 26bcb48..0d4f068 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -85,8 +85,8 @@ no_valid_dev_replace_entry_found:
+ 		dev_replace->replace_state = 0;
+ 		dev_replace->time_started = 0;
+ 		dev_replace->time_stopped = 0;
+-		atomic64_set(&dev_replace->num_write_errors, 0);
+-		atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
++		atomic64_set_unchecked(&dev_replace->num_write_errors, 0);
++		atomic64_set_unchecked(&dev_replace->num_uncorrectable_read_errors, 0);
+ 		dev_replace->cursor_left = 0;
+ 		dev_replace->committed_cursor_left = 0;
+ 		dev_replace->cursor_left_last_write_of_item = 0;
+@@ -115,9 +115,9 @@ no_valid_dev_replace_entry_found:
+ 	dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
+ 	dev_replace->time_stopped =
+ 		btrfs_dev_replace_time_stopped(eb, ptr);
+-	atomic64_set(&dev_replace->num_write_errors,
++	atomic64_set_unchecked(&dev_replace->num_write_errors,
+ 		     btrfs_dev_replace_num_write_errors(eb, ptr));
+-	atomic64_set(&dev_replace->num_uncorrectable_read_errors,
++	atomic64_set_unchecked(&dev_replace->num_uncorrectable_read_errors,
+ 		     btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
+ 	dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
+ 	dev_replace->committed_cursor_left = dev_replace->cursor_left;
+@@ -277,9 +277,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
+ 	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
+ 	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
+ 	btrfs_set_dev_replace_num_write_errors(eb, ptr,
+-		atomic64_read(&dev_replace->num_write_errors));
++		atomic64_read_unchecked(&dev_replace->num_write_errors));
+ 	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
+-		atomic64_read(&dev_replace->num_uncorrectable_read_errors));
++		atomic64_read_unchecked(&dev_replace->num_uncorrectable_read_errors));
+ 	dev_replace->cursor_left_last_write_of_item =
+ 		dev_replace->cursor_left;
+ 	btrfs_set_dev_replace_cursor_left(eb, ptr,
+@@ -394,8 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
+ 	dev_replace->cursor_right = 0;
+ 	dev_replace->is_valid = 1;
+ 	dev_replace->item_needs_writeback = 1;
+-	atomic64_set(&dev_replace->num_write_errors, 0);
+-	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
++	atomic64_set_unchecked(&dev_replace->num_write_errors, 0);
++	atomic64_set_unchecked(&dev_replace->num_uncorrectable_read_errors, 0);
+ 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ 	btrfs_dev_replace_unlock(dev_replace, 1);
+ 
+@@ -659,9 +659,9 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
+ 	args->status.time_started = dev_replace->time_started;
+ 	args->status.time_stopped = dev_replace->time_stopped;
+ 	args->status.num_write_errors =
+-		atomic64_read(&dev_replace->num_write_errors);
++		atomic64_read_unchecked(&dev_replace->num_write_errors);
+ 	args->status.num_uncorrectable_read_errors =
+-		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
++		atomic64_read_unchecked(&dev_replace->num_uncorrectable_read_errors);
+ 	switch (dev_replace->replace_state) {
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
+index 29e3ef5..37e8d79 100644
+--- a/fs/btrfs/dev-replace.h
++++ b/fs/btrfs/dev-replace.h
+@@ -40,8 +40,8 @@ void btrfs_dev_replace_set_lock_blocking(struct btrfs_dev_replace *dev_replace);
+ void btrfs_dev_replace_clear_lock_blocking(
+ 					struct btrfs_dev_replace *dev_replace);
+ 
+-static inline void btrfs_dev_replace_stats_inc(atomic64_t *stat_value)
++static inline void btrfs_dev_replace_stats_inc(atomic64_unchecked_t *stat_value)
+ {
+-	atomic64_inc(stat_value);
++	atomic64_inc_unchecked(stat_value);
+ }
+ #endif
 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
 index 4e47849..d0d47dc 100644
 --- a/fs/btrfs/disk-io.c
@@ -96917,8 +97118,21 @@ index 0b7792e..be2ccab17 100644
  	/*
  	 * build a list of bios to read all the missing parts of this
  	 * stripe
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 4678f03..0cadde2 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3667,7 +3667,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 		if (ret)
+ 			break;
+ 		if (is_dev_replace &&
+-		    atomic64_read(&dev_replace->num_write_errors) > 0) {
++		    atomic64_read_unchecked(&dev_replace->num_write_errors) > 0) {
+ 			ret = -EIO;
+ 			break;
+ 		}
 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
-index 00b8f37..c5e191f 100644
+index d7c138f..02ad00f 100644
 --- a/fs/btrfs/super.c
 +++ b/fs/btrfs/super.c
 @@ -248,7 +248,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
@@ -97860,10 +98074,10 @@ index 3525ed7..ac8afb7 100644
  }
  
 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
-index 8f38e33..90f716a 100644
+index 29e06db..e14922b 100644
 --- a/fs/cifs/smb2pdu.c
 +++ b/fs/cifs/smb2pdu.c
-@@ -2388,8 +2388,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+@@ -2409,8 +2409,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
  	default:
  		cifs_dbg(VFS, "info level %u isn't supported\n",
  			 srch_inf->info_level);
@@ -103538,7 +103752,7 @@ index 14db05d..687f6d8 100644
  #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
  
 diff --git a/fs/namei.c b/fs/namei.c
-index 30145f8..ddb7af4 100644
+index aaa3b69..c610f44 100644
 --- a/fs/namei.c
 +++ b/fs/namei.c
 @@ -336,17 +336,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -103838,7 +104052,7 @@ index 30145f8..ddb7af4 100644
  	/*
  	 * If atomic_open() acquired write access it is dropped now due to
  	 * possible mount and symlink following (this might be optimized away if
-@@ -3178,6 +3314,13 @@ retry_lookup:
+@@ -3182,6 +3318,13 @@ retry_lookup:
  		return -ENOENT;
  	}
  
@@ -104134,7 +104348,7 @@ index 30145f8..ddb7af4 100644
  out:
  	return len;
 diff --git a/fs/namespace.c b/fs/namespace.c
-index 4fb1691..3077a5c 100644
+index 783004a..419cf4a 100644
 --- a/fs/namespace.c
 +++ b/fs/namespace.c
 @@ -1516,6 +1516,9 @@ static int do_umount(struct mount *mnt, int flags)
@@ -104175,7 +104389,7 @@ index 4fb1691..3077a5c 100644
  {
  	return sys_umount(name, 0);
  }
-@@ -2721,6 +2727,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+@@ -2723,6 +2729,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
  		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
  		   MS_STRICTATIME);
  
@@ -104192,7 +104406,7 @@ index 4fb1691..3077a5c 100644
  	if (flags & MS_REMOUNT)
  		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
  				    data_page);
-@@ -2734,7 +2750,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+@@ -2736,7 +2752,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
  		retval = do_new_mount(&path, type_page, flags, mnt_flags,
  				      dev_name, data_page);
  dput_out:
@@ -104203,7 +104417,7 @@ index 4fb1691..3077a5c 100644
  	return retval;
  }
  
-@@ -2752,7 +2771,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+@@ -2754,7 +2773,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
   * number incrementing at 10Ghz will take 12,427 years to wrap which
   * is effectively never, so we can ignore the possibility.
   */
@@ -104212,7 +104426,7 @@ index 4fb1691..3077a5c 100644
  
  static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
  {
-@@ -2768,7 +2787,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2770,7 +2789,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
  		return ERR_PTR(ret);
  	}
  	new_ns->ns.ops = &mntns_operations;
@@ -104221,7 +104435,7 @@ index 4fb1691..3077a5c 100644
  	atomic_set(&new_ns->count, 1);
  	new_ns->root = NULL;
  	INIT_LIST_HEAD(&new_ns->list);
-@@ -2778,6 +2797,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2780,6 +2799,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
  	return new_ns;
  }
  
@@ -104229,7 +104443,7 @@ index 4fb1691..3077a5c 100644
  struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
  		struct user_namespace *user_ns, struct fs_struct *new_fs)
  {
-@@ -2899,8 +2919,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+@@ -2901,8 +2921,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
  }
  EXPORT_SYMBOL(mount_subtree);
  
@@ -104240,7 +104454,7 @@ index 4fb1691..3077a5c 100644
  {
  	int ret;
  	char *kernel_type;
-@@ -3006,6 +3026,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -3008,6 +3028,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
  	if (error)
  		goto out2;
  
@@ -104252,7 +104466,7 @@ index 4fb1691..3077a5c 100644
  	get_fs_root(current->fs, &root);
  	old_mp = lock_mount(&old);
  	error = PTR_ERR(old_mp);
-@@ -3324,7 +3349,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+@@ -3330,7 +3355,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
  	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
  		return -EPERM;
  
@@ -104618,7 +104832,7 @@ index 976c906..16b86ac 100644
  		.pc_ressize = 256,
  		.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index 33eb817..e72c6b7 100644
+index a7dd1fe..9d5f829 100644
 --- a/fs/nfs/dir.c
 +++ b/fs/nfs/dir.c
 @@ -681,8 +681,9 @@ out:
@@ -107117,7 +107331,7 @@ index 88474a4..55ee771 100644
  	.p_replen = NFS4_##restype##_sz,			\
  	.p_statidx = NFSPROC4_CLNT_##proc,			\
 diff --git a/fs/nfs/read.c b/fs/nfs/read.c
-index 6776d7a..f018f72 100644
+index 572e5b3..5245a0a 100644
 --- a/fs/nfs/read.c
 +++ b/fs/nfs/read.c
 @@ -346,7 +346,7 @@ struct nfs_readdesc {
@@ -107194,7 +107408,7 @@ index 4123551..813b403 100644
  
  #endif   /* _NFSD4_CURRENT_STATE_H */
 diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
-index 1580ea6..5d74e50 100644
+index d08cd88..5d74e50 100644
 --- a/fs/nfsd/nfs2acl.c
 +++ b/fs/nfsd/nfs2acl.c
 @@ -27,9 +27,10 @@ nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
@@ -107224,47 +107438,6 @@ index 1580ea6..5d74e50 100644
  	struct inode *inode;
  	svc_fh *fh;
  	__be32 nfserr = 0;
-@@ -104,22 +105,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
- 		goto out;
- 
- 	inode = d_inode(fh->fh_dentry);
--	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
--		error = -EOPNOTSUPP;
--		goto out_errno;
--	}
- 
- 	error = fh_want_write(fh);
- 	if (error)
- 		goto out_errno;
- 
--	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
-+	fh_lock(fh);
-+
-+	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
- 	if (error)
--		goto out_drop_write;
--	error = inode->i_op->set_acl(inode, argp->acl_default,
--				     ACL_TYPE_DEFAULT);
-+		goto out_drop_lock;
-+	error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
- 	if (error)
--		goto out_drop_write;
-+		goto out_drop_lock;
-+
-+	fh_unlock(fh);
- 
- 	fh_drop_write(fh);
- 
-@@ -131,7 +131,8 @@ out:
- 	posix_acl_release(argp->acl_access);
- 	posix_acl_release(argp->acl_default);
- 	return nfserr;
--out_drop_write:
-+out_drop_lock:
-+	fh_unlock(fh);
- 	fh_drop_write(fh);
- out_errno:
- 	nfserr = nfserrno(error);
 @@ -141,9 +142,10 @@ out_errno:
  /*
   * Check file attributes
@@ -107441,7 +107614,7 @@ index 1580ea6..5d74e50 100644
     sizeof(struct nfsd3_##rest##res),		\
     0,						\
 diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
-index 01df4cd..36a8d76 100644
+index 0c89034..36a8d76 100644
 --- a/fs/nfsd/nfs3acl.c
 +++ b/fs/nfsd/nfs3acl.c
 @@ -26,9 +26,10 @@ nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
@@ -107471,37 +107644,7 @@ index 01df4cd..36a8d76 100644
  	struct inode *inode;
  	svc_fh *fh;
  	__be32 nfserr = 0;
-@@ -95,22 +96,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
- 		goto out;
- 
- 	inode = d_inode(fh->fh_dentry);
--	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
--		error = -EOPNOTSUPP;
--		goto out_errno;
--	}
- 
- 	error = fh_want_write(fh);
- 	if (error)
- 		goto out_errno;
- 
--	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
-+	fh_lock(fh);
-+
-+	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
- 	if (error)
--		goto out_drop_write;
--	error = inode->i_op->set_acl(inode, argp->acl_default,
--				     ACL_TYPE_DEFAULT);
-+		goto out_drop_lock;
-+	error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
- 
--out_drop_write:
-+out_drop_lock:
-+	fh_unlock(fh);
- 	fh_drop_write(fh);
- out_errno:
- 	nfserr = nfserrno(error);
-@@ -125,9 +124,10 @@ out:
+@@ -123,9 +124,10 @@ out:
  /*
   * XDR decode functions
   */
@@ -107514,7 +107657,7 @@ index 01df4cd..36a8d76 100644
  	p = nfs3svc_decode_fh(p, &args->fh);
  	if (!p)
  		return 0;
-@@ -137,9 +137,10 @@ static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
+@@ -135,9 +137,10 @@ static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
  }
  
  
@@ -107527,7 +107670,7 @@ index 01df4cd..36a8d76 100644
  	struct kvec *head = rqstp->rq_arg.head;
  	unsigned int base;
  	int n;
-@@ -168,9 +169,10 @@ static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
+@@ -166,9 +169,10 @@ static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
   */
  
  /* GETACL */
@@ -107540,7 +107683,7 @@ index 01df4cd..36a8d76 100644
  	struct dentry *dentry = resp->fh.fh_dentry;
  
  	p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
-@@ -213,9 +215,10 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
+@@ -211,9 +215,10 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
  }
  
  /* SETACL */
@@ -107553,7 +107696,7 @@ index 01df4cd..36a8d76 100644
  	p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
  
  	return xdr_ressize_check(rqstp, p);
-@@ -224,9 +227,10 @@ static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
+@@ -222,9 +227,10 @@ static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
  /*
   * XDR release functions
   */
@@ -107566,7 +107709,7 @@ index 01df4cd..36a8d76 100644
  	fh_put(&resp->fh);
  	posix_acl_release(resp->acl_access);
  	posix_acl_release(resp->acl_default);
-@@ -240,10 +244,10 @@ static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
+@@ -238,10 +244,10 @@ static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
  struct nfsd3_voidargs { int dummy; };
  
  #define PROC(name, argt, rest, relt, cache, respsize)	\
@@ -108597,47 +108740,8 @@ index 2246454..b866de8 100644
  	fh_put(&resp->fh1);
  	fh_put(&resp->fh2);
  	return 1;
-diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
-index 6adabd6..71292a0 100644
---- a/fs/nfsd/nfs4acl.c
-+++ b/fs/nfsd/nfs4acl.c
-@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	dentry = fhp->fh_dentry;
- 	inode = d_inode(dentry);
- 
--	if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
--		return nfserr_attrnotsupp;
--
- 	if (S_ISDIR(inode->i_mode))
- 		flags = NFS4_ACL_DIR;
- 
-@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	if (host_error < 0)
- 		goto out_nfserr;
- 
--	host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
-+	fh_lock(fhp);
-+
-+	host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
- 	if (host_error < 0)
--		goto out_release;
-+		goto out_drop_lock;
- 
- 	if (S_ISDIR(inode->i_mode)) {
--		host_error = inode->i_op->set_acl(inode, dpacl,
--						  ACL_TYPE_DEFAULT);
-+		host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
- 	}
- 
--out_release:
-+out_drop_lock:
-+	fh_unlock(fhp);
-+
- 	posix_acl_release(pacl);
- 	posix_acl_release(dpacl);
- out_nfserr:
 diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
-index 7389cb1..e031e30d 100644
+index 04c68d9..cc49866 100644
 --- a/fs/nfsd/nfs4callback.c
 +++ b/fs/nfsd/nfs4callback.c
 @@ -470,8 +470,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr,
@@ -109544,7 +109648,7 @@ index de1ff1d..bd4c347 100644
  		.pc_ressize = sizeof(struct nfsd4_compoundres),
  		.pc_release = nfsd4_release_compoundargs,
 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 0462eed..7709d1d 100644
+index 9e04e49..1e7e2d3 100644
 --- a/fs/nfsd/nfs4state.c
 +++ b/fs/nfsd/nfs4state.c
 @@ -2362,8 +2362,9 @@ static bool client_has_state(struct nfs4_client *clp)
@@ -109657,7 +109761,7 @@ index 0462eed..7709d1d 100644
  	struct nfs4_client *conf, *unconf;
  	struct nfs4_client *old = NULL;
  	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
-@@ -4436,8 +4446,9 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
+@@ -4446,8 +4456,9 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
  
  __be32
  nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109668,7 +109772,7 @@ index 0462eed..7709d1d 100644
  	struct nfs4_client *clp;
  	__be32 status;
  	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-@@ -4893,8 +4904,9 @@ out:
+@@ -4903,8 +4914,9 @@ out:
   */
  __be32
  nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109679,7 +109783,7 @@ index 0462eed..7709d1d 100644
  	struct nfsd4_test_stateid_id *stateid;
  	struct nfs4_client *cl = cstate->session->se_client;
  
-@@ -4907,8 +4919,9 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+@@ -4917,8 +4929,9 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
  
  __be32
  nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109690,7 +109794,7 @@ index 0462eed..7709d1d 100644
  	stateid_t *stateid = &free_stateid->fr_stateid;
  	struct nfs4_stid *s;
  	struct nfs4_delegation *dp;
-@@ -5046,8 +5059,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+@@ -5056,8 +5069,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
  
  __be32
  nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109701,7 +109805,7 @@ index 0462eed..7709d1d 100644
  	__be32 status;
  	struct nfs4_openowner *oo;
  	struct nfs4_ol_stateid *stp;
-@@ -5115,8 +5129,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
+@@ -5125,8 +5139,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
  __be32
  nfsd4_open_downgrade(struct svc_rqst *rqstp,
  		     struct nfsd4_compound_state *cstate,
@@ -109712,7 +109816,7 @@ index 0462eed..7709d1d 100644
  	__be32 status;
  	struct nfs4_ol_stateid *stp;
  	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-@@ -5184,8 +5199,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+@@ -5194,8 +5209,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
   */
  __be32
  nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109723,7 +109827,7 @@ index 0462eed..7709d1d 100644
  	__be32 status;
  	struct nfs4_ol_stateid *stp;
  	struct net *net = SVC_NET(rqstp);
-@@ -5214,8 +5230,9 @@ out:
+@@ -5224,8 +5240,9 @@ out:
  
  __be32
  nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109734,7 +109838,7 @@ index 0462eed..7709d1d 100644
  	struct nfs4_delegation *dp;
  	stateid_t *stateid = &dr->dr_stateid;
  	struct nfs4_stid *s;
-@@ -5549,8 +5566,9 @@ out:
+@@ -5559,8 +5576,9 @@ out:
   */
  __be32
  nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109745,7 +109849,7 @@ index 0462eed..7709d1d 100644
  	struct nfs4_openowner *open_sop = NULL;
  	struct nfs4_lockowner *lock_sop = NULL;
  	struct nfs4_ol_stateid *lock_stp = NULL;
-@@ -5753,8 +5771,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
+@@ -5763,8 +5781,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
   */
  __be32
  nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109756,7 +109860,7 @@ index 0462eed..7709d1d 100644
  	struct file_lock *file_lock = NULL;
  	struct nfs4_lockowner *lo = NULL;
  	__be32 status;
-@@ -5826,8 +5845,9 @@ out:
+@@ -5836,8 +5855,9 @@ out:
  
  __be32
  nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -109767,7 +109871,7 @@ index 0462eed..7709d1d 100644
  	struct nfs4_ol_stateid *stp;
  	struct file *filp = NULL;
  	struct file_lock *file_lock = NULL;
-@@ -5933,8 +5953,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+@@ -5943,8 +5963,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
  __be32
  nfsd4_release_lockowner(struct svc_rqst *rqstp,
  			struct nfsd4_compound_state *cstate,
@@ -109778,7 +109882,7 @@ index 0462eed..7709d1d 100644
  	clientid_t *clid = &rlockowner->rl_clientid;
  	struct nfs4_stateowner *sop;
  	struct nfs4_lockowner *lo = NULL;
-@@ -6878,26 +6899,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
+@@ -6888,26 +6909,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
   * functions to set current state id
   */
  void
@@ -109817,7 +109921,7 @@ index 0462eed..7709d1d 100644
  	put_stateid(cstate, &lock->lk_resp_stateid);
  }
  
-@@ -6906,49 +6935,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
+@@ -6916,49 +6945,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
   */
  
  void
@@ -112947,48 +113051,10 @@ index cc514da..2895466 100644
  	if (res < 0) {
  		free_page((unsigned long) buf);
 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
-index a4ff5d0..43d5748 100644
+index d46fa60..e9a726d 100644
 --- a/fs/overlayfs/inode.c
 +++ b/fs/overlayfs/inode.c
-@@ -59,16 +59,37 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
- 	if (err)
- 		goto out;
- 
-+	if (attr->ia_valid & ATTR_SIZE) {
-+		struct inode *realinode = d_inode(ovl_dentry_real(dentry));
-+
-+		err = -ETXTBSY;
-+		if (atomic_read(&realinode->i_writecount) < 0)
-+			goto out_drop_write;
-+	}
-+
- 	err = ovl_copy_up(dentry);
- 	if (!err) {
-+		struct inode *winode = NULL;
-+
- 		upperdentry = ovl_dentry_upper(dentry);
- 
-+		if (attr->ia_valid & ATTR_SIZE) {
-+			winode = d_inode(upperdentry);
-+			err = get_write_access(winode);
-+			if (err)
-+				goto out_drop_write;
-+		}
-+
- 		inode_lock(upperdentry->d_inode);
- 		err = notify_change(upperdentry, attr, NULL);
- 		if (!err)
- 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
- 		inode_unlock(upperdentry->d_inode);
-+
-+		if (winode)
-+			put_write_access(winode);
- 	}
-+out_drop_write:
- 	ovl_drop_write(dentry);
- out:
- 	return err;
-@@ -347,6 +368,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
+@@ -373,6 +373,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
  	if (d_is_dir(dentry))
  		return d_backing_inode(dentry);
  
@@ -112999,7 +113065,7 @@ index a4ff5d0..43d5748 100644
  	if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
  		err = ovl_want_write(dentry);
 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index 791235e..f6aecf4 100644
+index 7952a50f..631ff67 100644
 --- a/fs/overlayfs/super.c
 +++ b/fs/overlayfs/super.c
 @@ -194,7 +194,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
@@ -113022,25 +113088,6 @@ index 791235e..f6aecf4 100644
  	struct dentry *root_dentry;
  	struct ovl_entry *oe;
  	struct ovl_fs *ufs;
-@@ -1070,11 +1070,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
- 		if (err < 0)
- 			goto out_put_workdir;
- 
--		if (!err) {
--			pr_err("overlayfs: upper fs needs to support d_type.\n");
--			err = -EINVAL;
--			goto out_put_workdir;
--		}
-+		/*
-+		 * We allowed this configuration and don't want to
-+		 * break users over kernel upgrade. So warn instead
-+		 * of erroring out.
-+		 */
-+		if (!err)
-+			pr_warn("overlayfs: upper fs needs to support d_type.\n");
- 	}
- 
- 	err = -ENOMEM;
 diff --git a/fs/pipe.c b/fs/pipe.c
 index 0d3f516..91735ad 100644
 --- a/fs/pipe.c
@@ -113290,7 +113337,7 @@ index 0d3f516..91735ad 100644
  
  		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
 diff --git a/fs/posix_acl.c b/fs/posix_acl.c
-index 711dd51..afa7a82 100644
+index e11ea5f..20e2d4d 100644
 --- a/fs/posix_acl.c
 +++ b/fs/posix_acl.c
 @@ -20,6 +20,7 @@
@@ -113370,74 +113417,6 @@ index 711dd51..afa7a82 100644
  				acl_e->e_gid =
  					make_kgid(user_ns,
  						  le32_to_cpu(entry->e_id));
-@@ -786,39 +797,47 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
- 	return error;
- }
- 
-+int
-+set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
-+{
-+	if (!IS_POSIXACL(inode))
-+		return -EOPNOTSUPP;
-+	if (!inode->i_op->set_acl)
-+		return -EOPNOTSUPP;
-+
-+	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-+		return acl ? -EACCES : 0;
-+	if (!inode_owner_or_capable(inode))
-+		return -EPERM;
-+
-+	if (acl) {
-+		int ret = posix_acl_valid(acl);
-+		if (ret)
-+			return ret;
-+	}
-+	return inode->i_op->set_acl(inode, acl, type);
-+}
-+EXPORT_SYMBOL(set_posix_acl);
-+
- static int
- posix_acl_xattr_set(const struct xattr_handler *handler,
--		    struct dentry *dentry, const char *name,
--		    const void *value, size_t size, int flags)
-+		    struct dentry *dentry,
-+		    const char *name, const void *value,
-+		    size_t size, int flags)
- {
- 	struct inode *inode = d_backing_inode(dentry);
- 	struct posix_acl *acl = NULL;
- 	int ret;
- 
--	if (!IS_POSIXACL(inode))
--		return -EOPNOTSUPP;
--	if (!inode->i_op->set_acl)
--		return -EOPNOTSUPP;
--
--	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
--		return value ? -EACCES : 0;
--	if (!inode_owner_or_capable(inode))
--		return -EPERM;
-+	if (strcmp(name, "") != 0)
-+		return -EINVAL;
- 
- 	if (value) {
- 		acl = posix_acl_from_xattr(&init_user_ns, value, size);
- 		if (IS_ERR(acl))
- 			return PTR_ERR(acl);
--
--		if (acl) {
--			ret = posix_acl_valid(acl);
--			if (ret)
--				goto out;
--		}
- 	}
--
--	ret = inode->i_op->set_acl(inode, acl, handler->flags);
--out:
-+	ret = set_posix_acl(inode, handler->flags, acl);
- 	posix_acl_release(acl);
- 	return ret;
- }
 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
 index 1ade120..a86f1a2 100644
 --- a/fs/proc/Kconfig
@@ -129806,7 +129785,7 @@ index 718e872..e16712a 100644
  /* flags */
  #define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
 diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
-index 786ad32..09d1fb1 100644
+index 07b83d3..3373ad6 100644
 --- a/include/linux/cpuidle.h
 +++ b/include/linux/cpuidle.h
 @@ -59,7 +59,8 @@ struct cpuidle_state {
@@ -129819,7 +129798,7 @@ index 786ad32..09d1fb1 100644
  
  /* Idle State Flags */
  #define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
-@@ -234,7 +235,7 @@ struct cpuidle_governor {
+@@ -237,7 +238,7 @@ struct cpuidle_governor {
  	void (*reflect)		(struct cpuidle_device *dev, int index);
  
  	struct module 		*owner;
@@ -129965,7 +129944,7 @@ index 653589e..4ef254a 100644
  	return c | 0x20;
  }
 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 7e9422c..a1181c6 100644
+index ad5d582..a794cac 100644
 --- a/include/linux/dcache.h
 +++ b/include/linux/dcache.h
 @@ -123,6 +123,9 @@ struct dentry {
@@ -133448,6 +133427,63 @@ index 9230f9a..065b8f8 100644
  
  /* Function to register/unregister hook points. */
  int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index f48b8a6..8971034 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -104,8 +104,8 @@ struct ip_set_ext {
+ };
+ 
+ struct ip_set_counter {
+-	atomic64_t bytes;
+-	atomic64_t packets;
++	atomic64_unchecked_t bytes;
++	atomic64_unchecked_t packets;
+ };
+ 
+ struct ip_set_comment_rcu {
+@@ -297,25 +297,25 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+ static inline void
+ ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+ {
+-	atomic64_add((long long)bytes, &(counter)->bytes);
++	atomic64_add_unchecked((long long)bytes, &(counter)->bytes);
+ }
+ 
+ static inline void
+ ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+ {
+-	atomic64_add((long long)packets, &(counter)->packets);
++	atomic64_add_unchecked((long long)packets, &(counter)->packets);
+ }
+ 
+ static inline u64
+ ip_set_get_bytes(const struct ip_set_counter *counter)
+ {
+-	return (u64)atomic64_read(&(counter)->bytes);
++	return (u64)atomic64_read_unchecked(&(counter)->bytes);
+ }
+ 
+ static inline u64
+ ip_set_get_packets(const struct ip_set_counter *counter)
+ {
+-	return (u64)atomic64_read(&(counter)->packets);
++	return (u64)atomic64_read_unchecked(&(counter)->packets);
+ }
+ 
+ static inline void
+@@ -384,9 +384,9 @@ ip_set_init_counter(struct ip_set_counter *counter,
+ 		    const struct ip_set_ext *ext)
+ {
+ 	if (ext->bytes != ULLONG_MAX)
+-		atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
++		atomic64_set_unchecked(&(counter)->bytes, (long long)(ext->bytes));
+ 	if (ext->packets != ULLONG_MAX)
+-		atomic64_set(&(counter)->packets, (long long)(ext->packets));
++		atomic64_set_unchecked(&(counter)->packets, (long long)(ext->packets));
+ }
+ 
+ /* Netlink CB args */
 diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
 index 8d02485..a1e1aa5 100644
 --- a/include/linux/netfilter/ipset/ip_set_comment.h
@@ -134379,7 +134415,7 @@ index 556ec1e..38c19c9 100644
  
  /*
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 52c4847..3a5c90b 100644
+index 52c4847..e87db2a 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -7,7 +7,7 @@
@@ -134399,7 +134435,7 @@ index 52c4847..3a5c90b 100644
  
  #define VMACACHE_BITS 2
  #define VMACACHE_SIZE (1U << VMACACHE_BITS)
-@@ -441,6 +442,19 @@ struct nsproxy;
+@@ -441,6 +442,18 @@ struct nsproxy;
  struct user_namespace;
  
  #ifdef CONFIG_MMU
@@ -134414,12 +134450,11 @@ index 52c4847..3a5c90b 100644
 +#endif
 +
 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
-+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
 +
  extern void arch_pick_mmap_layout(struct mm_struct *mm);
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -777,6 +791,17 @@ struct signal_struct {
+@@ -777,6 +790,17 @@ struct signal_struct {
  #ifdef CONFIG_TASKSTATS
  	struct taskstats *stats;
  #endif
@@ -134437,7 +134472,7 @@ index 52c4847..3a5c90b 100644
  #ifdef CONFIG_AUDIT
  	unsigned audit_tty;
  	struct tty_audit_buf *tty_audit_buf;
-@@ -790,7 +815,7 @@ struct signal_struct {
+@@ -790,7 +814,7 @@ struct signal_struct {
  	struct mutex cred_guard_mutex;	/* guard against foreign influences on
  					 * credential calculations
  					 * (notably. ptrace) */
@@ -134446,7 +134481,7 @@ index 52c4847..3a5c90b 100644
  
  /*
   * Bits in flags field of signal_struct.
-@@ -845,6 +870,14 @@ struct user_struct {
+@@ -845,6 +869,14 @@ struct user_struct {
  	struct key *session_keyring;	/* UID's default session keyring */
  #endif
  
@@ -134461,7 +134496,7 @@ index 52c4847..3a5c90b 100644
  	/* Hash table maintenance information */
  	struct hlist_node uidhash_node;
  	kuid_t uid;
-@@ -852,7 +885,7 @@ struct user_struct {
+@@ -852,7 +884,7 @@ struct user_struct {
  #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
  	atomic_long_t locked_vm;
  #endif
@@ -134470,7 +134505,7 @@ index 52c4847..3a5c90b 100644
  
  extern int uids_sysfs_init(void);
  
-@@ -1394,6 +1427,9 @@ struct tlbflush_unmap_batch {
+@@ -1394,6 +1426,9 @@ struct tlbflush_unmap_batch {
  struct task_struct {
  	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
  	void *stack;
@@ -134480,7 +134515,7 @@ index 52c4847..3a5c90b 100644
  	atomic_t usage;
  	unsigned int flags;	/* per process flags, defined below */
  	unsigned int ptrace;
-@@ -1529,8 +1565,8 @@ struct task_struct {
+@@ -1529,8 +1564,8 @@ struct task_struct {
  	struct list_head thread_node;
  
  	struct completion *vfork_done;		/* for vfork() */
@@ -134491,7 +134526,7 @@ index 52c4847..3a5c90b 100644
  
  	cputime_t utime, stime, utimescaled, stimescaled;
  	cputime_t gtime;
-@@ -1560,11 +1596,6 @@ struct task_struct {
+@@ -1560,11 +1595,6 @@ struct task_struct {
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
  
@@ -134503,7 +134538,7 @@ index 52c4847..3a5c90b 100644
  	char comm[TASK_COMM_LEN]; /* executable name excluding path
  				     - access with [gs]et_task_comm (which lock
  				       it with task_lock())
-@@ -1580,6 +1611,8 @@ struct task_struct {
+@@ -1580,6 +1610,8 @@ struct task_struct {
  /* hung task detection */
  	unsigned long last_switch_count;
  #endif
@@ -134512,7 +134547,7 @@ index 52c4847..3a5c90b 100644
  /* filesystem information */
  	struct fs_struct *fs;
  /* open file information */
-@@ -1657,6 +1690,10 @@ struct task_struct {
+@@ -1657,6 +1689,10 @@ struct task_struct {
  	unsigned int in_ubsan;
  #endif
  
@@ -134523,7 +134558,7 @@ index 52c4847..3a5c90b 100644
  /* journalling filesystem info */
  	void *journal_info;
  
-@@ -1695,6 +1732,10 @@ struct task_struct {
+@@ -1695,6 +1731,10 @@ struct task_struct {
  	/* cg_list protected by css_set_lock and tsk->alloc_lock */
  	struct list_head cg_list;
  #endif
@@ -134534,7 +134569,7 @@ index 52c4847..3a5c90b 100644
  #ifdef CONFIG_FUTEX
  	struct robust_list_head __user *robust_list;
  #ifdef CONFIG_COMPAT
-@@ -1810,7 +1851,7 @@ struct task_struct {
+@@ -1810,7 +1850,7 @@ struct task_struct {
  	 * Number of functions that haven't been traced
  	 * because of depth overrun.
  	 */
@@ -134543,7 +134578,7 @@ index 52c4847..3a5c90b 100644
  	/* Pause for the tracing */
  	atomic_t tracing_graph_pause;
  #endif
-@@ -1852,22 +1893,89 @@ struct task_struct {
+@@ -1852,22 +1892,89 @@ struct task_struct {
  #ifdef CONFIG_MMU
  	struct task_struct *oom_reaper_list;
  #endif
@@ -134643,7 +134678,7 @@ index 52c4847..3a5c90b 100644
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
-@@ -1949,7 +2057,7 @@ struct pid_namespace;
+@@ -1949,7 +2056,7 @@ struct pid_namespace;
  pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
  			struct pid_namespace *ns);
  
@@ -134652,7 +134687,7 @@ index 52c4847..3a5c90b 100644
  {
  	return tsk->pid;
  }
-@@ -2311,6 +2419,48 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2311,6 +2418,48 @@ extern u64 sched_clock_cpu(int cpu);
  
  extern void sched_clock_init(void);
  
@@ -134701,7 +134736,7 @@ index 52c4847..3a5c90b 100644
  #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  static inline void sched_clock_tick(void)
  {
-@@ -2436,7 +2586,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
+@@ -2436,7 +2585,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
  void yield(void);
  
  union thread_union {
@@ -134711,7 +134746,7 @@ index 52c4847..3a5c90b 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -2469,6 +2621,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2469,6 +2620,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -134719,7 +134754,7 @@ index 52c4847..3a5c90b 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2500,7 +2653,7 @@ extern void proc_caches_init(void);
+@@ -2500,7 +2652,7 @@ extern void proc_caches_init(void);
  extern void flush_signals(struct task_struct *);
  extern void ignore_signals(struct task_struct *);
  extern void flush_signal_handlers(struct task_struct *, int force_default);
@@ -134728,7 +134763,7 @@ index 52c4847..3a5c90b 100644
  
  static inline int kernel_dequeue_signal(siginfo_t *info)
  {
-@@ -2654,7 +2807,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2654,7 +2806,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -134737,7 +134772,7 @@ index 52c4847..3a5c90b 100644
  
  extern int do_execve(struct filename *,
  		     const char __user * const __user *,
-@@ -2769,11 +2922,13 @@ static inline int thread_group_empty(struct task_struct *p)
+@@ -2769,11 +2921,13 @@ static inline int thread_group_empty(struct task_struct *p)
   * It must not be nested with write_lock_irq(&tasklist_lock),
   * neither inside nor outside.
   */
@@ -134751,7 +134786,7 @@ index 52c4847..3a5c90b 100644
  static inline void task_unlock(struct task_struct *p)
  {
  	spin_unlock(&p->alloc_lock);
-@@ -2859,9 +3014,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2859,9 +3013,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  #define task_stack_end_corrupted(task) \
  		(*(end_of_stack(task)) != STACK_END_MAGIC)
  
@@ -134981,7 +135016,7 @@ index d80259a..41a639a 100644
  
  static inline void disallow_signal(int sig)
 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index 15d0df9..31bf6d0 100644
+index 794b924..3b11d45 100644
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
 @@ -889,7 +889,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
@@ -134993,7 +135028,7 @@ index 15d0df9..31bf6d0 100644
  					gfp_t priority)
  {
  	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -2187,7 +2187,7 @@ static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+@@ -2188,7 +2188,7 @@ static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
  	return skb->head + skb->csum_start;
  }
  
@@ -135002,7 +135037,7 @@ index 15d0df9..31bf6d0 100644
  {
  	return skb_transport_header(skb) - skb->data;
  }
-@@ -2202,7 +2202,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -2203,7 +2203,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
  	return skb->inner_transport_header - skb->inner_network_header;
  }
  
@@ -135011,7 +135046,7 @@ index 15d0df9..31bf6d0 100644
  {
  	return skb_network_header(skb) - skb->data;
  }
-@@ -2262,7 +2262,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -2263,7 +2263,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
   * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
   */
  #ifndef NET_SKB_PAD
@@ -135020,7 +135055,7 @@ index 15d0df9..31bf6d0 100644
  #endif
  
  int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2936,9 +2936,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2956,9 +2956,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
  				  int *err);
  unsigned int datagram_poll(struct file *file, struct socket *sock,
  			   struct poll_table_struct *wait);
@@ -135032,7 +135067,7 @@ index 15d0df9..31bf6d0 100644
  					struct msghdr *msg, int size)
  {
  	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
-@@ -3467,6 +3467,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -3487,6 +3487,9 @@ static inline void nf_reset(struct sk_buff *skb)
  	nf_bridge_put(skb->nf_bridge);
  	skb->nf_bridge = NULL;
  #endif
@@ -135533,7 +135568,7 @@ index 5c9c6cd..f16c5c9 100644
  
  #endif /* _LINUX_SUNRPC_ADDR_H */
 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
-index 9a7ddba..10918de1 100644
+index 14d70f5..88ae065 100644
 --- a/include/linux/sunrpc/clnt.h
 +++ b/include/linux/sunrpc/clnt.h
 @@ -103,7 +103,7 @@ struct rpc_procinfo {
@@ -140600,7 +140635,7 @@ index d277e83..824b594 100644
  	int threads = max_threads;
  	int min = MIN_THREADS;
 diff --git a/kernel/futex.c b/kernel/futex.c
-index c20f06f..a00ab76 100644
+index 6555d54..4c66189 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
 @@ -202,7 +202,7 @@ struct futex_pi_state {
@@ -140633,7 +140668,7 @@ index c20f06f..a00ab76 100644
  	/*
  	 * The futex address must be "naturally" aligned.
  	 */
-@@ -726,7 +731,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+@@ -734,7 +739,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
  
  static int get_futex_value_locked(u32 *dest, u32 __user *from)
  {
@@ -140642,7 +140677,7 @@ index c20f06f..a00ab76 100644
  
  	pagefault_disable();
  	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -3241,6 +3246,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3249,6 +3254,7 @@ static void __init futex_detect_cmpxchg(void)
  {
  #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
  	u32 curval;
@@ -140650,7 +140685,7 @@ index c20f06f..a00ab76 100644
  
  	/*
  	 * This will fail and we want it. Some arch implementations do
-@@ -3252,8 +3258,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3260,8 +3266,11 @@ static void __init futex_detect_cmpxchg(void)
  	 * implementation, the non-functional ones will return
  	 * -ENOSYS.
  	 */
@@ -140744,7 +140779,7 @@ index 5707f97..d526a3d 100644
  			if (handled != desc->threads_handled_last) {
  				action_ret = IRQ_HANDLED;
 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
-index 05254ee..a2e0725 100644
+index 4b353e0..94322e3 100644
 --- a/kernel/jump_label.c
 +++ b/kernel/jump_label.c
 @@ -14,6 +14,7 @@
@@ -140765,7 +140800,7 @@ index 05254ee..a2e0725 100644
  }
  
  static void jump_label_update(struct static_key *key);
-@@ -386,10 +389,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
+@@ -416,10 +419,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
  	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
  	struct jump_entry *iter;
  
@@ -141345,10 +141380,10 @@ index 0799fd3..d06ae3b 100644
  extern void debug_mutex_init(struct mutex *lock, const char *name,
  			     struct lock_class_key *key);
 diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
-index e364b42..642bee3 100644
+index 79d2d76..a70b90d 100644
 --- a/kernel/locking/mutex.c
 +++ b/kernel/locking/mutex.c
-@@ -534,7 +534,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+@@ -537,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  		goto skip_wait;
  
  	debug_mutex_lock_common(lock, &waiter);
@@ -141357,7 +141392,7 @@ index e364b42..642bee3 100644
  
  	/* add waiting tasks to the end of the waitqueue (FIFO): */
  	list_add_tail(&waiter.list, &lock->wait_list);
-@@ -581,7 +581,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+@@ -584,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
  	}
  	__set_task_state(task, TASK_RUNNING);
  
@@ -141366,7 +141401,7 @@ index e364b42..642bee3 100644
  	/* set it to 0 if there are no waiters left: */
  	if (likely(list_empty(&lock->wait_list)))
  		atomic_set(&lock->count, 0);
-@@ -602,7 +602,7 @@ skip_wait:
+@@ -605,7 +605,7 @@ skip_wait:
  	return 0;
  
  err:
@@ -143548,7 +143583,7 @@ index 686ec8a..8fc3873 100644
  static void push_dl_tasks(struct rq *);
  static void pull_dl_task(struct rq *);
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 4fbc3bd..591825d 100644
+index 4fbc3bd..6d8ac0d 100644
 --- a/kernel/sched/debug.c
 +++ b/kernel/sched/debug.c
 @@ -193,7 +193,7 @@ late_initcall(sched_init_debug);
@@ -143614,8 +143649,12 @@ index 4fbc3bd..591825d 100644
  		const char *procname, void *data, int maxlen,
  		umode_t mode, proc_handler *proc_handler,
  		bool load_idx)
-@@ -263,7 +266,7 @@ set_table_entry(struct ctl_table *entry,
- static struct ctl_table *
+@@ -260,10 +263,10 @@ set_table_entry(struct ctl_table *entry,
+ 	}
+ }
+ 
+-static struct ctl_table *
++static ctl_table_no_const *
  sd_alloc_ctl_domain_table(struct sched_domain *sd)
  {
 -	struct ctl_table *table = sd_alloc_ctl_entry(14);
@@ -143678,10 +143717,10 @@ index 4fbc3bd..591825d 100644
  		return -ENOMEM;
  	return 0;
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index e7dd0ec..bbbbe5e 100644
+index eeaf920..9f33e68 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
-@@ -8061,7 +8061,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
+@@ -8078,7 +8078,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
   * run_rebalance_domains is triggered when needed from the scheduler tick.
   * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
   */
@@ -148529,10 +148568,10 @@ index 36cc01b..d862cd8 100644
  		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
  
 diff --git a/mm/migrate.c b/mm/migrate.c
-index f9dfb18..c97ca70 100644
+index bdf3410..d7158a8 100644
 --- a/mm/migrate.c
 +++ b/mm/migrate.c
-@@ -1524,8 +1524,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+@@ -1526,8 +1526,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
  	 */
  	tcred = __task_cred(task);
  	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
@@ -148640,7 +148679,7 @@ index 5b72266..dc04ce5 100644
  	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
  };
 diff --git a/mm/mmap.c b/mm/mmap.c
-index bd2e1a53..9299d7f 100644
+index bd2e1a53..cbb2d1b 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -43,6 +43,7 @@
@@ -149028,7 +149067,7 @@ index bd2e1a53..9299d7f 100644
  	kmem_cache_free(vm_area_cachep, vma);
  unacct_error:
  	if (charged)
-@@ -1585,7 +1761,63 @@ unacct_error:
+@@ -1585,7 +1761,54 @@ unacct_error:
  	return error;
  }
  
@@ -149072,28 +149111,19 @@ index bd2e1a53..9299d7f 100644
 +	return true;
 +}
 +
-+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long flag, unsigned long gap_start, unsigned long gap_end)
 +{
-+	if (vma->vm_start < len)
-+		return -ENOMEM;
-+
-+	if (!(vma->vm_flags & VM_GROWSDOWN)) {
-+		if (offset <= vma->vm_start - len)
-+			return vma->vm_start - len - offset;
-+		else
-+			return -ENOMEM;
-+	}
++	if (!vma || !(vma->vm_flags & flag))
++		return 0;
 +
-+	if (sysctl_heap_stack_gap <= vma->vm_start - len)
-+		return vma->vm_start - len - sysctl_heap_stack_gap;
-+	return -ENOMEM;
++	return min(sysctl_heap_stack_gap, gap_end - gap_start);
 +}
 +
 +unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
  {
  	/*
  	 * We implement the search by looking for an rbtree node that
-@@ -1633,11 +1865,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1633,11 +1856,20 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
  			}
  		}
  
@@ -149109,22 +149139,13 @@ index bd2e1a53..9299d7f 100644
 +		else
 +			gap_start = gap_end;
 +
-+		if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
-+			if (gap_end - gap_start > sysctl_heap_stack_gap)
-+				gap_start += sysctl_heap_stack_gap;
-+			else
-+				gap_start = gap_end;
-+		}
-+		if (vma->vm_flags & VM_GROWSDOWN) {
-+			if (gap_end - gap_start > sysctl_heap_stack_gap)
-+				gap_end -= sysctl_heap_stack_gap;
-+			else
-+				gap_end = gap_start;
-+		}
++		gap_start += skip_heap_stack_gap(vma->vm_prev, VM_GROWSUP, gap_start, gap_end);
++		gap_end -= skip_heap_stack_gap(vma, VM_GROWSDOWN, gap_start, gap_end);
++
  		if (gap_end >= low_limit && gap_end - gap_start >= length)
  			goto found;
  
-@@ -1687,7 +1937,7 @@ found:
+@@ -1687,7 +1919,7 @@ found:
  	return gap_start;
  }
  
@@ -149133,7 +149154,7 @@ index bd2e1a53..9299d7f 100644
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
-@@ -1741,6 +1991,24 @@ check_current:
+@@ -1741,6 +1973,15 @@ check_current:
  		gap_end = vma->vm_start;
  		if (gap_end < low_limit)
  			return -ENOMEM;
@@ -149143,22 +149164,13 @@ index bd2e1a53..9299d7f 100644
 +		else
 +			gap_end = gap_start;
 +
-+		if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
-+			if (gap_end - gap_start > sysctl_heap_stack_gap)
-+				gap_start += sysctl_heap_stack_gap;
-+			else
-+				gap_start = gap_end;
-+		}
-+		if (vma->vm_flags & VM_GROWSDOWN) {
-+			if (gap_end - gap_start > sysctl_heap_stack_gap)
-+				gap_end -= sysctl_heap_stack_gap;
-+			else
-+				gap_end = gap_start;
-+		}
++		gap_start += skip_heap_stack_gap(vma->vm_prev, VM_GROWSUP, gap_start, gap_end);
++		gap_end -= skip_heap_stack_gap(vma, VM_GROWSDOWN, gap_start, gap_end);
++
  		if (gap_start <= high_limit && gap_end - gap_start >= length)
  			goto found;
  
-@@ -1804,6 +2072,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1804,6 +2045,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	struct vm_unmapped_area_info info;
@@ -149166,7 +149178,7 @@ index bd2e1a53..9299d7f 100644
  
  	if (len > TASK_SIZE - mmap_min_addr)
  		return -ENOMEM;
-@@ -1811,11 +2080,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1811,11 +2053,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -149183,7 +149195,7 @@ index bd2e1a53..9299d7f 100644
  			return addr;
  	}
  
-@@ -1824,6 +2097,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1824,6 +2070,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	info.low_limit = mm->mmap_base;
  	info.high_limit = TASK_SIZE;
  	info.align_mask = 0;
@@ -149191,7 +149203,7 @@ index bd2e1a53..9299d7f 100644
  	return vm_unmapped_area(&info);
  }
  #endif
-@@ -1842,6 +2116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1842,6 +2089,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	struct mm_struct *mm = current->mm;
  	unsigned long addr = addr0;
  	struct vm_unmapped_area_info info;
@@ -149199,7 +149211,7 @@ index bd2e1a53..9299d7f 100644
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE - mmap_min_addr)
-@@ -1850,12 +2125,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1850,12 +2098,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -149217,7 +149229,7 @@ index bd2e1a53..9299d7f 100644
  			return addr;
  	}
  
-@@ -1864,6 +2143,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1864,6 +2116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
  	info.high_limit = mm->mmap_base;
  	info.align_mask = 0;
@@ -149225,7 +149237,7 @@ index bd2e1a53..9299d7f 100644
  	addr = vm_unmapped_area(&info);
  
  	/*
-@@ -1876,6 +2156,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1876,6 +2129,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		VM_BUG_ON(addr != -ENOMEM);
  		info.flags = 0;
  		info.low_limit = TASK_UNMAPPED_BASE;
@@ -149238,7 +149250,7 @@ index bd2e1a53..9299d7f 100644
  		info.high_limit = TASK_SIZE;
  		addr = vm_unmapped_area(&info);
  	}
-@@ -1975,6 +2261,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -1975,6 +2234,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  	return vma;
  }
  
@@ -149267,7 +149279,7 @@ index bd2e1a53..9299d7f 100644
  /*
   * Verify that the stack growth is acceptable and
   * update accounting. This is shared with both the
-@@ -1992,8 +2300,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1992,8 +2273,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  
  	/* Stack limit test */
  	actual_size = size;
@@ -149277,7 +149289,7 @@ index bd2e1a53..9299d7f 100644
  	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
  		return -ENOMEM;
  
-@@ -2004,6 +2311,10 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2004,6 +2284,10 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		locked = mm->locked_vm + grow;
  		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
  		limit >>= PAGE_SHIFT;
@@ -149288,7 +149300,7 @@ index bd2e1a53..9299d7f 100644
  		if (locked > limit && !capable(CAP_IPC_LOCK))
  			return -ENOMEM;
  	}
-@@ -2029,17 +2340,21 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2029,17 +2313,21 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
   * vma is the last one with address > vma->vm_end.  Have to extend vma.
   */
@@ -149312,7 +149324,7 @@ index bd2e1a53..9299d7f 100644
  	else
  		return -ENOMEM;
  
-@@ -2047,15 +2362,24 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2047,15 +2335,24 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  	if (unlikely(anon_vma_prepare(vma)))
  		return -ENOMEM;
  
@@ -149339,7 +149351,7 @@ index bd2e1a53..9299d7f 100644
  		unsigned long size, grow;
  
  		size = address - vma->vm_start;
-@@ -2093,6 +2417,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2093,6 +2390,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  			}
  		}
  	}
@@ -149348,7 +149360,7 @@ index bd2e1a53..9299d7f 100644
  	anon_vma_unlock_write(vma->anon_vma);
  	khugepaged_enter_vma_merge(vma, vma->vm_flags);
  	validate_mm(mm);
-@@ -2108,6 +2434,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2108,6 +2407,8 @@ int expand_downwards(struct vm_area_struct *vma,
  {
  	struct mm_struct *mm = vma->vm_mm;
  	int error;
@@ -149357,7 +149369,7 @@ index bd2e1a53..9299d7f 100644
  
  	address &= PAGE_MASK;
  	error = security_mmap_addr(address);
-@@ -2118,6 +2446,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2118,6 +2419,15 @@ int expand_downwards(struct vm_area_struct *vma,
  	if (unlikely(anon_vma_prepare(vma)))
  		return -ENOMEM;
  
@@ -149373,7 +149385,7 @@ index bd2e1a53..9299d7f 100644
  	/*
  	 * vma->vm_start/vm_end cannot change under us because the caller
  	 * is required to hold the mmap_sem in read mode.  We need the
-@@ -2126,9 +2463,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2126,9 +2436,17 @@ int expand_downwards(struct vm_area_struct *vma,
  	anon_vma_lock_write(vma->anon_vma);
  
  	/* Somebody else might have raced and expanded it already */
@@ -149392,7 +149404,7 @@ index bd2e1a53..9299d7f 100644
  		size = vma->vm_end - address;
  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
  
-@@ -2156,13 +2501,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2156,13 +2474,27 @@ int expand_downwards(struct vm_area_struct *vma,
  				vma->vm_pgoff -= grow;
  				anon_vma_interval_tree_post_update_vma(vma);
  				vma_gap_update(vma);
@@ -149420,7 +149432,7 @@ index bd2e1a53..9299d7f 100644
  	khugepaged_enter_vma_merge(vma, vma->vm_flags);
  	validate_mm(mm);
  	return error;
-@@ -2262,6 +2621,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2262,6 +2594,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  	do {
  		long nrpages = vma_pages(vma);
  
@@ -149434,7 +149446,7 @@ index bd2e1a53..9299d7f 100644
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += nrpages;
  		vm_stat_account(mm, vma->vm_flags, -nrpages);
-@@ -2306,6 +2672,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2306,6 +2645,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
  	do {
@@ -149451,7 +149463,7 @@ index bd2e1a53..9299d7f 100644
  		vma_rb_erase(vma, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
-@@ -2333,14 +2709,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2333,14 +2682,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	struct vm_area_struct *new;
  	int err;
  
@@ -149485,7 +149497,7 @@ index bd2e1a53..9299d7f 100644
  	/* most fields are the same, copy all, and then fixup */
  	*new = *vma;
  
-@@ -2353,6 +2748,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2353,6 +2721,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
  	}
  
@@ -149508,7 +149520,7 @@ index bd2e1a53..9299d7f 100644
  	err = vma_dup_policy(vma, new);
  	if (err)
  		goto out_free_vma;
-@@ -2373,6 +2784,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2373,6 +2757,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	else
  		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
@@ -149547,7 +149559,7 @@ index bd2e1a53..9299d7f 100644
  	/* Success. */
  	if (!err)
  		return 0;
-@@ -2382,10 +2825,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2382,10 +2798,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  		new->vm_ops->close(new);
  	if (new->vm_file)
  		fput(new->vm_file);
@@ -149567,7 +149579,7 @@ index bd2e1a53..9299d7f 100644
  	kmem_cache_free(vm_area_cachep, new);
  	return err;
  }
-@@ -2397,6 +2848,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2397,6 +2821,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	      unsigned long addr, int new_below)
  {
@@ -149583,7 +149595,7 @@ index bd2e1a53..9299d7f 100644
  	if (mm->map_count >= sysctl_max_map_count)
  		return -ENOMEM;
  
-@@ -2408,11 +2868,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2408,11 +2841,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
@@ -149614,7 +149626,7 @@ index bd2e1a53..9299d7f 100644
  	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
-@@ -2490,6 +2969,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2490,6 +2942,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  	/* Fix up all other VM information */
  	remove_vma_list(mm, vma);
  
@@ -149623,11 +149635,10 @@ index bd2e1a53..9299d7f 100644
  	return 0;
  }
  
-@@ -2498,6 +2979,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2498,6 +2952,12 @@ int vm_munmap(unsigned long start, size_t len)
  	int ret;
  	struct mm_struct *mm = current->mm;
  
-+
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
 +	    (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
@@ -149637,7 +149648,7 @@ index bd2e1a53..9299d7f 100644
  	down_write(&mm->mmap_sem);
  	ret = do_munmap(mm, start, len);
  	up_write(&mm->mmap_sem);
-@@ -2543,6 +3031,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+@@ -2543,6 +3003,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
  	down_write(&mm->mmap_sem);
  	vma = find_vma(mm, start);
  
@@ -149649,7 +149660,7 @@ index bd2e1a53..9299d7f 100644
  	if (!vma || !(vma->vm_flags & VM_SHARED))
  		goto out;
  
-@@ -2603,16 +3096,6 @@ out:
+@@ -2603,16 +3068,6 @@ out:
  	return ret;
  }
  
@@ -149666,7 +149677,7 @@ index bd2e1a53..9299d7f 100644
  /*
   *  this is really a simplified "do_mmap".  it only handles
   *  anonymous maps.  eventually we may be able to do some
-@@ -2626,6 +3109,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2626,6 +3081,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	struct rb_node **rb_link, *rb_parent;
  	pgoff_t pgoff = addr >> PAGE_SHIFT;
  	int error;
@@ -149674,7 +149685,7 @@ index bd2e1a53..9299d7f 100644
  
  	len = PAGE_ALIGN(len);
  	if (!len)
-@@ -2633,10 +3117,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2633,10 +3089,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -149699,7 +149710,7 @@ index bd2e1a53..9299d7f 100644
  	error = mlock_future_check(mm, mm->def_flags, len);
  	if (error)
  		return error;
-@@ -2654,16 +3152,17 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2654,16 +3124,17 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  			      &rb_parent)) {
  		if (do_munmap(mm, addr, len))
  			return -ENOMEM;
@@ -149719,7 +149730,7 @@ index bd2e1a53..9299d7f 100644
  		return -ENOMEM;
  
  	/* Can we just expand an old private anonymous mapping? */
-@@ -2677,7 +3176,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2677,7 +3148,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	 */
  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  	if (!vma) {
@@ -149728,7 +149739,7 @@ index bd2e1a53..9299d7f 100644
  		return -ENOMEM;
  	}
  
-@@ -2691,11 +3190,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2691,11 +3162,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  out:
  	perf_event_mmap(vma);
@@ -149744,7 +149755,7 @@ index bd2e1a53..9299d7f 100644
  	return addr;
  }
  
-@@ -2757,6 +3257,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2757,6 +3229,7 @@ void exit_mmap(struct mm_struct *mm)
  	while (vma) {
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += vma_pages(vma);
@@ -149752,7 +149763,7 @@ index bd2e1a53..9299d7f 100644
  		vma = remove_vma(vma);
  	}
  	vm_unacct_memory(nr_accounted);
-@@ -2771,6 +3272,10 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2771,6 +3244,10 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  	struct vm_area_struct *prev;
  	struct rb_node **rb_link, *rb_parent;
  
@@ -149763,7 +149774,7 @@ index bd2e1a53..9299d7f 100644
  	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
  			   &prev, &rb_link, &rb_parent))
  		return -ENOMEM;
-@@ -2778,6 +3283,9 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2778,6 +3255,9 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
  		return -ENOMEM;
  
@@ -149773,7 +149784,7 @@ index bd2e1a53..9299d7f 100644
  	/*
  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  	 * until its first write fault, when page's anon_vma and index
-@@ -2795,7 +3303,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2795,7 +3275,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
  	}
  
@@ -149795,7 +149806,7 @@ index bd2e1a53..9299d7f 100644
  	return 0;
  }
  
-@@ -2814,6 +3336,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2814,6 +3308,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	struct rb_node **rb_link, *rb_parent;
  	bool faulted_in_anon_vma = true;
  
@@ -149804,7 +149815,7 @@ index bd2e1a53..9299d7f 100644
  	/*
  	 * If anonymous vma has not yet been faulted, update new pgoff
  	 * to match new location, to increase its chance of merging.
-@@ -2880,24 +3404,67 @@ out:
+@@ -2880,24 +3376,67 @@ out:
  	return NULL;
  }
  
@@ -149880,7 +149891,7 @@ index bd2e1a53..9299d7f 100644
  	}
  
  	return true;
-@@ -2905,6 +3472,11 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
+@@ -2905,6 +3444,11 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
  
  void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
  {
@@ -149892,7 +149903,7 @@ index bd2e1a53..9299d7f 100644
  	mm->total_vm += npages;
  
  	if (is_exec_mapping(flags))
-@@ -2989,6 +3561,22 @@ static struct vm_area_struct *__install_special_mapping(
+@@ -2989,6 +3533,22 @@ static struct vm_area_struct *__install_special_mapping(
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  
@@ -150309,10 +150320,10 @@ index c8bd59a..82b24ab 100644
  	struct mm_struct *mm;
  
 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index bc5149d..3f523f3 100644
+index e389f0a..2dfc34c 100644
 --- a/mm/page-writeback.c
 +++ b/mm/page-writeback.c
-@@ -870,7 +870,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
+@@ -873,7 +873,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
   *   card's wb_dirty may rush to many times higher than wb_setpoint.
   * - the wb dirty thresh drops quickly due to change of JBOD workload
   */
@@ -150490,18 +150501,9 @@ index 898fe3f..78ad9e4 100644
  		spin_unlock_irqrestore(&zone->lock, flags);
  	}
 diff --git a/mm/percpu.c b/mm/percpu.c
-index 0c59684..5176325 100644
+index 9903830..5176325 100644
 --- a/mm/percpu.c
 +++ b/mm/percpu.c
-@@ -112,7 +112,7 @@ struct pcpu_chunk {
- 	int			map_used;	/* # of map entries used before the sentry */
- 	int			map_alloc;	/* # of map entries allocated */
- 	int			*map;		/* allocation map */
--	struct work_struct	map_extend_work;/* async ->map[] extension */
-+	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
- 
- 	void			*data;		/* chunk data */
- 	int			first_free;	/* no free below this */
 @@ -133,7 +133,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
  static unsigned int pcpu_high_unit_cpu __read_mostly;
  
@@ -150511,192 +150513,6 @@ index 0c59684..5176325 100644
  EXPORT_SYMBOL_GPL(pcpu_base_addr);
  
  static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
-@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
- static int pcpu_reserved_chunk_limit;
- 
- static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
--static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
-+static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
- 
- static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
- 
-+/* chunks which need their map areas extended, protected by pcpu_lock */
-+static LIST_HEAD(pcpu_map_extend_chunks);
-+
- /*
-  * The number of empty populated pages, protected by pcpu_lock.  The
-  * reserved chunk doesn't contribute to the count.
-@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
- {
- 	int margin, new_alloc;
- 
-+	lockdep_assert_held(&pcpu_lock);
-+
- 	if (is_atomic) {
- 		margin = 3;
- 
- 		if (chunk->map_alloc <
--		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
--		    pcpu_async_enabled)
--			schedule_work(&chunk->map_extend_work);
-+		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
-+			if (list_empty(&chunk->map_extend_list)) {
-+				list_add_tail(&chunk->map_extend_list,
-+					      &pcpu_map_extend_chunks);
-+				pcpu_schedule_balance_work();
-+			}
-+		}
- 	} else {
- 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
- 	}
-@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
- 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
- 	unsigned long flags;
- 
-+	lockdep_assert_held(&pcpu_alloc_mutex);
-+
- 	new = pcpu_mem_zalloc(new_size);
- 	if (!new)
- 		return -ENOMEM;
-@@ -467,20 +478,6 @@ out_unlock:
- 	return 0;
- }
- 
--static void pcpu_map_extend_workfn(struct work_struct *work)
--{
--	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
--						map_extend_work);
--	int new_alloc;
--
--	spin_lock_irq(&pcpu_lock);
--	new_alloc = pcpu_need_to_extend(chunk, false);
--	spin_unlock_irq(&pcpu_lock);
--
--	if (new_alloc)
--		pcpu_extend_area_map(chunk, new_alloc);
--}
--
- /**
-  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
-  * @chunk: chunk the candidate area belongs to
-@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
- 	chunk->map_used = 1;
- 
- 	INIT_LIST_HEAD(&chunk->list);
--	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
-+	INIT_LIST_HEAD(&chunk->map_extend_list);
- 	chunk->free_size = pcpu_unit_size;
- 	chunk->contig_hint = pcpu_unit_size;
- 
-@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
- 		return NULL;
- 	}
- 
-+	if (!is_atomic)
-+		mutex_lock(&pcpu_alloc_mutex);
-+
- 	spin_lock_irqsave(&pcpu_lock, flags);
- 
- 	/* serve reserved allocations from the reserved chunk if available */
-@@ -967,12 +967,9 @@ restart:
- 	if (is_atomic)
- 		goto fail;
- 
--	mutex_lock(&pcpu_alloc_mutex);
--
- 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
- 		chunk = pcpu_create_chunk();
- 		if (!chunk) {
--			mutex_unlock(&pcpu_alloc_mutex);
- 			err = "failed to allocate new chunk";
- 			goto fail;
- 		}
-@@ -983,7 +980,6 @@ restart:
- 		spin_lock_irqsave(&pcpu_lock, flags);
- 	}
- 
--	mutex_unlock(&pcpu_alloc_mutex);
- 	goto restart;
- 
- area_found:
-@@ -993,8 +989,6 @@ area_found:
- 	if (!is_atomic) {
- 		int page_start, page_end, rs, re;
- 
--		mutex_lock(&pcpu_alloc_mutex);
--
- 		page_start = PFN_DOWN(off);
- 		page_end = PFN_UP(off + size);
- 
-@@ -1005,7 +999,6 @@ area_found:
- 
- 			spin_lock_irqsave(&pcpu_lock, flags);
- 			if (ret) {
--				mutex_unlock(&pcpu_alloc_mutex);
- 				pcpu_free_area(chunk, off, &occ_pages);
- 				err = "failed to populate";
- 				goto fail_unlock;
-@@ -1045,6 +1038,8 @@ fail:
- 		/* see the flag handling in pcpu_blance_workfn() */
- 		pcpu_atomic_alloc_failed = true;
- 		pcpu_schedule_balance_work();
-+	} else {
-+		mutex_unlock(&pcpu_alloc_mutex);
- 	}
- 	return NULL;
- }
-@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
- 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
- 			continue;
- 
-+		list_del_init(&chunk->map_extend_list);
- 		list_move(&chunk->list, &to_free);
- 	}
- 
-@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
- 		pcpu_destroy_chunk(chunk);
- 	}
- 
-+	/* service chunks which requested async area map extension */
-+	do {
-+		int new_alloc = 0;
-+
-+		spin_lock_irq(&pcpu_lock);
-+
-+		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
-+					struct pcpu_chunk, map_extend_list);
-+		if (chunk) {
-+			list_del_init(&chunk->map_extend_list);
-+			new_alloc = pcpu_need_to_extend(chunk, false);
-+		}
-+
-+		spin_unlock_irq(&pcpu_lock);
-+
-+		if (new_alloc)
-+			pcpu_extend_area_map(chunk, new_alloc);
-+	} while (chunk);
-+
- 	/*
- 	 * Ensure there are certain number of free populated pages for
- 	 * atomic allocs.  Fill up from the most packed so that atomic
-@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
- 	 */
- 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
- 	INIT_LIST_HEAD(&schunk->list);
--	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
-+	INIT_LIST_HEAD(&schunk->map_extend_list);
- 	schunk->base_addr = base_addr;
- 	schunk->map = smap;
- 	schunk->map_alloc = ARRAY_SIZE(smap);
-@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
- 	if (dyn_size) {
- 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
- 		INIT_LIST_HEAD(&dchunk->list);
--		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
-+		INIT_LIST_HEAD(&dchunk->map_extend_list);
- 		dchunk->base_addr = base_addr;
- 		dchunk->map = dmap;
- 		dchunk->map_alloc = ARRAY_SIZE(dmap);
 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
 index 07514d4..9989090 100644
 --- a/mm/process_vm_access.c
@@ -150866,7 +150682,7 @@ index 3ebf9c4..ec385cd 100644
  
  /*
 diff --git a/mm/shmem.c b/mm/shmem.c
-index 719bd6b..6b464fb 100644
+index 9ca09f5..448e8b3 100644
 --- a/mm/shmem.c
 +++ b/mm/shmem.c
 @@ -33,7 +33,7 @@
@@ -150887,7 +150703,7 @@ index 719bd6b..6b464fb 100644
  
  /*
   * shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -2665,6 +2665,23 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+@@ -2667,6 +2667,23 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
  	return simple_xattr_set(&info->xattrs, name, value, size, flags);
  }
  
@@ -150911,7 +150727,7 @@ index 719bd6b..6b464fb 100644
  static const struct xattr_handler shmem_security_xattr_handler = {
  	.prefix = XATTR_SECURITY_PREFIX,
  	.get = shmem_xattr_handler_get,
-@@ -2677,6 +2694,14 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
+@@ -2679,6 +2696,14 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
  	.set = shmem_xattr_handler_set,
  };
  
@@ -150926,7 +150742,7 @@ index 719bd6b..6b464fb 100644
  static const struct xattr_handler *shmem_xattr_handlers[] = {
  #ifdef CONFIG_TMPFS_POSIX_ACL
  	&posix_acl_access_xattr_handler,
-@@ -2684,6 +2709,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2686,6 +2711,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
  #endif
  	&shmem_security_xattr_handler,
  	&shmem_trusted_xattr_handler,
@@ -150938,7 +150754,7 @@ index 719bd6b..6b464fb 100644
  	NULL
  };
  
-@@ -3044,8 +3074,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -3046,8 +3076,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
  	int err = -ENOMEM;
  
  	/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -154634,7 +154450,7 @@ index 2696aef..dbd5807 100644
  		if (!err)
  			err = put_user(SCM_RIGHTS, &cm->cmsg_type);
 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index e561f9f..25281cc 100644
+index 59bf4d7..71bbed8 100644
 --- a/net/core/skbuff.c
 +++ b/net/core/skbuff.c
 @@ -1046,7 +1046,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
@@ -154656,7 +154472,7 @@ index e561f9f..25281cc 100644
  		.update  = csum_partial_ext,
  		.combine = csum_block_add_ext,
  	};
-@@ -3414,12 +3415,14 @@ void __init skb_init(void)
+@@ -3396,12 +3397,14 @@ void __init skb_init(void)
  	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
  					      sizeof(struct sk_buff),
  					      0,
@@ -156846,7 +156662,7 @@ index 70f2628..721dd1e 100644
  {
  	struct inet_hashinfo *hinfo = death_row->hashinfo;
 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
-index ea071fa..8bdb3e0 100644
+index c26fac2..f944bc7 100644
 --- a/net/ipv6/ip6_fib.c
 +++ b/net/ipv6/ip6_fib.c
 @@ -98,9 +98,9 @@ static int fib6_new_sernum(struct net *net)
@@ -159444,9 +159260,64 @@ index 2011977..ba46f29 100644
  		};
  		return netlink_dump_start(nlsk, skb, nlh, &c);
 diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
-index dbd0803..1071d39 100644
+index dbd0803..9d18caa 100644
 --- a/net/netfilter/nfnetlink_acct.c
 +++ b/net/netfilter/nfnetlink_acct.c
+@@ -28,8 +28,8 @@ MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+ MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
+ 
+ struct nf_acct {
+-	atomic64_t		pkts;
+-	atomic64_t		bytes;
++	atomic64_unchecked_t	pkts;
++	atomic64_unchecked_t	bytes;
+ 	unsigned long		flags;
+ 	struct list_head	head;
+ 	atomic_t		refcnt;
+@@ -76,8 +76,8 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
+ 	if (matching) {
+ 		if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+ 			/* reset counters if you request a replacement. */
+-			atomic64_set(&matching->pkts, 0);
+-			atomic64_set(&matching->bytes, 0);
++			atomic64_set_unchecked(&matching->pkts, 0);
++			atomic64_set_unchecked(&matching->bytes, 0);
+ 			smp_mb__before_atomic();
+ 			/* reset overquota flag if quota is enabled. */
+ 			if ((matching->flags & NFACCT_F_QUOTA))
+@@ -116,11 +116,11 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
+ 	strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
+ 
+ 	if (tb[NFACCT_BYTES]) {
+-		atomic64_set(&nfacct->bytes,
++		atomic64_set_unchecked(&nfacct->bytes,
+ 			     be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
+ 	}
+ 	if (tb[NFACCT_PKTS]) {
+-		atomic64_set(&nfacct->pkts,
++		atomic64_set_unchecked(&nfacct->pkts,
+ 			     be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
+ 	}
+ 	atomic_set(&nfacct->refcnt, 1);
+@@ -153,14 +153,14 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+ 
+ 	old_flags = acct->flags;
+ 	if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
+-		pkts = atomic64_xchg(&acct->pkts, 0);
+-		bytes = atomic64_xchg(&acct->bytes, 0);
++		pkts = atomic64_xchg_unchecked(&acct->pkts, 0);
++		bytes = atomic64_xchg_unchecked(&acct->bytes, 0);
+ 		smp_mb__before_atomic();
+ 		if (acct->flags & NFACCT_F_QUOTA)
+ 			clear_bit(NFACCT_OVERQUOTA_BIT, &acct->flags);
+ 	} else {
+-		pkts = atomic64_read(&acct->pkts);
+-		bytes = atomic64_read(&acct->bytes);
++		pkts = atomic64_read_unchecked(&acct->pkts);
++		bytes = atomic64_read_unchecked(&acct->bytes);
+ 	}
+ 	if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
+ 	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
 @@ -266,10 +266,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
  	char *acct_name;
  
@@ -159472,6 +159343,26 @@ index dbd0803..1071d39 100644
  	}
  
  	if (!tb[NFACCT_NAME])
+@@ -435,8 +436,8 @@ EXPORT_SYMBOL_GPL(nfnl_acct_put);
+ 
+ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
+ {
+-	atomic64_inc(&nfacct->pkts);
+-	atomic64_add(skb->len, &nfacct->bytes);
++	atomic64_inc_unchecked(&nfacct->pkts);
++	atomic64_add_unchecked(skb->len, &nfacct->bytes);
+ }
+ EXPORT_SYMBOL_GPL(nfnl_acct_update);
+ 
+@@ -471,7 +472,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
+ 
+ 	quota = (u64 *)nfacct->data;
+ 	now = (nfacct->flags & NFACCT_F_QUOTA_PKTS) ?
+-	       atomic64_read(&nfacct->pkts) : atomic64_read(&nfacct->bytes);
++	       atomic64_read_unchecked(&nfacct->pkts) : atomic64_read_unchecked(&nfacct->bytes);
+ 
+ 	ret = now > *quota;
+ 
 diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
 index e924e95..1e2e233 100644
 --- a/net/netfilter/nfnetlink_cthelper.c
@@ -159931,7 +159822,7 @@ index 5eb7694..58d8f08 100644
  
  static int __init ovs_vxlan_tnl_init(void)
 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 18d0bec..8fdeb86 100644
+index 8012f67..1a3168e 100644
 --- a/net/packet/af_packet.c
 +++ b/net/packet/af_packet.c
 @@ -278,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
@@ -161172,10 +161063,10 @@ index 553bf95..c4c6132 100644
  		goto out_nomem;
  	cd->u.procfs.channel_ent = NULL;
 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 7e0c9bf..bdec2b8 100644
+index 837dd91..7a070a7 100644
 --- a/net/sunrpc/clnt.c
 +++ b/net/sunrpc/clnt.c
-@@ -1508,7 +1508,9 @@ call_start(struct rpc_task *task)
+@@ -1527,7 +1527,9 @@ call_start(struct rpc_task *task)
  			(RPC_IS_ASYNC(task) ? "async" : "sync"));
  
  	/* Increment call count */
@@ -161701,7 +161592,7 @@ index e6cb386..9eaa00c 100644
  	sub->evt.event = htohl(event, sub->swap);
  	sub->evt.found_lower = htohl(found_lower, sub->swap);
 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
-index 8269da7..8525db6 100644
+index 7748199..1cc52d17 100644
 --- a/net/unix/af_unix.c
 +++ b/net/unix/af_unix.c
 @@ -919,6 +919,12 @@ static struct sock *unix_find_other(struct net *net,
@@ -163228,22 +163119,25 @@ index 0000000..de92ed9
 +randomize_layout_seed.h
 diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
 new file mode 100644
-index 0000000..ec5bc00
+index 0000000..8d234d3
 --- /dev/null
 +++ b/scripts/gcc-plugins/Makefile
-@@ -0,0 +1,35 @@
+@@ -0,0 +1,38 @@
 +GCC_PLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
 +
 +ifeq ($(PLUGINCC),$(HOSTCC))
 +  HOSTLIBS := hostlibs
-+  HOST_EXTRACFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb -Wall -W
++  HOST_EXTRACFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src)
++  HOST_EXTRACFLAGS += -std=gnu99 -ggdb -fvisibility=hidden
++  HOST_EXTRACFLAGS += -Wall -W
 +  export HOST_EXTRACFLAGS
 +else
 +  HOSTLIBS := hostcxxlibs
-+  HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
-+  HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
-+  HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable
-+  HOST_EXTRACXXFLAGS += -Wall -W -Wno-unused-parameter
++  HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src)
++  HOST_EXTRACXXFLAGS += -std=gnu++98 -ggdb -fvisibility=hidden
++  HOST_EXTRACXXFLAGS += -fno-rtti -fno-exceptions -fasynchronous-unwind-tables
++  HOST_EXTRACXXFLAGS += -Wall -W
++  HOST_EXTRACXXFLAGS += -Wno-unused-parameter -Wno-narrowing -Wno-unused-variable
 +  export HOST_EXTRACXXFLAGS
 +endif
 +
@@ -163269,7 +163163,7 @@ index 0000000..ec5bc00
 +clean-files += *.so
 diff --git a/scripts/gcc-plugins/checker_plugin.c b/scripts/gcc-plugins/checker_plugin.c
 new file mode 100644
-index 0000000..efaf576
+index 0000000..2b3c178
 --- /dev/null
 +++ b/scripts/gcc-plugins/checker_plugin.c
 @@ -0,0 +1,496 @@
@@ -163300,7 +163194,7 @@ index 0000000..efaf576
 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info checker_plugin_info = {
 +	.version	= "201602181345",
@@ -163704,7 +163598,7 @@ index 0000000..efaf576
 +#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_verify_flow | TODO_update_ssa
 +#include "gcc-generate-gimple-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	const int argc = plugin_info->argc;
@@ -163771,7 +163665,7 @@ index 0000000..efaf576
 +}
 diff --git a/scripts/gcc-plugins/colorize_plugin.c b/scripts/gcc-plugins/colorize_plugin.c
 new file mode 100644
-index 0000000..ffe60f6
+index 0000000..31fd196
 --- /dev/null
 +++ b/scripts/gcc-plugins/colorize_plugin.c
 @@ -0,0 +1,162 @@
@@ -163790,7 +163684,7 @@ index 0000000..ffe60f6
 +
 +#include "gcc-common.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info colorize_plugin_info = {
 +	.version	= "201602181345",
@@ -163888,7 +163782,7 @@ index 0000000..ffe60f6
 +#endif
 +}
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	const int argc = plugin_info->argc;
@@ -163939,7 +163833,7 @@ index 0000000..ffe60f6
 +}
 diff --git a/scripts/gcc-plugins/constify_plugin.c b/scripts/gcc-plugins/constify_plugin.c
 new file mode 100644
-index 0000000..b769ccf
+index 0000000..5287631c
 --- /dev/null
 +++ b/scripts/gcc-plugins/constify_plugin.c
 @@ -0,0 +1,582 @@
@@ -163963,7 +163857,7 @@ index 0000000..b769ccf
 +// unused C type flag in all versions 4.5-6
 +#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static bool enabled = true;
 +
@@ -164482,7 +164376,7 @@ index 0000000..b769ccf
 +	targetm.section_type_flags = constify_section_type_flags;
 +}
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	const int argc = plugin_info->argc;
@@ -164527,10 +164421,10 @@ index 0000000..b769ccf
 +}
 diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
 new file mode 100644
-index 0000000..7b14844
+index 0000000..a4e216d
 --- /dev/null
 +++ b/scripts/gcc-plugins/gcc-common.h
-@@ -0,0 +1,893 @@
+@@ -0,0 +1,894 @@
 +#ifndef GCC_COMMON_H_INCLUDED
 +#define GCC_COMMON_H_INCLUDED
 +
@@ -164688,6 +164582,7 @@ index 0000000..7b14844
 +#endif
 +
 +#define __unused __attribute__((__unused__))
++#define __visible __attribute__((visibility("default")))
 +
 +#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
 +#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
@@ -166278,7 +166173,7 @@ index 0000000..7514850
 +fi
 diff --git a/scripts/gcc-plugins/initify_plugin.c b/scripts/gcc-plugins/initify_plugin.c
 new file mode 100644
-index 0000000..bf3eb6c
+index 0000000..4b554fa
 --- /dev/null
 +++ b/scripts/gcc-plugins/initify_plugin.c
 @@ -0,0 +1,536 @@
@@ -166299,7 +166194,7 @@ index 0000000..bf3eb6c
 +
 +#include "gcc-common.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info initify_plugin_info = {
 +	.version	= "20160306",
@@ -166796,7 +166691,7 @@ index 0000000..bf3eb6c
 +	targetm.section_type_flags = initify_section_type_flags;
 +}
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	struct register_pass_info initify_pass_info;
@@ -166820,7 +166715,7 @@ index 0000000..bf3eb6c
 +}
 diff --git a/scripts/gcc-plugins/kallocstat_plugin.c b/scripts/gcc-plugins/kallocstat_plugin.c
 new file mode 100644
-index 0000000..30ecc9a
+index 0000000..3bd3089
 --- /dev/null
 +++ b/scripts/gcc-plugins/kallocstat_plugin.c
 @@ -0,0 +1,135 @@
@@ -166843,7 +166738,7 @@ index 0000000..30ecc9a
 +
 +#include "gcc-common.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info kallocstat_plugin_info = {
 +	.version	= "201602181345",
@@ -166939,7 +166834,7 @@ index 0000000..30ecc9a
 +#define NO_GATE
 +#include "gcc-generate-gimple-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	struct register_pass_info kallocstat_pass_info;
@@ -166961,7 +166856,7 @@ index 0000000..30ecc9a
 +}
 diff --git a/scripts/gcc-plugins/kernexec_plugin.c b/scripts/gcc-plugins/kernexec_plugin.c
 new file mode 100644
-index 0000000..e31e92f
+index 0000000..a213367
 --- /dev/null
 +++ b/scripts/gcc-plugins/kernexec_plugin.c
 @@ -0,0 +1,407 @@
@@ -166984,10 +166879,10 @@ index 0000000..e31e92f
 +
 +#include "gcc-common.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info kernexec_plugin_info = {
-+	.version	= "201602181345",
++	.version	= "201607271510",
 +	.help		= "method=[bts|or]\tinstrumentation method\n"
 +};
 +
@@ -167308,7 +167203,7 @@ index 0000000..e31e92f
 +#define TODO_FLAGS_FINISH TODO_dump_func | TODO_ggc_collect
 +#include "gcc-generate-rtl-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	const int argc = plugin_info->argc;
@@ -167319,14 +167214,14 @@ index 0000000..e31e92f
 +	struct register_pass_info kernexec_retaddr_pass_info;
 +
 +	kernexec_reload_pass_info.pass				= make_kernexec_reload_pass();
-+	kernexec_reload_pass_info.reference_pass_name		= "ssa";
++	kernexec_reload_pass_info.reference_pass_name		= "early_optimizations";
 +	kernexec_reload_pass_info.ref_pass_instance_number	= 1;
-+	kernexec_reload_pass_info.pos_op 			= PASS_POS_INSERT_AFTER;
++	kernexec_reload_pass_info.pos_op 			= PASS_POS_INSERT_BEFORE;
 +
 +	kernexec_fptr_pass_info.pass				= make_kernexec_fptr_pass();
-+	kernexec_fptr_pass_info.reference_pass_name		= "ssa";
++	kernexec_fptr_pass_info.reference_pass_name		= "early_optimizations";
 +	kernexec_fptr_pass_info.ref_pass_instance_number	= 1;
-+	kernexec_fptr_pass_info.pos_op 				= PASS_POS_INSERT_AFTER;
++	kernexec_fptr_pass_info.pos_op 				= PASS_POS_INSERT_BEFORE;
 +
 +	kernexec_retaddr_pass_info.pass				= make_kernexec_retaddr_pass();
 +	kernexec_retaddr_pass_info.reference_pass_name		= "pro_and_epilogue";
@@ -167374,7 +167269,7 @@ index 0000000..e31e92f
 +}
 diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
 new file mode 100644
-index 0000000..be3978c
+index 0000000..a7161bf
 --- /dev/null
 +++ b/scripts/gcc-plugins/latent_entropy_plugin.c
 @@ -0,0 +1,613 @@
@@ -167456,7 +167351,7 @@ index 0000000..be3978c
 +
 +#include "gcc-common.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static GTY(()) tree latent_entropy_decl;
 +
@@ -167943,7 +167838,7 @@ index 0000000..be3978c
 +#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
 +#include "gcc-generate-gimple-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	bool enabled = true;
 +	const char * const plugin_name = plugin_info->base_name;
@@ -167993,7 +167888,7 @@ index 0000000..be3978c
 +}
 diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
 new file mode 100644
-index 0000000..a716d7a
+index 0000000..3ac813e
 --- /dev/null
 +++ b/scripts/gcc-plugins/randomize_layout_plugin.c
 @@ -0,0 +1,940 @@
@@ -168026,12 +167921,12 @@ index 0000000..a716d7a
 +#define ORIG_TYPE_NAME(node) \
 +	(TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static int performance_mode;
 +
 +static struct plugin_info randomize_layout_plugin_info = {
-+	.version	= "201402201816",
++	.version	= "201607271952",
 +	.help		= "disable\t\t\tdo not activate plugin\n"
 +			  "performance-mode\tenable cacheline-aware layout randomization\n"
 +};
@@ -168878,7 +168773,7 @@ index 0000000..a716d7a
 +#define TODO_FLAGS_FINISH TODO_dump_func
 +#include "gcc-generate-gimple-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	int i;
 +	const char * const plugin_name = plugin_info->base_name;
@@ -169607,10 +169502,10 @@ index 0000000..7c59f38
 +}
 diff --git a/scripts/gcc-plugins/rap_plugin/rap_plugin.c b/scripts/gcc-plugins/rap_plugin/rap_plugin.c
 new file mode 100644
-index 0000000..bca74dc
+index 0000000..80f2e14
 --- /dev/null
 +++ b/scripts/gcc-plugins/rap_plugin/rap_plugin.c
-@@ -0,0 +1,511 @@
+@@ -0,0 +1,513 @@
 +/*
 + * Copyright 2012-2016 by PaX Team <pageexec@freemail.hu>
 + * Licensed under the GPL v2
@@ -169627,7 +169522,7 @@ index 0000000..bca74dc
 +
 +#include "rap.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info rap_plugin_info = {
 +	.version	= "201604272100",
@@ -169889,6 +169784,7 @@ index 0000000..bca74dc
 +		fprintf(asm_out_file, "\t.previous\n");
 +}
 +
++#if BUILDING_GCC_VERSION >= 4007
 +// emit the rap hash as an absolute symbol for all functions seen in the frontend
 +// this is necessary as later unreferenced nodes will be removed yet we'd like to emit as many hashes as possible
 +static void rap_finish_decl(void *event_data, void *data __unused)
@@ -169937,6 +169833,7 @@ index 0000000..bca74dc
 +
 +	fprintf(asm_out_file, "\t.previous\n");
 +}
++#endif
 +
 +static bool rap_unignore_gate(void)
 +{
@@ -169987,7 +169884,7 @@ index 0000000..bca74dc
 +	LAST_GGC_ROOT_TAB
 +};
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	int i;
 +	const char * const plugin_name = plugin_info->base_name;
@@ -185058,10 +184955,10 @@ index 0000000..00c7430
 +}
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data
 new file mode 100644
-index 0000000..e6b58b6
+index 0000000..56d8f55
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,21990 @@
+@@ -0,0 +1,21992 @@
 +enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL
 +enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL
 +enable_so_v9fs_xattr_get_acl_fndecl_4 v9fs_xattr_get_acl fndecl 5 4 NULL
@@ -185556,6 +185453,7 @@ index 0000000..e6b58b6
 +enable_so_idma64_alloc_desc_fndecl_1433 idma64_alloc_desc fndecl 1 1433 NULL
 +enable_so_relocation_count_drm_i915_gem_exec_object2_1435 relocation_count drm_i915_gem_exec_object2 0 1435 NULL
 +enable_so_rdev_num_s2mps11_info_1439 rdev_num s2mps11_info 0 1439 NULL
++enable_so_nr_cpusets_fndecl_1442 nr_cpusets fndecl 0 1442 NULL
 +enable_so_alloc_libipw_fndecl_1447 alloc_libipw fndecl 1 1447 NULL nohasharray
 +enable_so_loc_addr_rio_transfer_io_1447 loc_addr rio_transfer_io 0 1447 &enable_so_alloc_libipw_fndecl_1447
 +enable_so_size_ip_vs_sync_mesg_1448 size ip_vs_sync_mesg 0 1448 NULL
@@ -192875,7 +192773,8 @@ index 0000000..e6b58b6
 +enable_so_ib_uverbs_reg_mr_fndecl_23214 ib_uverbs_reg_mr fndecl 4 23214 NULL
 +enable_so_btt_major_vardecl_btt_c_23220 btt_major vardecl_btt.c 0 23220 NULL
 +enable_so_read_swap_header_fndecl_23222 read_swap_header fndecl 0 23222 NULL
-+enable_so_num_channels_sh_mtu2_device_23224 num_channels sh_mtu2_device 0 23224 NULL
++enable_so_num_channels_sh_mtu2_device_23224 num_channels sh_mtu2_device 0 23224 NULL nohasharray
++enable_so_size_of_ntlmssp_blob_fndecl_23224 size_of_ntlmssp_blob fndecl 0 23224 &enable_so_num_channels_sh_mtu2_device_23224
 +enable_so_clk_core_get_rate_fndecl_23225 clk_core_get_rate fndecl 0 23225 NULL nohasharray
 +enable_so_vid_hdr_offset_ubi_device_23225 vid_hdr_offset ubi_device 0 23225 &enable_so_clk_core_get_rate_fndecl_23225
 +enable_so_nvkm_client_map_fndecl_23228 nvkm_client_map fndecl 3-2 23228 NULL
@@ -208837,7 +208736,7 @@ index 0000000..b5291e1
 +
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin.c b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin.c
 new file mode 100644
-index 0000000..be40980
+index 0000000..c471f8c
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_plugin.c
 @@ -0,0 +1,290 @@
@@ -208862,7 +208761,7 @@ index 0000000..be40980
 +
 +#include "size_overflow.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +tree report_size_overflow_decl;
 +
@@ -209058,7 +208957,7 @@ index 0000000..be40980
 +
 +#include "gcc-generate-gimple-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	int i;
 +	const char * const plugin_name = plugin_info->base_name;
@@ -211271,7 +211170,7 @@ index 0000000..69e3a85
 +}
 diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
 new file mode 100644
-index 0000000..8b69bd4
+index 0000000..d8d3a9e
 --- /dev/null
 +++ b/scripts/gcc-plugins/stackleak_plugin.c
 @@ -0,0 +1,350 @@
@@ -211297,7 +211196,7 @@ index 0000000..8b69bd4
 +
 +#include "gcc-common.h"
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static int track_frame_size = -1;
 +static const char track_function[] = "pax_track_stack";
@@ -211552,7 +211451,7 @@ index 0000000..8b69bd4
 +#define TODO_FLAGS_FINISH TODO_dump_func
 +#include "gcc-generate-rtl-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
 +	const int argc = plugin_info->argc;
@@ -211627,7 +211526,7 @@ index 0000000..8b69bd4
 +}
 diff --git a/scripts/gcc-plugins/structleak_plugin.c b/scripts/gcc-plugins/structleak_plugin.c
 new file mode 100644
-index 0000000..d7596e6
+index 0000000..583faac
 --- /dev/null
 +++ b/scripts/gcc-plugins/structleak_plugin.c
 @@ -0,0 +1,239 @@
@@ -211662,10 +211561,10 @@ index 0000000..d7596e6
 +// unused C type flag in all versions 4.5-6
 +#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_5(TYPE)
 +
-+int plugin_is_GPL_compatible;
++__visible int plugin_is_GPL_compatible;
 +
 +static struct plugin_info structleak_plugin_info = {
-+	.version	= "201602181345",
++	.version	= "201607271510",
 +	.help		= "disable\tdo not activate plugin\n",
 +};
 +
@@ -211829,7 +211728,7 @@ index 0000000..d7596e6
 +#define TODO_FLAGS_FINISH TODO_verify_il | TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
 +#include "gcc-generate-gimple-pass.h"
 +
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	int i;
 +	const char * const plugin_name = plugin_info->base_name;
@@ -211839,9 +211738,9 @@ index 0000000..d7596e6
 +	struct register_pass_info structleak_pass_info;
 +
 +	structleak_pass_info.pass			= make_structleak_pass();
-+	structleak_pass_info.reference_pass_name	= "ssa";
++	structleak_pass_info.reference_pass_name	= "early_optimizations";
 +	structleak_pass_info.ref_pass_instance_number	= 1;
-+	structleak_pass_info.pos_op			= PASS_POS_INSERT_AFTER;
++	structleak_pass_info.pos_op			= PASS_POS_INSERT_BEFORE;
 +
 +	if (!plugin_default_version_check(version, &gcc_version)) {
 +		error(G_("incompatible gcc/plugin versions"));
@@ -211978,7 +211877,7 @@ index 49d61ad..69ee2cf 100755
  kallsymso=""
  kallsyms_vmlinux=""
 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
-index a915507..27c1b41 100644
+index fec7578..a9dc8fe 100644
 --- a/scripts/mod/file2alias.c
 +++ b/scripts/mod/file2alias.c
 @@ -156,7 +156,7 @@ static void device_id_check(const char *modname, const char *device_id,
@@ -213434,7 +213333,7 @@ index c28b0f2..3b9fee0 100644
  
  	struct dentry *dents[AAFS_NS_SIZEOF];
 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
-index dec607c..53d479f 100644
+index 5ee8201..53d479f 100644
 --- a/security/apparmor/lsm.c
 +++ b/security/apparmor/lsm.c
 @@ -176,7 +176,7 @@ static int common_perm_dir_dentry(int op, struct path *dir,
@@ -213466,82 +213365,7 @@ index dec607c..53d479f 100644
  		struct path_cond cond = { d_backing_inode(old_dentry)->i_uid,
  					  d_backing_inode(old_dentry)->i_mode
  		};
-@@ -523,34 +523,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
- {
- 	struct common_audit_data sa;
- 	struct apparmor_audit_data aad = {0,};
--	char *command, *args = value;
-+	char *command, *largs = NULL, *args = value;
- 	size_t arg_size;
- 	int error;
- 
- 	if (size == 0)
- 		return -EINVAL;
--	/* args points to a PAGE_SIZE buffer, AppArmor requires that
--	 * the buffer must be null terminated or have size <= PAGE_SIZE -1
--	 * so that AppArmor can null terminate them
--	 */
--	if (args[size - 1] != '\0') {
--		if (size == PAGE_SIZE)
--			return -EINVAL;
--		args[size] = '\0';
--	}
--
- 	/* task can only write its own attributes */
- 	if (current != task)
- 		return -EACCES;
- 
--	args = value;
-+	/* AppArmor requires that the buffer must be null terminated atm */
-+	if (args[size - 1] != '\0') {
-+		/* null terminate */
-+		largs = args = kmalloc(size + 1, GFP_KERNEL);
-+		if (!args)
-+			return -ENOMEM;
-+		memcpy(args, value, size);
-+		args[size] = '\0';
-+	}
-+
-+	error = -EINVAL;
- 	args = strim(args);
- 	command = strsep(&args, " ");
- 	if (!args)
--		return -EINVAL;
-+		goto out;
- 	args = skip_spaces(args);
- 	if (!*args)
--		return -EINVAL;
-+		goto out;
- 
- 	arg_size = size - (args - (char *) value);
- 	if (strcmp(name, "current") == 0) {
-@@ -576,10 +576,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
- 			goto fail;
- 	} else
- 		/* only support the "current" and "exec" process attributes */
--		return -EINVAL;
-+		goto fail;
- 
- 	if (!error)
- 		error = size;
-+out:
-+	kfree(largs);
- 	return error;
- 
- fail:
-@@ -588,9 +590,9 @@ fail:
- 	aad.profile = aa_current_profile();
- 	aad.op = OP_SETPROCATTR;
- 	aad.info = name;
--	aad.error = -EINVAL;
-+	aad.error = error = -EINVAL;
- 	aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
--	return -EINVAL;
-+	goto out;
- }
- 
- static int apparmor_task_setrlimit(struct task_struct *task,
-@@ -677,11 +679,11 @@ static const struct kernel_param_ops param_ops_aalockpolicy = {
+@@ -679,11 +679,11 @@ static const struct kernel_param_ops param_ops_aalockpolicy = {
  	.get = param_get_aalockpolicy
  };
  
@@ -213557,7 +213381,7 @@ index dec607c..53d479f 100644
  
  /* Flag values, also controllable via /sys/module/apparmor/parameters
   * We define special types as we want to do additional mediation.
-@@ -791,7 +793,7 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
+@@ -793,7 +793,7 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
  	return param_get_uint(buffer, kp);
  }
  
@@ -213566,7 +213390,7 @@ index dec607c..53d479f 100644
  {
  	if (!capable(CAP_MAC_ADMIN))
  		return -EPERM;
-@@ -802,7 +804,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp)
+@@ -804,7 +804,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp)
  	return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
  }
  
@@ -213575,7 +213399,7 @@ index dec607c..53d479f 100644
  {
  	int i;
  	if (!capable(CAP_MAC_ADMIN))
-@@ -824,7 +826,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp)
+@@ -826,7 +826,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp)
  	return -EINVAL;
  }
  
@@ -213584,7 +213408,7 @@ index dec607c..53d479f 100644
  {
  	if (!capable(CAP_MAC_ADMIN))
  		return -EPERM;
-@@ -835,7 +837,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
+@@ -837,7 +837,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
  	return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
  }
  
@@ -213755,7 +213579,7 @@ index 5105c2c..a5010e6 100644
  extern struct key_type key_type_request_key_auth;
  extern struct key *request_key_auth_new(struct key *target,
 diff --git a/security/keys/key.c b/security/keys/key.c
-index b287551..9c7e4ae6 100644
+index af7f682..9c7e4ae6 100644
 --- a/security/keys/key.c
 +++ b/security/keys/key.c
 @@ -283,7 +283,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
@@ -213767,15 +213591,6 @@ index b287551..9c7e4ae6 100644
  	key->index_key.type = type;
  	key->user = user;
  	key->quotalen = quotalen;
-@@ -584,7 +584,7 @@ int key_reject_and_link(struct key *key,
- 
- 	mutex_unlock(&key_construction_mutex);
- 
--	if (keyring)
-+	if (keyring && link_ret == 0)
- 		__key_link_end(keyring, &key->index_key, edit);
- 
- 	/* wake up anyone waiting for a key to be constructed */
 @@ -1079,7 +1079,9 @@ int register_key_type(struct key_type *ktype)
  	struct key_type *p;
  	int ret;
@@ -215474,7 +215289,7 @@ index 0a578fe..b81f62d 100644
  })
  
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 4fd482f..9e5f572 100644
+index 7cb1224..a08f426 100644
 --- a/virt/kvm/kvm_main.c
 +++ b/virt/kvm/kvm_main.c
 @@ -90,12 +90,17 @@ LIST_HEAD(vm_list);

diff --git a/4.6.4/4425_grsec_remove_EI_PAX.patch b/4.6.5/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 4.6.4/4425_grsec_remove_EI_PAX.patch
rename to 4.6.5/4425_grsec_remove_EI_PAX.patch

diff --git a/4.6.4/4427_force_XATTR_PAX_tmpfs.patch b/4.6.5/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 4.6.4/4427_force_XATTR_PAX_tmpfs.patch
rename to 4.6.5/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/4.6.4/4430_grsec-remove-localversion-grsec.patch b/4.6.5/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 4.6.4/4430_grsec-remove-localversion-grsec.patch
rename to 4.6.5/4430_grsec-remove-localversion-grsec.patch

diff --git a/4.6.4/4435_grsec-mute-warnings.patch b/4.6.5/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 4.6.4/4435_grsec-mute-warnings.patch
rename to 4.6.5/4435_grsec-mute-warnings.patch

diff --git a/4.6.4/4440_grsec-remove-protected-paths.patch b/4.6.5/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 4.6.4/4440_grsec-remove-protected-paths.patch
rename to 4.6.5/4440_grsec-remove-protected-paths.patch

diff --git a/4.6.4/4450_grsec-kconfig-default-gids.patch b/4.6.5/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 4.6.4/4450_grsec-kconfig-default-gids.patch
rename to 4.6.5/4450_grsec-kconfig-default-gids.patch

diff --git a/4.6.4/4465_selinux-avc_audit-log-curr_ip.patch b/4.6.5/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 4.6.4/4465_selinux-avc_audit-log-curr_ip.patch
rename to 4.6.5/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/4.6.4/4470_disable-compat_vdso.patch b/4.6.5/4470_disable-compat_vdso.patch
similarity index 100%
rename from 4.6.4/4470_disable-compat_vdso.patch
rename to 4.6.5/4470_disable-compat_vdso.patch

diff --git a/4.6.4/4475_emutramp_default_on.patch b/4.6.5/4475_emutramp_default_on.patch
similarity index 100%
rename from 4.6.4/4475_emutramp_default_on.patch
rename to 4.6.5/4475_emutramp_default_on.patch


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2016-07-29 10:42 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-07-29 10:42 [gentoo-commits] proj/hardened-patchset:master commit in: 4.6.5/, 4.6.4/ Anthony G. Basile

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox