From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80]) by finch.gentoo.org (Postfix) with ESMTP id E28161388B6 for ; Fri, 24 Oct 2014 19:30:46 +0000 (UTC)
Received: from pigeon.gentoo.org (localhost [127.0.0.1]) by pigeon.gentoo.org (Postfix) with SMTP id 64C7AE0833; Fri, 24 Oct 2014 19:30:45 +0000 (UTC)
Received: from smtp.gentoo.org (smtp.gentoo.org [140.211.166.183]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by pigeon.gentoo.org (Postfix) with ESMTPS id BF5F5E0833 for ; Fri, 24 Oct 2014 19:30:44 +0000 (UTC)
Received: from oystercatcher.gentoo.org (oystercatcher.gentoo.org [148.251.78.52]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by smtp.gentoo.org (Postfix) with ESMTPS id 05D953403EA for ; Fri, 24 Oct 2014 19:30:43 +0000 (UTC)
Received: from localhost.localdomain (localhost [127.0.0.1]) by oystercatcher.gentoo.org (Postfix) with ESMTP id A6F2C8A67 for ; Fri, 24 Oct 2014 19:30:41 +0000 (UTC)
From: "Mike Pagano"
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano"
Message-ID: <1414178876.9af4ab13c5ebb5ad1085a58dff5527d93dd20fb6.mpagano@gentoo>
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1030_linux-3.12.31.patch
X-VCS-Directories: /
X-VCS-Committer: mpagano
X-VCS-Committer-Name: Mike Pagano
X-VCS-Revision: 9af4ab13c5ebb5ad1085a58dff5527d93dd20fb6
X-VCS-Branch: 3.12
Date: Fri, 24 Oct 2014 19:30:41 +0000 (UTC)
Precedence: bulk
List-Post:
List-Help:
List-Unsubscribe:
List-Subscribe:
List-Id: Gentoo Linux mail
X-BeenThere: gentoo-commits@lists.gentoo.org
X-Archives-Salt: d55ec32b-b24c-4a2a-bfe1-b8249d089b1c
X-Archives-Hash: 9f424475dae26e755b94e58802985f28

commit:     9af4ab13c5ebb5ad1085a58dff5527d93dd20fb6
Author:     Mike Pagano gentoo org>
AuthorDate: Fri Oct 24 19:27:56 2014 +0000
Commit:     Mike Pagano gentoo org>
CommitDate: Fri Oct 24 19:27:56 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=9af4ab13

Linux patch 3.12.31

---
 0000_README              |    4 +
 1030_linux-3.12.31.patch | 6468 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6472 insertions(+)

diff --git a/0000_README b/0000_README
index d8b89ec..0f4cf38 100644
--- a/0000_README
+++ b/0000_README
@@ -162,6 +162,10 @@ Patch: 1029_linux-3.12.30.patch
 From: http://www.kernel.org
 Desc: Linux 3.12.30
 
+Patch: 1030_linux-3.12.31.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.31
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1030_linux-3.12.31.patch b/1030_linux-3.12.31.patch new file mode 100644 index 0000000..830049e --- /dev/null +++ b/1030_linux-3.12.31.patch @@ -0,0 +1,6468 @@ +diff --git a/Makefile b/Makefile +index 1ad1566225ca..10eda74e4b54 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 30 ++SUBLEVEL = 31 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h +index 83259b873333..36172adda9d0 100644 +--- a/arch/arm/include/asm/tls.h ++++ b/arch/arm/include/asm/tls.h +@@ -1,6 +1,9 @@ + #ifndef __ASMARM_TLS_H + #define __ASMARM_TLS_H + ++#include ++#include ++ + #ifdef __ASSEMBLY__ + #include + .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 +@@ -50,6 +53,47 @@ + #endif + + #ifndef __ASSEMBLY__ ++ ++static inline void set_tls(unsigned long val) ++{ ++ struct thread_info *thread; ++ ++ thread = current_thread_info(); ++ ++ thread->tp_value[0] = val; ++ ++ /* ++ * This code runs with preemption enabled and therefore must ++ * be reentrant with respect to switch_tls. ++ * ++ * We need to ensure ordering between the shadow state and the ++ * hardware state, so that we don't corrupt the hardware state ++ * with a stale shadow state during context switch. ++ * ++ * If we're preempted here, switch_tls will load TPIDRURO from ++ * thread_info upon resuming execution and the following mcr ++ * is merely redundant. ++ */ ++ barrier(); ++ ++ if (!tls_emu) { ++ if (has_tls_reg) { ++ asm("mcr p15, 0, %0, c13, c0, 3" ++ : : "r" (val)); ++ } else { ++ /* ++ * User space must never try to access this ++ * directly. Expect your app to break ++ * eventually if you do so. The user helper ++ * at 0xffff0fe0 must be used instead. (see ++ * entry-armv.S for details) ++ */ ++ *((unsigned int *)0xffff0ff0) = val; ++ } ++ ++ } ++} ++ + static inline unsigned long get_tpuser(void) + { + unsigned long reg = 0; +@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void) + + return reg; + } ++ ++static inline void set_tpuser(unsigned long val) ++{ ++ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO), ++ * we need not update thread_info. 
++ */ ++ if (has_tls_reg && !tls_emu) { ++ asm("mcr p15, 0, %0, c13, c0, 2" ++ : : "r" (val)); ++ } ++} ++ ++static inline void flush_tls(void) ++{ ++ set_tls(0); ++ set_tpuser(0); ++} ++ + #endif + #endif /* __ASMARM_TLS_H */ +diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c +index 9723d17b8f38..1e782bdeee49 100644 +--- a/arch/arm/kernel/irq.c ++++ b/arch/arm/kernel/irq.c +@@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc) + c = irq_data_get_irq_chip(d); + if (!c->irq_set_affinity) + pr_debug("IRQ%u: unable to set affinity\n", d->irq); +- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) ++ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) + cpumask_copy(d->affinity, affinity); + + return ret; +diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c +index faa3d4c41ece..8cf0996aa1a8 100644 +--- a/arch/arm/kernel/machine_kexec.c ++++ b/arch/arm/kernel/machine_kexec.c +@@ -14,11 +14,12 @@ + #include + #include + #include ++#include + #include + #include + #include + +-extern const unsigned char relocate_new_kernel[]; ++extern void relocate_new_kernel(void); + extern const unsigned int relocate_new_kernel_size; + + extern unsigned long kexec_start_address; +@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image) + { + unsigned long page_list; + unsigned long reboot_code_buffer_phys; ++ unsigned long reboot_entry = (unsigned long)relocate_new_kernel; ++ unsigned long reboot_entry_phys; + void *reboot_code_buffer; + + /* +@@ -168,18 +171,18 @@ void machine_kexec(struct kimage *image) + + + /* copy our kernel relocation code to the control code page */ +- memcpy(reboot_code_buffer, +- relocate_new_kernel, relocate_new_kernel_size); ++ reboot_entry = fncpy(reboot_code_buffer, ++ reboot_entry, ++ relocate_new_kernel_size); ++ reboot_entry_phys = (unsigned long)reboot_entry + ++ (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer); + +- +- flush_icache_range((unsigned long) reboot_code_buffer, +- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); + printk(KERN_INFO "Bye!\n"); + + if (kexec_reinit) + kexec_reinit(); + +- soft_restart(reboot_code_buffer_phys); ++ soft_restart(reboot_entry_phys); + } + + void arch_crash_save_vmcoreinfo(void) +diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c +index 92f7b15dd221..5f6e650ec9ab 100644 +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -334,6 +334,8 @@ void flush_thread(void) + memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); + memset(&thread->fpstate, 0, sizeof(union fp_state)); + ++ flush_tls(); ++ + thread_notify(THREAD_NOTIFY_FLUSH, thread); + } + +diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S +index d0cdedf4864d..95858966d84e 100644 +--- a/arch/arm/kernel/relocate_kernel.S ++++ b/arch/arm/kernel/relocate_kernel.S +@@ -2,10 +2,12 @@ + * relocate_kernel.S - put the kernel image in place to boot + */ + ++#include + #include + +- .globl relocate_new_kernel +-relocate_new_kernel: ++ .align 3 /* not needed for this code, but keeps fncpy() happy */ ++ ++ENTRY(relocate_new_kernel) + + ldr r0,kexec_indirection_page + ldr r1,kexec_start_address +@@ -79,6 +81,8 @@ kexec_mach_type: + kexec_boot_atags: + .long 0x0 + ++ENDPROC(relocate_new_kernel) ++ + relocate_new_kernel_end: + + .globl relocate_new_kernel_size +diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c +index 7b8403b76666..80f0d69205e7 100644 +--- a/arch/arm/kernel/thumbee.c 
++++ b/arch/arm/kernel/thumbee.c +@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void + + switch (cmd) { + case THREAD_NOTIFY_FLUSH: +- thread->thumbee_state = 0; ++ teehbr_write(0); + break; + case THREAD_NOTIFY_SWITCH: + current_thread_info()->thumbee_state = teehbr_read(); +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c +index 1f735aafd5ec..a8dd111ff99a 100644 +--- a/arch/arm/kernel/traps.c ++++ b/arch/arm/kernel/traps.c +@@ -571,7 +571,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags) + #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) + asmlinkage int arm_syscall(int no, struct pt_regs *regs) + { +- struct thread_info *thread = current_thread_info(); + siginfo_t info; + + if ((no >> 16) != (__ARM_NR_BASE>> 16)) +@@ -622,21 +621,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) + return regs->ARM_r0; + + case NR(set_tls): +- thread->tp_value[0] = regs->ARM_r0; +- if (tls_emu) +- return 0; +- if (has_tls_reg) { +- asm ("mcr p15, 0, %0, c13, c0, 3" +- : : "r" (regs->ARM_r0)); +- } else { +- /* +- * User space must never try to access this directly. +- * Expect your app to break eventually if you do so. +- * The user helper at 0xffff0fe0 must be used instead. +- * (see entry-armv.S for details) +- */ +- *((unsigned int *)0xffff0ff0) = regs->ARM_r0; +- } ++ set_tls(regs->ARM_r0); + return 0; + + #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG +diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h +index 4588df1447ed..78f44f37c8c7 100644 +--- a/arch/arm/mach-omap2/soc.h ++++ b/arch/arm/mach-omap2/soc.h +@@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437) + #define soc_is_omap54xx() 0 + #define soc_is_omap543x() 0 + #define soc_is_dra7xx() 0 ++#define soc_is_dra74x() 0 ++#define soc_is_dra72x() 0 + + #if defined(MULTI_OMAP2) + # if defined(CONFIG_ARCH_OMAP2) +@@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430) + + #if defined(CONFIG_SOC_DRA7XX) + #undef soc_is_dra7xx ++#undef soc_is_dra74x ++#undef soc_is_dra72x + #define soc_is_dra7xx() (of_machine_is_compatible("ti,dra7")) ++#define soc_is_dra74x() (of_machine_is_compatible("ti,dra74")) ++#define soc_is_dra72x() (of_machine_is_compatible("ti,dra72")) + #endif + + /* Various silicon revisions for omap2 */ +diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S +index 80741992a9fc..5d777a567c35 100644 +--- a/arch/arm/mm/abort-ev6.S ++++ b/arch/arm/mm/abort-ev6.S +@@ -17,12 +17,6 @@ + */ + .align 5 + ENTRY(v6_early_abort) +-#ifdef CONFIG_CPU_V6 +- sub r1, sp, #4 @ Get unused stack location +- strex r0, r1, [r1] @ Clear the exclusive monitor +-#elif defined(CONFIG_CPU_32v6K) +- clrex +-#endif + mrc p15, 0, r1, c5, c0, 0 @ get FSR + mrc p15, 0, r0, c6, c0, 0 @ get FAR + /* +diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S +index 703375277ba6..4812ad054214 100644 +--- a/arch/arm/mm/abort-ev7.S ++++ b/arch/arm/mm/abort-ev7.S +@@ -13,12 +13,6 @@ + */ + .align 5 + ENTRY(v7_early_abort) +- /* +- * The effect of data aborts on on the exclusive access monitor are +- * UNPREDICTABLE. Do a CLREX to clear the state +- */ +- clrex +- + mrc p15, 0, r1, c5, c0, 0 @ get FSR + mrc p15, 0, r0, c6, c0, 0 @ get FAR + +diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c +index 6f4585b89078..1fe0bf5c7375 100644 +--- a/arch/arm/mm/alignment.c ++++ b/arch/arm/mm/alignment.c +@@ -39,6 +39,7 @@ + * This code is not portable to processors with late data abort handling. 
+ */ + #define CODING_BITS(i) (i & 0x0e000000) ++#define COND_BITS(i) (i & 0xf0000000) + + #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ + #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ +@@ -812,6 +813,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + break; + + case 0x04000000: /* ldr or str immediate */ ++ if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */ ++ goto bad; + offset.un = OFFSET_BITS(instr); + handler = do_alignment_ldrstr; + break; +diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h +index d064047612b1..52b484b6aa1a 100644 +--- a/arch/arm64/include/asm/hw_breakpoint.h ++++ b/arch/arm64/include/asm/hw_breakpoint.h +@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg, + */ + #define ARM_MAX_BRP 16 + #define ARM_MAX_WRP 16 +-#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP) + + /* Virtual debug register bases. */ + #define AARCH64_DBG_REG_BVR 0 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c +index 9fa78cd0f092..ee79a1a6e965 100644 +--- a/arch/arm64/kernel/ptrace.c ++++ b/arch/arm64/kernel/ptrace.c +@@ -81,7 +81,8 @@ static void ptrace_hbptriggered(struct perf_event *bp, + break; + } + } +- for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) { ++ ++ for (i = 0; i < ARM_MAX_WRP; ++i) { + if (current->thread.debug.hbp_watch[i] == bp) { + info.si_errno = -((i << 1) + 1); + break; +diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c +index 2c9573098c0d..d498a1f9bccf 100644 +--- a/arch/mips/boot/compressed/decompress.c ++++ b/arch/mips/boot/compressed/decompress.c +@@ -13,6 +13,7 @@ + + #include + #include ++#include + + #include + +diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S +index 539b6294b613..8f89ff4ed524 100644 +--- a/arch/mips/kernel/mcount.S ++++ b/arch/mips/kernel/mcount.S +@@ -123,7 +123,11 @@ NESTED(_mcount, PT_SIZE, ra) + nop + #endif + b ftrace_stub ++#ifdef CONFIG_32BIT ++ addiu sp, sp, 8 ++#else + nop ++#endif + + static_trace: + MCOUNT_SAVE_REGS +@@ -133,6 +137,9 @@ static_trace: + move a1, AT /* arg2: parent's return address */ + + MCOUNT_RESTORE_REGS ++#ifdef CONFIG_32BIT ++ addiu sp, sp, 8 ++#endif + .globl ftrace_stub + ftrace_stub: + RETURN_BACK +@@ -177,6 +184,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra) + jal prepare_ftrace_return + nop + MCOUNT_RESTORE_REGS ++#ifndef CONFIG_DYNAMIC_FTRACE ++#ifdef CONFIG_32BIT ++ addiu sp, sp, 8 ++#endif ++#endif + RETURN_BACK + END(ftrace_graph_caller) + +diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile +index e02f665f804a..ceadda990f22 100644 +--- a/arch/parisc/Makefile ++++ b/arch/parisc/Makefile +@@ -48,7 +48,12 @@ cflags-y := -pipe + + # These flags should be implied by an hppa-linux configuration, but they + # are not in gcc 3.2. +-cflags-y += -mno-space-regs -mfast-indirect-calls ++cflags-y += -mno-space-regs ++ ++# -mfast-indirect-calls is only relevant for 32-bit kernels. ++ifndef CONFIG_64BIT ++cflags-y += -mfast-indirect-calls ++endif + + # Currently we save and restore fpregs on all kernel entry/interruption paths. 
+ # If that gets optimized, we might need to disable the use of fpregs in the +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c +index de8cbc30dcd1..5664be4a3680 100644 +--- a/arch/s390/mm/pgtable.c ++++ b/arch/s390/mm/pgtable.c +@@ -788,11 +788,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, + pte_t *ptep; + + down_read(&mm->mmap_sem); ++retry: + ptep = get_locked_pte(current->mm, addr, &ptl); + if (unlikely(!ptep)) { + up_read(&mm->mmap_sem); + return -EFAULT; + } ++ if (!(pte_val(*ptep) & _PAGE_INVALID) && ++ (pte_val(*ptep) & _PAGE_PROTECT)) { ++ pte_unmap_unlock(*ptep, ptl); ++ if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { ++ up_read(&mm->mmap_sem); ++ return -EFAULT; ++ } ++ goto retry; ++ } + + new = old = pgste_get_lock(ptep); + pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index b17dfe212233..9ccb7efe0270 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -1285,6 +1285,9 @@ static void remove_siblinginfo(int cpu) + + for_each_cpu(sibling, cpu_sibling_mask(cpu)) + cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); ++ for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) ++ cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); ++ cpumask_clear(cpu_llc_shared_mask(cpu)); + cpumask_clear(cpu_sibling_mask(cpu)); + cpumask_clear(cpu_core_mask(cpu)); + c->phys_proc_id = 0; +diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h +index 0fdf5d043f82..4651cb99b62b 100644 +--- a/arch/xtensa/include/asm/pgtable.h ++++ b/arch/xtensa/include/asm/pgtable.h +@@ -67,7 +67,12 @@ + #define VMALLOC_START 0xC0000000 + #define VMALLOC_END 0xC7FEFFFF + #define TLBTEMP_BASE_1 0xC7FF0000 +-#define TLBTEMP_BASE_2 0xC7FF8000 ++#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) ++#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE ++#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) ++#else ++#define TLBTEMP_SIZE ICACHE_WAY_SIZE ++#endif + + /* + * For the Xtensa architecture, the PTE layout is as follows: +diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h +index fd686dc45d1a..c7211e7e182d 100644 +--- a/arch/xtensa/include/asm/uaccess.h ++++ b/arch/xtensa/include/asm/uaccess.h +@@ -52,7 +52,12 @@ + */ + .macro get_fs ad, sp + GET_CURRENT(\ad,\sp) ++#if THREAD_CURRENT_DS > 1020 ++ addi \ad, \ad, TASK_THREAD ++ l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD ++#else + l32i \ad, \ad, THREAD_CURRENT_DS ++#endif + .endm + + /* +diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h +index b4cb1100c0fb..a47909f0c34b 100644 +--- a/arch/xtensa/include/uapi/asm/ioctls.h ++++ b/arch/xtensa/include/uapi/asm/ioctls.h +@@ -28,17 +28,17 @@ + #define TCSETSW 0x5403 + #define TCSETSF 0x5404 + +-#define TCGETA _IOR('t', 23, struct termio) +-#define TCSETA _IOW('t', 24, struct termio) +-#define TCSETAW _IOW('t', 25, struct termio) +-#define TCSETAF _IOW('t', 28, struct termio) ++#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ ++#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ ++#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ ++#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ + + #define TCSBRK _IO('t', 29) + #define TCXONC _IO('t', 30) + #define TCFLSH _IO('t', 31) + +-#define TIOCSWINSZ _IOW('t', 103, struct winsize) +-#define TIOCGWINSZ _IOR('t', 104, struct winsize) ++#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ ++#define 
TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ + #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ + #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ + #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ +@@ -88,7 +88,6 @@ + #define TIOCSETD _IOW('T', 35, int) + #define TIOCGETD _IOR('T', 36, int) + #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ +-#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ + #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ + #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ + #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ +@@ -114,8 +113,10 @@ + #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */ + /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +-#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ +-#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ ++#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ ++ /* _IOR('T', 90, struct serial_multiport_struct) */ ++#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ ++ /* _IOW('T', 91, struct serial_multiport_struct) */ + + #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ + #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S +index b61e25146a2f..4b8e636e60da 100644 +--- a/arch/xtensa/kernel/entry.S ++++ b/arch/xtensa/kernel/entry.S +@@ -1001,9 +1001,8 @@ ENTRY(fast_syscall_xtensa) + movi a7, 4 # sizeof(unsigned int) + access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp + +- addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 +- _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill +- _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp ++ _bgeui a6, SYS_XTENSA_COUNT, .Lill ++ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp + + /* Fall through for ATOMIC_CMP_SWP. */ + +@@ -1015,27 +1014,26 @@ TRY s32i a5, a3, 0 # different, modify value + l32i a7, a2, PT_AREG7 # restore a7 + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, 1 # and return 1 +- addi a6, a6, 1 # restore a6 (really necessary?) + rfe + + 1: l32i a7, a2, PT_AREG7 # restore a7 + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, 0 # return 0 (note that we cannot set +- addi a6, a6, 1 # restore a6 (really necessary?) + rfe + + .Lnswp: /* Atomic set, add, and exg_add. */ + + TRY l32i a7, a3, 0 # orig ++ addi a6, a6, -SYS_XTENSA_ATOMIC_SET + add a0, a4, a7 # + arg + moveqz a0, a4, a6 # set ++ addi a6, a6, SYS_XTENSA_ATOMIC_SET + TRY s32i a0, a3, 0 # write new value + + mov a0, a2 + mov a2, a7 + l32i a7, a0, PT_AREG7 # restore a7 + l32i a0, a0, PT_AREG0 # restore a0 +- addi a6, a6, 1 # restore a6 (really necessary?) + rfe + + CATCH +@@ -1044,7 +1042,7 @@ CATCH + movi a2, -EFAULT + rfe + +-.Lill: l32i a7, a2, PT_AREG0 # restore a7 ++.Lill: l32i a7, a2, PT_AREG7 # restore a7 + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, -EINVAL + rfe +@@ -1600,7 +1598,7 @@ ENTRY(fast_second_level_miss) + rsr a0, excvaddr + bltu a0, a3, 2f + +- addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) ++ addi a1, a0, -TLBTEMP_SIZE + bgeu a1, a3, 2f + + /* Check if we have to restore an ITLB mapping. 
*/ +@@ -1855,7 +1853,6 @@ ENTRY(_switch_to) + + entry a1, 16 + +- mov a10, a2 # preserve 'prev' (a2) + mov a11, a3 # and 'next' (a3) + + l32i a4, a2, TASK_THREAD_INFO +@@ -1863,8 +1860,14 @@ ENTRY(_switch_to) + + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER + +- s32i a0, a10, THREAD_RA # save return address +- s32i a1, a10, THREAD_SP # save stack pointer ++#if THREAD_RA > 1020 || THREAD_SP > 1020 ++ addi a10, a2, TASK_THREAD ++ s32i a0, a10, THREAD_RA - TASK_THREAD # save return address ++ s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer ++#else ++ s32i a0, a2, THREAD_RA # save return address ++ s32i a1, a2, THREAD_SP # save stack pointer ++#endif + + /* Disable ints while we manipulate the stack pointer. */ + +@@ -1905,7 +1908,6 @@ ENTRY(_switch_to) + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER + + wsr a14, ps +- mov a2, a10 # return 'prev' + rsync + + retw +diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c +index 2d9cc6dbfd78..e8b76b8e4b29 100644 +--- a/arch/xtensa/kernel/pci-dma.c ++++ b/arch/xtensa/kernel/pci-dma.c +@@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) + + /* We currently don't support coherent memory outside KSEG */ + +- if (ret < XCHAL_KSEG_CACHED_VADDR +- || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) +- BUG(); ++ BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || ++ ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); + + + if (ret != 0) { +@@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent); + void dma_free_coherent(struct device *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) + { +- long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; ++ unsigned long addr = (unsigned long)vaddr + ++ XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; + +- if (addr < 0 || addr >= XCHAL_KSEG_SIZE) +- BUG(); ++ BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || ++ addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); + + free_pages(addr, get_order(size)); + } +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index 434944cbd761..06c2bab69756 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -1275,12 +1275,16 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) + static void + cfq_update_group_weight(struct cfq_group *cfqg) + { +- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); +- + if (cfqg->new_weight) { + cfqg->weight = cfqg->new_weight; + cfqg->new_weight = 0; + } ++} ++ ++static void ++cfq_update_group_leaf_weight(struct cfq_group *cfqg) ++{ ++ BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); + + if (cfqg->new_leaf_weight) { + cfqg->leaf_weight = cfqg->new_leaf_weight; +@@ -1299,7 +1303,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) + /* add to the service tree */ + BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); + +- cfq_update_group_weight(cfqg); ++ cfq_update_group_leaf_weight(cfqg); + __cfq_group_service_tree_add(st, cfqg); + + /* +@@ -1323,6 +1327,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) + */ + while ((parent = cfqg_parent(pos))) { + if (propagate) { ++ cfq_update_group_weight(pos); + propagate = !parent->nr_active++; + parent->children_weight += pos->weight; + } +diff --git a/block/genhd.c b/block/genhd.c +index 791f41943132..e6723bd4d7a1 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -28,10 +28,10 @@ struct kobject *block_depr; + /* for extended dynamic devt allocation, currently only one major is used */ + #define NR_EXT_DEVT (1 << MINORBITS) + +-/* For 
extended devt allocation. ext_devt_mutex prevents look up ++/* For extended devt allocation. ext_devt_lock prevents look up + * results from going away underneath its user. + */ +-static DEFINE_MUTEX(ext_devt_mutex); ++static DEFINE_SPINLOCK(ext_devt_lock); + static DEFINE_IDR(ext_devt_idr); + + static struct device_type disk_type; +@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) + } + + /* allocate ext devt */ +- mutex_lock(&ext_devt_mutex); +- idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL); +- mutex_unlock(&ext_devt_mutex); ++ idr_preload(GFP_KERNEL); ++ ++ spin_lock(&ext_devt_lock); ++ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); ++ spin_unlock(&ext_devt_lock); ++ ++ idr_preload_end(); + if (idx < 0) + return idx == -ENOSPC ? -EBUSY : idx; + +@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) + */ + void blk_free_devt(dev_t devt) + { +- might_sleep(); +- + if (devt == MKDEV(0, 0)) + return; + + if (MAJOR(devt) == BLOCK_EXT_MAJOR) { +- mutex_lock(&ext_devt_mutex); ++ spin_lock(&ext_devt_lock); + idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); +- mutex_unlock(&ext_devt_mutex); ++ spin_unlock(&ext_devt_lock); + } + } + +@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk) + sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); + pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); + device_del(disk_to_dev(disk)); +- blk_free_devt(disk_to_dev(disk)->devt); + } + EXPORT_SYMBOL(del_gendisk); + +@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) + } else { + struct hd_struct *part; + +- mutex_lock(&ext_devt_mutex); ++ spin_lock(&ext_devt_lock); + part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); + if (part && get_disk(part_to_disk(part))) { + *partno = part->partno; + disk = part_to_disk(part); + } +- mutex_unlock(&ext_devt_mutex); ++ spin_unlock(&ext_devt_lock); + } + + return disk; +@@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev) + { + struct gendisk *disk = dev_to_disk(dev); + ++ blk_free_devt(dev->devt); + disk_release_events(disk); + kfree(disk->random); + disk_replace_part_tbl(disk, NULL); +diff --git a/block/partition-generic.c b/block/partition-generic.c +index 789cdea05893..0d9e5f97f0a8 100644 +--- a/block/partition-generic.c ++++ b/block/partition-generic.c +@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = { + static void part_release(struct device *dev) + { + struct hd_struct *p = dev_to_part(dev); ++ blk_free_devt(dev->devt); + free_part_stats(p); + free_part_info(p); + kfree(p); +@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno) + rcu_assign_pointer(ptbl->last_lookup, NULL); + kobject_put(part->holder_dir); + device_del(part_to_dev(part)); +- blk_free_devt(part_devt(part)); + + hd_struct_put(part); + } +diff --git a/block/partitions/aix.c b/block/partitions/aix.c +index 43be471d9b1d..0931f5136ab2 100644 +--- a/block/partitions/aix.c ++++ b/block/partitions/aix.c +@@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitions *state) + continue; + } + lv_ix = be16_to_cpu(p->lv_ix) - 1; +- if (lv_ix > state->limit) { ++ if (lv_ix >= state->limit) { + cur_lv_ix = -1; + continue; + } +diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c +index 84190ed89c04..aff69d9bfcbf 100644 +--- a/drivers/acpi/acpi_cmos_rtc.c ++++ b/drivers/acpi/acpi_cmos_rtc.c +@@ -35,7 +35,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address, 
+ void *handler_context, void *region_context) + { + int i; +- u8 *value = (u8 *)&value64; ++ u8 *value = (u8 *)value64; + + if (address > 0xff || !value64) + return AE_BAD_PARAMETER; +diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c +index 513ad7ed0c99..d2b5cf36b42d 100644 +--- a/drivers/ata/ata_piix.c ++++ b/drivers/ata/ata_piix.c +@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = { + { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, + /* SATA Controller IDE (Coleto Creek) */ + { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, ++ /* SATA Controller IDE (9 Series) */ ++ { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, ++ /* SATA Controller IDE (9 Series) */ ++ { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, ++ /* SATA Controller IDE (9 Series) */ ++ { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, ++ /* SATA Controller IDE (9 Series) */ ++ { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + + { } /* terminate list */ + }; +diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c +index d6c2d691b6e8..8560dca42aea 100644 +--- a/drivers/base/regmap/regcache.c ++++ b/drivers/base/regmap/regcache.c +@@ -690,7 +690,7 @@ int regcache_sync_block(struct regmap *map, void *block, + unsigned int block_base, unsigned int start, + unsigned int end) + { +- if (regmap_can_raw_write(map)) ++ if (regmap_can_raw_write(map) && !map->use_single_rw) + return regcache_sync_block_raw(map, block, cache_present, + block_base, start, end); + else +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index 7d689a15c500..18ea82c9146c 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -114,7 +114,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg) + + bool regmap_volatile(struct regmap *map, unsigned int reg) + { +- if (!regmap_readable(map, reg)) ++ if (!map->format.format_write && !regmap_readable(map, reg)) + return false; + + if (map->volatile_reg) +diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c +index 8cc1e640f485..5369baf23d6a 100644 +--- a/drivers/block/drbd/drbd_nl.c ++++ b/drivers/block/drbd/drbd_nl.c +@@ -525,6 +525,12 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn) + struct task_struct *opa; + + kref_get(&tconn->kref); ++ /* We may just have force_sig()'ed this thread ++ * to get it out of some blocking network function. ++ * Clear signals; otherwise kthread_run(), which internally uses ++ * wait_on_completion_killable(), will mistake our pending signal ++ * for a new fatal signal and fail. 
*/ ++ flush_signals(current); + opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h"); + if (IS_ERR(opa)) { + conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n"); +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index a004769528e6..f5966413fbb2 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -1368,6 +1368,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even + static void clk_change_rate(struct clk *clk) + { + struct clk *child; ++ struct hlist_node *tmp; + unsigned long old_rate; + unsigned long best_parent_rate = 0; + +@@ -1391,7 +1392,11 @@ static void clk_change_rate(struct clk *clk) + if (clk->notifier_count && old_rate != clk->rate) + __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); + +- hlist_for_each_entry(child, &clk->children, child_node) { ++ /* ++ * Use safe iteration, as change_rate can actually swap parents ++ * for certain clock types. ++ */ ++ hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) { + /* Skip children who will be reparented to another clock */ + if (child->new_parent && child->new_parent != clk) + continue; +diff --git a/drivers/dma/TODO b/drivers/dma/TODO +index 734ed0206cd5..b8045cd42ee1 100644 +--- a/drivers/dma/TODO ++++ b/drivers/dma/TODO +@@ -7,7 +7,6 @@ TODO for slave dma + - imx-dma + - imx-sdma + - mxs-dma.c +- - dw_dmac + - intel_mid_dma + 4. Check other subsystems for dma drivers and merge/move to dmaengine + 5. Remove dma_slave_config's dma direction. +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c +index da87adf85f03..a8884b8aaa9e 100644 +--- a/drivers/dma/dw/core.c ++++ b/drivers/dma/dw/core.c +@@ -282,6 +282,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) + channel_set_bit(dw, CH_EN, dwc->mask); + } + ++static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) ++{ ++ if (list_empty(&dwc->queue)) ++ return; ++ ++ list_move(dwc->queue.next, &dwc->active_list); ++ dwc_dostart(dwc, dwc_first_active(dwc)); ++} ++ + /*----------------------------------------------------------------------*/ + + static void +@@ -357,10 +366,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) + * the completed ones. + */ + list_splice_init(&dwc->active_list, &list); +- if (!list_empty(&dwc->queue)) { +- list_move(dwc->queue.next, &dwc->active_list); +- dwc_dostart(dwc, dwc_first_active(dwc)); +- } ++ dwc_dostart_first_queued(dwc); + + spin_unlock_irqrestore(&dwc->lock, flags); + +@@ -490,10 +496,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) + /* Try to continue after resetting the channel... */ + dwc_chan_disable(dw, dwc); + +- if (!list_empty(&dwc->queue)) { +- list_move(dwc->queue.next, &dwc->active_list); +- dwc_dostart(dwc, dwc_first_active(dwc)); +- } ++ dwc_dostart_first_queued(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + } + +@@ -700,17 +703,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) + * possible, perhaps even appending to those already submitted + * for DMA. But this is hard to do in a race-free manner. 
+ */ +- if (list_empty(&dwc->active_list)) { +- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, +- desc->txd.cookie); +- list_add_tail(&desc->desc_node, &dwc->active_list); +- dwc_dostart(dwc, dwc_first_active(dwc)); +- } else { +- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, +- desc->txd.cookie); + +- list_add_tail(&desc->desc_node, &dwc->queue); +- } ++ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); ++ list_add_tail(&desc->desc_node, &dwc->queue); + + spin_unlock_irqrestore(&dwc->lock, flags); + +@@ -1116,9 +1111,12 @@ dwc_tx_status(struct dma_chan *chan, + static void dwc_issue_pending(struct dma_chan *chan) + { + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); ++ unsigned long flags; + +- if (!list_empty(&dwc->queue)) +- dwc_scan_descriptors(to_dw_dma(chan->device), dwc); ++ spin_lock_irqsave(&dwc->lock, flags); ++ if (list_empty(&dwc->active_list)) ++ dwc_dostart_first_queued(dwc); ++ spin_unlock_irqrestore(&dwc->lock, flags); + } + + static int dwc_alloc_chan_resources(struct dma_chan *chan) +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c +index 7f6152d374ca..d57a38d1ca69 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev) + } + ast->vga2_clone = false; + } else { +- ast->chip = 2000; ++ ast->chip = AST2000; + DRM_INFO("AST 2000 detected\n"); + } + } +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 35066a9b535f..ef5fe7e9d86c 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -1386,10 +1386,13 @@ unlock: + out: + switch (ret) { + case -EIO: +- /* If this -EIO is due to a gpu hang, give the reset code a +- * chance to clean up the mess. Otherwise return the proper +- * SIGBUS. */ +- if (i915_terminally_wedged(&dev_priv->gpu_error)) ++ /* ++ * We eat errors when the gpu is terminally wedged to avoid ++ * userspace unduly crashing (gl has no provisions for mmaps to ++ * fail). But any other -EIO isn't ours (e.g. swap in failure) ++ * and so needs to be reported. 
++ */ ++ if (!i915_terminally_wedged(&dev_priv->gpu_error)) + return VM_FAULT_SIGBUS; + case -EAGAIN: + /* +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c +index c7fa2e420d49..4ac33d27ee87 100644 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c +@@ -521,6 +521,16 @@ void i915_check_and_clear_faults(struct drm_device *dev) + POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); + } + ++static void i915_ggtt_flush(struct drm_i915_private *dev_priv) ++{ ++ if (INTEL_INFO(dev_priv->dev)->gen < 6) { ++ intel_gtt_chipset_flush(); ++ } else { ++ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); ++ POSTING_READ(GFX_FLSH_CNTL_GEN6); ++ } ++} ++ + void i915_gem_suspend_gtt_mappings(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +@@ -537,6 +547,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) + dev_priv->gtt.base.start / PAGE_SIZE, + dev_priv->gtt.base.total / PAGE_SIZE, + true); ++ ++ i915_ggtt_flush(dev_priv); + } + + void i915_gem_restore_gtt_mappings(struct drm_device *dev) +@@ -557,7 +569,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) + i915_gem_gtt_bind_object(obj, obj->cache_level); + } + +- i915_gem_chipset_flush(dev); ++ i915_ggtt_flush(dev_priv); + } + + int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c +index 53f2bed8bc5f..16ca7f648d0a 100644 +--- a/drivers/gpu/drm/i915/intel_bios.c ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -657,7 +657,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) + DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); + } + +-static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) ++static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) + { + DRM_DEBUG_KMS("Falling back to manually reading VBT from " + "VBIOS ROM for %s\n", +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c +index e8edbb751e9a..3c25af46ba07 100644 +--- a/drivers/gpu/drm/i915/intel_crt.c ++++ b/drivers/gpu/drm/i915/intel_crt.c +@@ -743,7 +743,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { + .destroy = intel_encoder_destroy, + }; + +-static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) ++static int intel_no_crt_dmi_callback(const struct dmi_system_id *id) + { + DRM_INFO("Skipping CRT initialization for %s\n", id->ident); + return 1; +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c +index b8af94a5be39..667f2117e1d9 100644 +--- a/drivers/gpu/drm/i915/intel_lvds.c ++++ b/drivers/gpu/drm/i915/intel_lvds.c +@@ -523,7 +523,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { + .destroy = intel_encoder_destroy, + }; + +-static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) ++static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) + { + DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); + return 1; +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c +index a1e980938fef..6b58be124f9e 100644 +--- a/drivers/gpu/drm/i915/intel_tv.c ++++ b/drivers/gpu/drm/i915/intel_tv.c +@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder) + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + ++ /* Prevents vblank waits from timing out in 
intel_tv_detect_type() */ ++ intel_wait_for_vblank(encoder->base.dev, ++ to_intel_crtc(encoder->base.crtc)->pipe); ++ + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c +index 81638d7f2eff..13790ea48c42 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_vga.c ++++ b/drivers/gpu/drm/nouveau/nouveau_vga.c +@@ -98,7 +98,16 @@ void + nouveau_vga_fini(struct nouveau_drm *drm) + { + struct drm_device *dev = drm->dev; ++ bool runtime = false; ++ ++ if (nouveau_runtime_pm == 1) ++ runtime = true; ++ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) ++ runtime = true; ++ + vga_switcheroo_unregister_client(dev->pdev); ++ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) ++ vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); + vga_client_register(dev->pdev, NULL, NULL, NULL); + } + +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c +index 78e25d2e2fc4..95f4ab99c44b 100644 +--- a/drivers/gpu/drm/radeon/ci_dpm.c ++++ b/drivers/gpu/drm/radeon/ci_dpm.c +@@ -820,6 +820,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev, + WREG32_SMC(CG_THERMAL_CTRL, tmp); + #endif + ++ rdev->pm.dpm.thermal.min_temp = low_temp; ++ rdev->pm.dpm.thermal.max_temp = high_temp; ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index 9d9770d201ae..ceba819891f4 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -3764,7 +3764,7 @@ struct bonaire_mqd + */ + static int cik_cp_compute_resume(struct radeon_device *rdev) + { +- int r, i, idx; ++ int r, i, j, idx; + u32 tmp; + bool use_doorbell = true; + u64 hqd_gpu_addr; +@@ -3887,7 +3887,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) + mqd->queue_state.cp_hqd_pq_wptr= 0; + if (RREG32(CP_HQD_ACTIVE) & 1) { + WREG32(CP_HQD_DEQUEUE_REQUEST, 1); +- for (i = 0; i < rdev->usec_timeout; i++) { ++ for (j = 0; j < rdev->usec_timeout; j++) { + if (!(RREG32(CP_HQD_ACTIVE) & 1)) + break; + udelay(1); +diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c +index 0c6784f52410..dc055d40b96e 100644 +--- a/drivers/gpu/drm/radeon/cik_sdma.c ++++ b/drivers/gpu/drm/radeon/cik_sdma.c +@@ -369,13 +369,6 @@ int cik_sdma_resume(struct radeon_device *rdev) + { + int r; + +- /* Reset dma */ +- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); +- RREG32(SRBM_SOFT_RESET); +- udelay(50); +- WREG32(SRBM_SOFT_RESET, 0); +- RREG32(SRBM_SOFT_RESET); +- + r = cik_sdma_load_microcode(rdev); + if (r) + return r; +diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c +index dd6e9688fbef..d0e4ab12f2e5 100644 +--- a/drivers/gpu/drm/radeon/ni_dma.c ++++ b/drivers/gpu/drm/radeon/ni_dma.c +@@ -119,12 +119,6 @@ int cayman_dma_resume(struct radeon_device *rdev) + u32 reg_offset, wb_offset; + int i, r; + +- /* Reset dma */ +- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); +- RREG32(SRBM_SOFT_RESET); +- udelay(50); +- WREG32(SRBM_SOFT_RESET, 0); +- + for (i = 0; i < 2; i++) { + if (i == 0) { + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; +diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c +index 3b317456512a..aad3c3634c2f 100644 +--- a/drivers/gpu/drm/radeon/r600_dma.c ++++ b/drivers/gpu/drm/radeon/r600_dma.c +@@ -116,15 +116,6 @@ int r600_dma_resume(struct radeon_device *rdev) + u32 rb_bufsz; + int r; + +- /* Reset dma */ +- if 
(rdev->family >= CHIP_RV770) +- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); +- else +- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); +- RREG32(SRBM_SOFT_RESET); +- udelay(50); +- WREG32(SRBM_SOFT_RESET, 0); +- + WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); + WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); + +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 402d4630d13e..0f538a442abf 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -464,6 +464,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, + } + } + ++ /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ ++ if ((dev->pdev->device == 0x9805) && ++ (dev->pdev->subsystem_vendor == 0x1734) && ++ (dev->pdev->subsystem_device == 0x11bd)) { ++ if (*connector_type == DRM_MODE_CONNECTOR_VGA) ++ return false; ++ } + + return true; + } +@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_KV; +- } else if ((controller->ucType == +- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || +- (controller->ucType == +- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || +- (controller->ucType == +- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { +- DRM_INFO("Special thermal controller config\n"); ++ } else if (controller->ucType == ++ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { ++ DRM_INFO("External GPIO thermal controller %s fan control\n", ++ (controller->ucFanParameters & ++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); ++ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; ++ } else if (controller->ucType == ++ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { ++ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", ++ (controller->ucFanParameters & ++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); ++ rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; ++ } else if (controller->ucType == ++ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { ++ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", ++ (controller->ucFanParameters & ++ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); ++ rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; + } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + pp_lib_thermal_controller_names[controller->ucType], + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); ++ rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; + i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); + if (rdev->pm.i2c_bus) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index 3eb148667d63..89664933861f 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -163,8 +163,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) + + mutex_lock(&dev_priv->hw_mutex); + ++ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); + while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) +- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); ++ ; + + dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); + +diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c +index 6866448083b2..37ac7b5dbd06 100644 +--- a/drivers/gpu/vga/vga_switcheroo.c ++++ b/drivers/gpu/vga/vga_switcheroo.c +@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain * + } + EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops); + ++void vga_switcheroo_fini_domain_pm_ops(struct device *dev) ++{ ++ dev->pm_domain = NULL; ++} ++EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops); ++ + static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); +diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c +index 9bf4c218cac8..b43d3636681c 100644 +--- a/drivers/hid/hid-logitech-dj.c ++++ b/drivers/hid/hid-logitech-dj.c +@@ -687,7 +687,6 @@ static int logi_dj_raw_event(struct hid_device *hdev, + struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); + struct dj_report *dj_report = (struct dj_report *) data; + unsigned long flags; +- bool report_processed = false; + + dbg_hid("%s, size:%d\n", __func__, size); + +@@ -714,34 +713,42 @@ static int logi_dj_raw_event(struct hid_device *hdev, + * device (via hid_input_report() ) and return 1 so hid-core does not do + * anything else with it. + */ ++ ++ /* case 1) */ ++ if (data[0] != REPORT_ID_DJ_SHORT) ++ return false; ++ + if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || + (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { +- dev_err(&hdev->dev, "%s: invalid device index:%d\n", ++ /* ++ * Device index is wrong, bail out. ++ * This driver can ignore safely the receiver notifications, ++ * so ignore those reports too. 
++ */ ++ if (dj_report->device_index != DJ_RECEIVER_INDEX) ++ dev_err(&hdev->dev, "%s: invalid device index:%d\n", + __func__, dj_report->device_index); + return false; + } + + spin_lock_irqsave(&djrcv_dev->lock, flags); +- if (dj_report->report_id == REPORT_ID_DJ_SHORT) { +- switch (dj_report->report_type) { +- case REPORT_TYPE_NOTIF_DEVICE_PAIRED: +- case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: +- logi_dj_recv_queue_notification(djrcv_dev, dj_report); +- break; +- case REPORT_TYPE_NOTIF_CONNECTION_STATUS: +- if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == +- STATUS_LINKLOSS) { +- logi_dj_recv_forward_null_report(djrcv_dev, dj_report); +- } +- break; +- default: +- logi_dj_recv_forward_report(djrcv_dev, dj_report); ++ switch (dj_report->report_type) { ++ case REPORT_TYPE_NOTIF_DEVICE_PAIRED: ++ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: ++ logi_dj_recv_queue_notification(djrcv_dev, dj_report); ++ break; ++ case REPORT_TYPE_NOTIF_CONNECTION_STATUS: ++ if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == ++ STATUS_LINKLOSS) { ++ logi_dj_recv_forward_null_report(djrcv_dev, dj_report); + } +- report_processed = true; ++ break; ++ default: ++ logi_dj_recv_forward_report(djrcv_dev, dj_report); + } + spin_unlock_irqrestore(&djrcv_dev->lock, flags); + +- return report_processed; ++ return true; + } + + static int logi_dj_probe(struct hid_device *hdev, +diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h +index 4a4000340ce1..daeb0aa4bee9 100644 +--- a/drivers/hid/hid-logitech-dj.h ++++ b/drivers/hid/hid-logitech-dj.h +@@ -27,6 +27,7 @@ + + #define DJ_MAX_PAIRED_DEVICES 6 + #define DJ_MAX_NUMBER_NOTIFICATIONS 8 ++#define DJ_RECEIVER_INDEX 0 + #define DJ_DEVICE_INDEX_MIN 1 + #define DJ_DEVICE_INDEX_MAX 6 + +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 3b43d1cfa936..991ba79cfc72 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, + if (size < 4 || ((size - 4) % 9) != 0) + return 0; + npoints = (size - 4) / 9; ++ if (npoints > 15) { ++ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", ++ size); ++ return 0; ++ } + msc->ntouches = 0; + for (ii = 0; ii < npoints; ii++) + magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); +@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, + if (size < 6 || ((size - 6) % 8) != 0) + return 0; + npoints = (size - 6) / 8; ++ if (npoints > 15) { ++ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", ++ size); ++ return 0; ++ } + msc->ntouches = 0; + for (ii = 0; ii < npoints; ii++) + magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); +diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c +index acbb021065ec..020df3c2e8b4 100644 +--- a/drivers/hid/hid-picolcd_core.c ++++ b/drivers/hid/hid-picolcd_core.c +@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev, + if (!data) + return 1; + ++ if (size > 64) { ++ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n", ++ size); ++ return 0; ++ } ++ + if (report->id == REPORT_KEY_STATE) { + if (data->input_keys) + ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); +diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c +index a26ba7a17c2b..298e557f2d88 100644 +--- a/drivers/hwmon/ds1621.c ++++ b/drivers/hwmon/ds1621.c +@@ -311,6 +311,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da, + data->conf |= 
(resol << DS1621_REG_CONFIG_RESOL_SHIFT); + i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf); + data->update_interval = ds1721_convrates[resol]; ++ data->zbits = 7 - resol; + mutex_unlock(&data->update_lock); + + return count; +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c +index 9d3e846e0137..2f6d43bd0728 100644 +--- a/drivers/i2c/busses/i2c-at91.c ++++ b/drivers/i2c/busses/i2c-at91.c +@@ -101,6 +101,7 @@ struct at91_twi_dev { + unsigned twi_cwgr_reg; + struct at91_twi_pdata *pdata; + bool use_dma; ++ bool recv_len_abort; + struct at91_twi_dma dma; + }; + +@@ -267,12 +268,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) + *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; + --dev->buf_len; + ++ /* return if aborting, we only needed to read RHR to clear RXRDY*/ ++ if (dev->recv_len_abort) ++ return; ++ + /* handle I2C_SMBUS_BLOCK_DATA */ + if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) { +- dev->msg->flags &= ~I2C_M_RECV_LEN; +- dev->buf_len += *dev->buf; +- dev->msg->len = dev->buf_len + 1; +- dev_dbg(dev->dev, "received block length %d\n", dev->buf_len); ++ /* ensure length byte is a valid value */ ++ if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) { ++ dev->msg->flags &= ~I2C_M_RECV_LEN; ++ dev->buf_len += *dev->buf; ++ dev->msg->len = dev->buf_len + 1; ++ dev_dbg(dev->dev, "received block length %d\n", ++ dev->buf_len); ++ } else { ++ /* abort and send the stop by reading one more byte */ ++ dev->recv_len_abort = true; ++ dev->buf_len = 1; ++ } + } + + /* send stop if second but last byte has been read */ +@@ -421,8 +434,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + } + } + +- ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, +- dev->adapter.timeout); ++ ret = wait_for_completion_io_timeout(&dev->cmd_complete, ++ dev->adapter.timeout); + if (ret == 0) { + dev_err(dev->dev, "controller timed out\n"); + at91_init_twi_bus(dev); +@@ -444,6 +457,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + ret = -EIO; + goto error; + } ++ if (dev->recv_len_abort) { ++ dev_err(dev->dev, "invalid smbus block length recvd\n"); ++ ret = -EPROTO; ++ goto error; ++ } ++ + dev_dbg(dev->dev, "transfer complete\n"); + + return 0; +@@ -500,6 +519,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) + dev->buf_len = m_start->len; + dev->buf = m_start->buf; + dev->msg = m_start; ++ dev->recv_len_abort = false; + + ret = at91_do_twi_transfer(dev); + +diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c +index 1672effbcebb..303972ddd7d6 100644 +--- a/drivers/i2c/busses/i2c-ismt.c ++++ b/drivers/i2c/busses/i2c-ismt.c +@@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, + desc->wr_len_cmd = dma_size; + desc->control |= ISMT_DESC_BLK; + priv->dma_buffer[0] = command; +- memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); ++ memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); + } else { + /* Block Read */ + dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); +diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c +index 564c1c076ea2..b07d53fb4731 100644 +--- a/drivers/i2c/busses/i2c-mv64xxx.c ++++ b/drivers/i2c/busses/i2c-mv64xxx.c +@@ -748,8 +748,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, + } + tclk = clk_get_rate(drv_data->clk); + +- rc = of_property_read_u32(np, "clock-frequency", &bus_freq); +- if (rc) ++ if (of_property_read_u32(np, "clock-frequency", &bus_freq)) 
+ bus_freq = 100000; /* 100kHz by default */ + + if (!mv64xxx_find_baud_factors(bus_freq, tclk, +diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c +index 60a3bab42263..4edc95c14e3e 100644 +--- a/drivers/iio/accel/bma180.c ++++ b/drivers/iio/accel/bma180.c +@@ -569,7 +569,7 @@ static int bma180_probe(struct i2c_client *client, + trig->ops = &bma180_trigger_ops; + iio_trigger_set_drvdata(trig, indio_dev); + data->trig = trig; +- indio_dev->trig = trig; ++ indio_dev->trig = iio_trigger_get(trig); + + ret = iio_trigger_register(trig); + if (ret) +diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c +index 46d22f3fb1a9..7fbe136aeba4 100644 +--- a/drivers/iio/accel/hid-sensor-accel-3d.c ++++ b/drivers/iio/accel/hid-sensor-accel-3d.c +@@ -349,7 +349,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev) + error_iio_unreg: + iio_device_unregister(indio_dev); + error_remove_trigger: +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&accel_state->common_attributes); + error_unreg_buffer_funcs: + iio_triggered_buffer_cleanup(indio_dev); + error_free_dev_mem: +@@ -362,10 +362,11 @@ static int hid_accel_3d_remove(struct platform_device *pdev) + { + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_dev *indio_dev = platform_get_drvdata(pdev); ++ struct accel_3d_state *accel_state = iio_priv(indio_dev); + + sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D); + iio_device_unregister(indio_dev); +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&accel_state->common_attributes); + iio_triggered_buffer_cleanup(indio_dev); + kfree(indio_dev->channels); + +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c +index f0d6335ae087..05d2733ef48c 100644 +--- a/drivers/iio/adc/ad_sigma_delta.c ++++ b/drivers/iio/adc/ad_sigma_delta.c +@@ -477,7 +477,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev) + goto error_free_irq; + + /* select default trigger */ +- indio_dev->trig = sigma_delta->trig; ++ indio_dev->trig = iio_trigger_get(sigma_delta->trig); + + return 0; + +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +index 87419c41b991..4129b6b7ca0b 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +@@ -49,11 +49,10 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, + return 0; + } + +-void hid_sensor_remove_trigger(struct iio_dev *indio_dev) ++void hid_sensor_remove_trigger(struct hid_sensor_common *attrb) + { +- iio_trigger_unregister(indio_dev->trig); +- iio_trigger_free(indio_dev->trig); +- indio_dev->trig = NULL; ++ iio_trigger_unregister(attrb->trigger); ++ iio_trigger_free(attrb->trigger); + } + EXPORT_SYMBOL(hid_sensor_remove_trigger); + +@@ -84,7 +83,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, + dev_err(&indio_dev->dev, "Trigger Register Failed\n"); + goto error_free_trig; + } +- indio_dev->trig = trig; ++ attrb->trigger = trig; ++ indio_dev->trig = iio_trigger_get(trig); + + return ret; + +diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h +index 9a8731478eda..ca02f7811aa8 100644 +--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h ++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h +@@ -21,6 +21,6 @@ + + int hid_sensor_setup_trigger(struct 
iio_dev *indio_dev, const char *name, + struct hid_sensor_common *attrb); +-void hid_sensor_remove_trigger(struct iio_dev *indio_dev); ++void hid_sensor_remove_trigger(struct hid_sensor_common *attrb); + + #endif +diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c +index 8fc3a97eb266..8d8ca6f1e16a 100644 +--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c ++++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c +@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, + dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); + goto iio_trigger_register_error; + } +- indio_dev->trig = sdata->trig; ++ indio_dev->trig = iio_trigger_get(sdata->trig); + + return 0; + +diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c +index c688d974d3e3..74bbed7b82d4 100644 +--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c ++++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c +@@ -347,7 +347,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev) + error_iio_unreg: + iio_device_unregister(indio_dev); + error_remove_trigger: +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&gyro_state->common_attributes); + error_unreg_buffer_funcs: + iio_triggered_buffer_cleanup(indio_dev); + error_free_dev_mem: +@@ -360,10 +360,11 @@ static int hid_gyro_3d_remove(struct platform_device *pdev) + { + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_dev *indio_dev = platform_get_drvdata(pdev); ++ struct gyro_3d_state *gyro_state = iio_priv(indio_dev); + + sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D); + iio_device_unregister(indio_dev); +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&gyro_state->common_attributes); + iio_triggered_buffer_cleanup(indio_dev); + kfree(indio_dev->channels); + +diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c +index 6c43af9bb0a4..14917fae2d9d 100644 +--- a/drivers/iio/gyro/itg3200_buffer.c ++++ b/drivers/iio/gyro/itg3200_buffer.c +@@ -135,7 +135,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev) + goto error_free_irq; + + /* select default trigger */ +- indio_dev->trig = st->trig; ++ indio_dev->trig = iio_trigger_get(st->trig); + + return 0; + +diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +index 03b9372c1212..926fccea8de0 100644 +--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c ++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev) + ret = iio_trigger_register(st->trig); + if (ret) + goto error_free_irq; +- indio_dev->trig = st->trig; ++ indio_dev->trig = iio_trigger_get(st->trig); + + return 0; + +diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c +index 1e8e94d4db7d..4fc88e617acf 100644 +--- a/drivers/iio/inkern.c ++++ b/drivers/iio/inkern.c +@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, + index = of_property_match_string(np, "io-channel-names", + name); + chan = of_iio_channel_get(np, index); +- if (!IS_ERR(chan)) ++ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) + break; + else if (name && index >= 0) { + pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", +diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c +index e59d00c3139c..c104bda78c74 100644 +--- a/drivers/iio/light/hid-sensor-als.c ++++ 
b/drivers/iio/light/hid-sensor-als.c +@@ -313,7 +313,7 @@ static int hid_als_probe(struct platform_device *pdev) + error_iio_unreg: + iio_device_unregister(indio_dev); + error_remove_trigger: +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&als_state->common_attributes); + error_unreg_buffer_funcs: + iio_triggered_buffer_cleanup(indio_dev); + error_free_dev_mem: +@@ -326,10 +326,11 @@ static int hid_als_remove(struct platform_device *pdev) + { + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_dev *indio_dev = platform_get_drvdata(pdev); ++ struct als_state *als_state = iio_priv(indio_dev); + + sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS); + iio_device_unregister(indio_dev); +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&als_state->common_attributes); + iio_triggered_buffer_cleanup(indio_dev); + kfree(indio_dev->channels); + +diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c +index a98460b15e4b..ff7b9dabd58d 100644 +--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c ++++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c +@@ -350,7 +350,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev) + error_iio_unreg: + iio_device_unregister(indio_dev); + error_remove_trigger: +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&magn_state->common_attributes); + error_unreg_buffer_funcs: + iio_triggered_buffer_cleanup(indio_dev); + error_free_dev_mem: +@@ -363,10 +363,11 @@ static int hid_magn_3d_remove(struct platform_device *pdev) + { + struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; + struct iio_dev *indio_dev = platform_get_drvdata(pdev); ++ struct magn_3d_state *magn_state = iio_priv(indio_dev); + + sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D); + iio_device_unregister(indio_dev); +- hid_sensor_remove_trigger(indio_dev); ++ hid_sensor_remove_trigger(&magn_state->common_attributes); + iio_triggered_buffer_cleanup(indio_dev); + kfree(indio_dev->channels); + +diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c +index cab3bc7494a2..5888885dd08b 100644 +--- a/drivers/iio/magnetometer/st_magn_core.c ++++ b/drivers/iio/magnetometer/st_magn_core.c +@@ -42,7 +42,8 @@ + #define ST_MAGN_FS_AVL_5600MG 5600 + #define ST_MAGN_FS_AVL_8000MG 8000 + #define ST_MAGN_FS_AVL_8100MG 8100 +-#define ST_MAGN_FS_AVL_10000MG 10000 ++#define ST_MAGN_FS_AVL_12000MG 12000 ++#define ST_MAGN_FS_AVL_16000MG 16000 + + /* CUSTOM VALUES FOR SENSOR 1 */ + #define ST_MAGN_1_WAI_EXP 0x3c +@@ -69,20 +70,20 @@ + #define ST_MAGN_1_FS_AVL_4700_VAL 0x05 + #define ST_MAGN_1_FS_AVL_5600_VAL 0x06 + #define ST_MAGN_1_FS_AVL_8100_VAL 0x07 +-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100 +-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855 +-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670 +-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450 +-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400 +-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330 +-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230 +-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980 +-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760 +-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600 +-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400 +-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355 +-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295 +-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205 ++#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909 ++#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169 ++#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492 ++#define 
ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222 ++#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500 ++#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030 ++#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347 ++#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020 ++#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315 ++#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666 ++#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500 ++#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816 ++#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389 ++#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878 + #define ST_MAGN_1_MULTIREAD_BIT false + + /* CUSTOM VALUES FOR SENSOR 2 */ +@@ -105,10 +106,12 @@ + #define ST_MAGN_2_FS_MASK 0x60 + #define ST_MAGN_2_FS_AVL_4000_VAL 0x00 + #define ST_MAGN_2_FS_AVL_8000_VAL 0x01 +-#define ST_MAGN_2_FS_AVL_10000_VAL 0x02 +-#define ST_MAGN_2_FS_AVL_4000_GAIN 430 +-#define ST_MAGN_2_FS_AVL_8000_GAIN 230 +-#define ST_MAGN_2_FS_AVL_10000_GAIN 230 ++#define ST_MAGN_2_FS_AVL_12000_VAL 0x02 ++#define ST_MAGN_2_FS_AVL_16000_VAL 0x03 ++#define ST_MAGN_2_FS_AVL_4000_GAIN 146 ++#define ST_MAGN_2_FS_AVL_8000_GAIN 292 ++#define ST_MAGN_2_FS_AVL_12000_GAIN 438 ++#define ST_MAGN_2_FS_AVL_16000_GAIN 584 + #define ST_MAGN_2_MULTIREAD_BIT false + #define ST_MAGN_2_OUT_X_L_ADDR 0x28 + #define ST_MAGN_2_OUT_Y_L_ADDR 0x2a +@@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = { + .gain = ST_MAGN_2_FS_AVL_8000_GAIN, + }, + [2] = { +- .num = ST_MAGN_FS_AVL_10000MG, +- .value = ST_MAGN_2_FS_AVL_10000_VAL, +- .gain = ST_MAGN_2_FS_AVL_10000_GAIN, ++ .num = ST_MAGN_FS_AVL_12000MG, ++ .value = ST_MAGN_2_FS_AVL_12000_VAL, ++ .gain = ST_MAGN_2_FS_AVL_12000_GAIN, ++ }, ++ [3] = { ++ .num = ST_MAGN_FS_AVL_16000MG, ++ .value = ST_MAGN_2_FS_AVL_16000_VAL, ++ .gain = ST_MAGN_2_FS_AVL_16000_GAIN, + }, + }, + }, +diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c +index 799a0c3bffc4..6abd3ed3cd51 100644 +--- a/drivers/infiniband/hw/qib/qib_debugfs.c ++++ b/drivers/infiniband/hw/qib/qib_debugfs.c +@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) + struct qib_qp_iter *iter; + loff_t n = *pos; + ++ rcu_read_lock(); + iter = qib_qp_iter_init(s->private); + if (!iter) + return NULL; +@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, + + static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) + { +- /* nothing for now */ ++ rcu_read_unlock(); + } + + static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) +diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c +index 3cca55b51e54..2c018ba0b0ae 100644 +--- a/drivers/infiniband/hw/qib/qib_qp.c ++++ b/drivers/infiniband/hw/qib/qib_qp.c +@@ -1324,7 +1324,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) + struct qib_qp *pqp = iter->qp; + struct qib_qp *qp; + +- rcu_read_lock(); + for (; n < dev->qp_table_size; n++) { + if (pqp) + qp = rcu_dereference(pqp->next); +@@ -1332,18 +1331,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) + qp = rcu_dereference(dev->qp_table[n]); + pqp = qp; + if (qp) { +- if (iter->qp) +- atomic_dec(&iter->qp->refcount); +- atomic_inc(&qp->refcount); +- rcu_read_unlock(); + iter->qp = qp; + iter->n = n; + return 0; + } + } +- rcu_read_unlock(); +- if (iter->qp) +- atomic_dec(&iter->qp->refcount); + return ret; + } + +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index 548d86847d18..4c2b42bf6cde 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -515,7 +515,6 @@ 
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) + init_completion(&isert_conn->conn_wait); + init_completion(&isert_conn->conn_wait_comp_err); + kref_init(&isert_conn->conn_kref); +- kref_get(&isert_conn->conn_kref); + mutex_init(&isert_conn->conn_mutex); + spin_lock_init(&isert_conn->conn_lock); + INIT_LIST_HEAD(&isert_conn->conn_frwr_pool); +@@ -646,7 +645,9 @@ isert_connect_release(struct isert_conn *isert_conn) + static void + isert_connected_handler(struct rdma_cm_id *cma_id) + { +- return; ++ struct isert_conn *isert_conn = cma_id->context; ++ ++ kref_get(&isert_conn->conn_kref); + } + + static void +@@ -698,7 +699,6 @@ isert_disconnect_work(struct work_struct *work) + + wake_up: + complete(&isert_conn->conn_wait); +- isert_put_conn(isert_conn); + } + + static void +@@ -2708,6 +2708,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) + wait_for_completion(&isert_conn->conn_wait_comp_err); + + wait_for_completion(&isert_conn->conn_wait); ++ isert_put_conn(isert_conn); + } + + static void isert_free_conn(struct iscsi_conn *conn) +diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c +index 2dd1d0dd4f7d..6f5d79569136 100644 +--- a/drivers/input/keyboard/atkbd.c ++++ b/drivers/input/keyboard/atkbd.c +@@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), +- DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"), +- }, +- .callback = atkbd_deactivate_fixup, +- }, +- { +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), +- DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"), + }, + .callback = atkbd_deactivate_fixup, + }, +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c +index 233516aff595..0b75b5764f31 100644 +--- a/drivers/input/mouse/elantech.c ++++ b/drivers/input/mouse/elantech.c +@@ -1253,6 +1253,13 @@ static bool elantech_is_signature_valid(const unsigned char *param) + if (param[1] == 0) + return true; + ++ /* ++ * Some models have a revision higher then 20. Meaning param[2] may ++ * be 10 or 20, skip the rates check for these. ++ */ ++ if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) ++ return true; ++ + for (i = 0; i < ARRAY_SIZE(rates); i++) + if (param[2] == rates[i]) + return false; +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index f6fbba53f5d5..4b7996ebd150 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -549,10 +549,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[], + ((buf[0] & 0x04) >> 1) | + ((buf[3] & 0x04) >> 2)); + ++ if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || ++ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && ++ hw->w == 2) { ++ synaptics_parse_agm(buf, priv, hw); ++ return 1; ++ } ++ ++ hw->x = (((buf[3] & 0x10) << 8) | ++ ((buf[1] & 0x0f) << 8) | ++ buf[4]); ++ hw->y = (((buf[3] & 0x20) << 7) | ++ ((buf[1] & 0xf0) << 4) | ++ buf[5]); ++ hw->z = buf[2]; ++ + hw->left = (buf[0] & 0x01) ? 1 : 0; + hw->right = (buf[0] & 0x02) ? 1 : 0; + +- if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { ++ if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) { ++ /* ++ * ForcePads, like Clickpads, use middle button ++ * bits to report primary button clicks. ++ * Unfortunately they report primary button not ++ * only when user presses on the pad above certain ++ * threshold, but also when there are more than one ++ * finger on the touchpad, which interferes with ++ * out multi-finger gestures. 
++ */ ++ if (hw->z == 0) { ++ /* No contacts */ ++ priv->press = priv->report_press = false; ++ } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) { ++ /* ++ * Single-finger touch with pressure above ++ * the threshold. If pressure stays long ++ * enough, we'll start reporting primary ++ * button. We rely on the device continuing ++ * sending data even if finger does not ++ * move. ++ */ ++ if (!priv->press) { ++ priv->press_start = jiffies; ++ priv->press = true; ++ } else if (time_after(jiffies, ++ priv->press_start + ++ msecs_to_jiffies(50))) { ++ priv->report_press = true; ++ } ++ } else { ++ priv->press = false; ++ } ++ ++ hw->left = priv->report_press; ++ ++ } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { + /* + * Clickpad's button is transmitted as middle button, + * however, since it is primary button, we will report +@@ -571,21 +622,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[], + hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; + } + +- if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || +- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && +- hw->w == 2) { +- synaptics_parse_agm(buf, priv, hw); +- return 1; +- } +- +- hw->x = (((buf[3] & 0x10) << 8) | +- ((buf[1] & 0x0f) << 8) | +- buf[4]); +- hw->y = (((buf[3] & 0x20) << 7) | +- ((buf[1] & 0xf0) << 4) | +- buf[5]); +- hw->z = buf[2]; +- + if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && + ((buf[0] ^ buf[3]) & 0x02)) { + switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { +diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h +index e594af0b264b..fb2e076738ae 100644 +--- a/drivers/input/mouse/synaptics.h ++++ b/drivers/input/mouse/synaptics.h +@@ -78,6 +78,11 @@ + * 2 0x08 image sensor image sensor tracks 5 fingers, but only + * reports 2. + * 2 0x20 report min query 0x0f gives min coord reported ++ * 2 0x80 forcepad forcepad is a variant of clickpad that ++ * does not have physical buttons but rather ++ * uses pressure above certain threshold to ++ * report primary clicks. Forcepads also have ++ * clickpad bit set. 
+ */ + #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ + #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ +@@ -86,6 +91,7 @@ + #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) + #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) + #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) ++#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000) + + /* synaptics modes query bits */ + #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) +@@ -177,6 +183,11 @@ struct synaptics_data { + */ + struct synaptics_hw_state agm; + bool agm_pending; /* new AGM packet received */ ++ ++ /* ForcePad handling */ ++ unsigned long press_start; ++ bool press; ++ bool report_press; + }; + + void synaptics_module_init(void); +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 0522c619acda..5f6d3e1d28a0 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -465,6 +465,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + }, + }, ++ { ++ /* Avatar AVIU-145A6 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), ++ }, ++ }, + { } + }; + +@@ -608,6 +615,14 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + }, + }, ++ { ++ /* Fujitsu U574 laptop */ ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c +index 8755f5f3ad37..e4ecf3b64794 100644 +--- a/drivers/input/serio/serport.c ++++ b/drivers/input/serio/serport.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + + MODULE_AUTHOR("Vojtech Pavlik "); + MODULE_DESCRIPTION("Input device TTY line discipline"); +@@ -196,28 +197,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u + return 0; + } + ++static void serport_set_type(struct tty_struct *tty, unsigned long type) ++{ ++ struct serport *serport = tty->disc_data; ++ ++ serport->id.proto = type & 0x000000ff; ++ serport->id.id = (type & 0x0000ff00) >> 8; ++ serport->id.extra = (type & 0x00ff0000) >> 16; ++} ++ + /* + * serport_ldisc_ioctl() allows to set the port protocol, and device ID + */ + +-static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg) ++static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file, ++ unsigned int cmd, unsigned long arg) + { +- struct serport *serport = (struct serport*) tty->disc_data; +- unsigned long type; +- + if (cmd == SPIOCSTYPE) { ++ unsigned long type; ++ + if (get_user(type, (unsigned long __user *) arg)) + return -EFAULT; + +- serport->id.proto = type & 0x000000ff; +- serport->id.id = (type & 0x0000ff00) >> 8; +- serport->id.extra = (type & 0x00ff0000) >> 16; ++ serport_set_type(tty, type); ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++#ifdef CONFIG_COMPAT ++#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t) ++static long serport_ldisc_compat_ioctl(struct tty_struct *tty, ++ struct file *file, ++ unsigned int cmd, unsigned long arg) ++{ ++ if (cmd == COMPAT_SPIOCSTYPE) { ++ void __user *uarg = compat_ptr(arg); ++ compat_ulong_t compat_type; ++ ++ if (get_user(compat_type, 
(compat_ulong_t __user *)uarg)) ++ return -EFAULT; + ++ serport_set_type(tty, compat_type); + return 0; + } + + return -EINVAL; + } ++#endif + + static void serport_ldisc_write_wakeup(struct tty_struct * tty) + { +@@ -241,6 +269,9 @@ static struct tty_ldisc_ops serport_ldisc = { + .close = serport_ldisc_close, + .read = serport_ldisc_read, + .ioctl = serport_ldisc_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = serport_ldisc_compat_ioctl, ++#endif + .receive_buf = serport_ldisc_receive, + .write_wakeup = serport_ldisc_write_wakeup + }; +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c +index 24a60b9979ca..e26905ca74fa 100644 +--- a/drivers/iommu/arm-smmu.c ++++ b/drivers/iommu/arm-smmu.c +@@ -767,8 +767,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) + reg |= TTBCR_EAE | + (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | + (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | +- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) | +- (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); ++ (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); ++ ++ if (!stage1) ++ reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); ++ + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); + + /* MAIR0 (stage-1 only) */ +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index 4c0b921ab5b3..843af26e9050 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -786,8 +786,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) + struct cache *cache = mg->cache; + + if (mg->writeback) { +- cell_defer(cache, mg->old_ocell, false); + clear_dirty(cache, mg->old_oblock, mg->cblock); ++ cell_defer(cache, mg->old_ocell, false); + cleanup_migration(mg); + return; + +@@ -839,8 +839,8 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) + cleanup_migration(mg); + + } else { +- cell_defer(cache, mg->new_ocell, true); + clear_dirty(cache, mg->new_oblock, mg->cblock); ++ cell_defer(cache, mg->new_ocell, true); + cleanup_migration(mg); + } + } +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index c513e5e4cde6..0f64dc596bce 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -1506,6 +1506,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) + unsigned int key_size, opt_params; + unsigned long long tmpll; + int ret; ++ size_t iv_size_padding; + struct dm_arg_set as; + const char *opt_string; + char dummy; +@@ -1542,12 +1543,23 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) + + cc->dmreq_start = sizeof(struct ablkcipher_request); + cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); +- cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); +- cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) & +- ~(crypto_tfm_ctx_alignment() - 1); ++ cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); ++ ++ if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { ++ /* Allocate the padding exactly */ ++ iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) ++ & crypto_ablkcipher_alignmask(any_tfm(cc)); ++ } else { ++ /* ++ * If the cipher requires greater alignment than kmalloc ++ * alignment, we don't know the exact position of the ++ * initialization vector. We must assume worst case. 
++ */ ++ iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); ++ } + + cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + +- sizeof(struct dm_crypt_request) + cc->iv_size); ++ sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); + if (!cc->req_pool) { + ti->error = "Cannot allocate crypt request mempool"; + goto bad; +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 9b582c9444f2..6564eebbdf0e 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -2052,7 +2052,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && +- test_bit(In_sync, &rdev->flags)) ++ !test_bit(Faulty, &rdev->flags)) + r1_sync_page_io(rdev, sect, s, + conf->tmppage, WRITE); + } +@@ -2064,7 +2064,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, + d--; + rdev = conf->mirrors[d].rdev; + if (rdev && +- test_bit(In_sync, &rdev->flags)) { ++ !test_bit(Faulty, &rdev->flags)) { + if (r1_sync_page_io(rdev, sect, s, + conf->tmppage, READ)) { + atomic_add(s, &rdev->corrected_errors); +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 42510e40c23c..582bc3f69a43 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -64,6 +64,10 @@ + #define cpu_to_group(cpu) cpu_to_node(cpu) + #define ANY_GROUP NUMA_NO_NODE + ++static bool devices_handle_discard_safely = false; ++module_param(devices_handle_discard_safely, bool, 0644); ++MODULE_PARM_DESC(devices_handle_discard_safely, ++ "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); + static struct workqueue_struct *raid5_wq; + /* + * Stripe cache +@@ -5938,7 +5942,7 @@ static int run(struct mddev *mddev) + mddev->queue->limits.discard_granularity = stripe; + /* + * unaligned part of discard request will be ignored, so can't +- * guarantee discard_zerors_data ++ * guarantee discard_zeroes_data + */ + mddev->queue->limits.discard_zeroes_data = 0; + +@@ -5963,6 +5967,18 @@ static int run(struct mddev *mddev) + !bdev_get_queue(rdev->bdev)-> + limits.discard_zeroes_data) + discard_supported = false; ++ /* Unfortunately, discard_zeroes_data is not currently ++ * a guarantee - just a hint. So we only allow DISCARD ++ * if the sysadmin has confirmed that only safe devices ++ * are in use by setting a module parameter. ++ */ ++ if (!devices_handle_discard_safely) { ++ if (discard_supported) { ++ pr_info("md/raid456: discard support disabled due to uncertainty.\n"); ++ pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); ++ } ++ discard_supported = false; ++ } + } + + if (discard_supported && +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c +index fbfdd2fc2a36..afbc0d72dd46 100644 +--- a/drivers/media/i2c/adv7604.c ++++ b/drivers/media/i2c/adv7604.c +@@ -1752,7 +1752,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd) + v4l2_info(sd, "HDCP keys read: %s%s\n", + (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no", + (hdmi_read(sd, 0x04) & 0x10) ? 
"ERROR" : ""); +- if (!is_hdmi(sd)) { ++ if (is_hdmi(sd)) { + bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; + bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; + bool audio_mute = io_read(sd, 0x65) & 0x40; +diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c +index 91c694ba42f4..1aa7ecd1fd81 100644 +--- a/drivers/media/pci/cx18/cx18-driver.c ++++ b/drivers/media/pci/cx18/cx18-driver.c +@@ -1092,6 +1092,7 @@ static int cx18_probe(struct pci_dev *pci_dev, + setup.addr = ADDR_UNSET; + setup.type = cx->options.tuner; + setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ ++ setup.config = NULL; + if (cx->options.radio > 0) + setup.mode_mask |= T_RADIO; + setup.tuner_callback = (setup.type == TUNER_XC2028) ? +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c +index de0e87f0b2c3..c96bf9465baf 100644 +--- a/drivers/media/v4l2-core/videobuf2-core.c ++++ b/drivers/media/v4l2-core/videobuf2-core.c +@@ -703,6 +703,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) + * to the userspace. + */ + req->count = allocated_buffers; ++ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); + + return 0; + } +@@ -751,6 +752,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create + memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); + memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); + q->memory = create->memory; ++ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); + } + + num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); +@@ -1371,6 +1373,7 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b, + * dequeued in dqbuf. + */ + list_add_tail(&vb->queued_entry, &q->queued_list); ++ q->waiting_for_buffers = false; + vb->state = VB2_BUF_STATE_QUEUED; + + /* +@@ -1755,6 +1758,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) + * and videobuf, effectively returning control over them to userspace. + */ + __vb2_queue_cancel(q); ++ q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); + + dprintk(3, "Streamoff successful\n"); + return 0; +@@ -2040,9 +2044,16 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) + } + + /* +- * There is nothing to wait for if no buffers have already been queued. ++ * There is nothing to wait for if the queue isn't streaming. + */ +- if (list_empty(&q->queued_list)) ++ if (!vb2_is_streaming(q)) ++ return res | POLLERR; ++ /* ++ * For compatibility with vb1: if QBUF hasn't been called yet, then ++ * return POLLERR as well. This only affects capture queues, output ++ * queues will always initialize waiting_for_buffers to false. 
++ */ ++ if (q->waiting_for_buffers) + return res | POLLERR; + + if (list_empty(&q->done_list)) +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index f74a76d8b7ec..18f0d772e544 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -6898,7 +6898,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) + skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && +- skb->protocol != htons(ETH_P_8021Q)) { ++ skb->protocol != htons(ETH_P_8021Q) && ++ skb->protocol != htons(ETH_P_8021AD)) { + dev_kfree_skb(skb); + goto drop_it_no_recycle; + } +@@ -7890,8 +7891,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) + + entry = tnapi->tx_prod; + base_flags = 0; +- if (skb->ip_summed == CHECKSUM_PARTIAL) +- base_flags |= TXD_FLAG_TCPUDP_CSUM; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { +@@ -7907,6 +7906,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + ++ /* HW/FW can not correctly segment packets that have been ++ * vlan encapsulated. ++ */ ++ if (skb->protocol == htons(ETH_P_8021Q) || ++ skb->protocol == htons(ETH_P_8021AD)) ++ return tg3_tso_bug(tp, skb); ++ + if (!skb_is_gso_v6(skb)) { + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); +@@ -7953,6 +7959,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) + base_flags |= tsflags << 12; + } + } ++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) { ++ /* HW/FW can not correctly checksum packets that have been ++ * vlan encapsulated. ++ */ ++ if (skb->protocol == htons(ETH_P_8021Q) || ++ skb->protocol == htons(ETH_P_8021AD)) { ++ if (skb_checksum_help(skb)) ++ goto drop; ++ } else { ++ base_flags |= TXD_FLAG_TCPUDP_CSUM; ++ } + } + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && +diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c +index 92578690f6de..b020d1cb93c2 100644 +--- a/drivers/net/ethernet/cadence/macb.c ++++ b/drivers/net/ethernet/cadence/macb.c +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + + #include "macb.h" + +@@ -1755,7 +1754,6 @@ static int __init macb_probe(struct platform_device *pdev) + struct phy_device *phydev; + u32 config; + int err = -ENXIO; +- struct pinctrl *pinctrl; + const char *mac; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); +@@ -1764,15 +1762,6 @@ static int __init macb_probe(struct platform_device *pdev) + goto err_out; + } + +- pinctrl = devm_pinctrl_get_select_default(&pdev->dev); +- if (IS_ERR(pinctrl)) { +- err = PTR_ERR(pinctrl); +- if (err == -EPROBE_DEFER) +- goto err_out; +- +- dev_warn(&pdev->dev, "No pinctrl provided\n"); +- } +- + err = -ENOMEM; + dev = alloc_etherdev(sizeof(*bp)); + if (!dev) +diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +index 149355b52ad0..c155b9263a71 100644 +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +@@ -872,6 +872,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) + return -ENOMEM; + dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); ++ if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { ++ __free_page(dmatest_page); ++ return -ENOMEM; ++ } + + /* Run a small DMA test. 
+ * The magic multipliers to the length tell the firmware +@@ -1293,6 +1297,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, + int bytes, int watchdog) + { + struct page *page; ++ dma_addr_t bus; + int idx; + #if MYRI10GE_ALLOC_SIZE > 4096 + int end_offset; +@@ -1317,11 +1322,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, + rx->watchdog_needed = 1; + return; + } ++ ++ bus = pci_map_page(mgp->pdev, page, 0, ++ MYRI10GE_ALLOC_SIZE, ++ PCI_DMA_FROMDEVICE); ++ if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { ++ __free_pages(page, MYRI10GE_ALLOC_ORDER); ++ if (rx->fill_cnt - rx->cnt < 16) ++ rx->watchdog_needed = 1; ++ return; ++ } ++ + rx->page = page; + rx->page_offset = 0; +- rx->bus = pci_map_page(mgp->pdev, page, 0, +- MYRI10GE_ALLOC_SIZE, +- PCI_DMA_FROMDEVICE); ++ rx->bus = bus; ++ + } + rx->info[idx].page = rx->page; + rx->info[idx].page_offset = rx->page_offset; +@@ -2765,6 +2780,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, + mb(); + } + ++static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, ++ struct myri10ge_tx_buf *tx, int idx) ++{ ++ unsigned int len; ++ int last_idx; ++ ++ /* Free any DMA resources we've alloced and clear out the skb slot */ ++ last_idx = (idx + 1) & tx->mask; ++ idx = tx->req & tx->mask; ++ do { ++ len = dma_unmap_len(&tx->info[idx], len); ++ if (len) { ++ if (tx->info[idx].skb != NULL) ++ pci_unmap_single(mgp->pdev, ++ dma_unmap_addr(&tx->info[idx], ++ bus), len, ++ PCI_DMA_TODEVICE); ++ else ++ pci_unmap_page(mgp->pdev, ++ dma_unmap_addr(&tx->info[idx], ++ bus), len, ++ PCI_DMA_TODEVICE); ++ dma_unmap_len_set(&tx->info[idx], len, 0); ++ tx->info[idx].skb = NULL; ++ } ++ idx = (idx + 1) & tx->mask; ++ } while (idx != last_idx); ++} ++ + /* + * Transmit a packet. 
We need to split the packet so that a single + * segment does not cross myri10ge->tx_boundary, so this makes segment +@@ -2788,7 +2832,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, + u32 low; + __be32 high_swapped; + unsigned int len; +- int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; ++ int idx, avail, frag_cnt, frag_idx, count, mss, max_segments; + u16 pseudo_hdr_offset, cksum_offset, queue; + int cum_len, seglen, boundary, rdma_count; + u8 flags, odd_flag; +@@ -2885,9 +2929,12 @@ again: + + /* map the skb for DMA */ + len = skb_headlen(skb); ++ bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); ++ if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) ++ goto drop; ++ + idx = tx->req & tx->mask; + tx->info[idx].skb = skb; +- bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); + dma_unmap_addr_set(&tx->info[idx], bus, bus); + dma_unmap_len_set(&tx->info[idx], len, len); + +@@ -2986,12 +3033,16 @@ again: + break; + + /* map next fragment for DMA */ +- idx = (count + tx->req) & tx->mask; + frag = &skb_shinfo(skb)->frags[frag_idx]; + frag_idx++; + len = skb_frag_size(frag); + bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, + DMA_TO_DEVICE); ++ if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { ++ myri10ge_unmap_tx_dma(mgp, tx, idx); ++ goto drop; ++ } ++ idx = (count + tx->req) & tx->mask; + dma_unmap_addr_set(&tx->info[idx], bus, bus); + dma_unmap_len_set(&tx->info[idx], len, len); + } +@@ -3022,31 +3073,8 @@ again: + return NETDEV_TX_OK; + + abort_linearize: +- /* Free any DMA resources we've alloced and clear out the skb +- * slot so as to not trip up assertions, and to avoid a +- * double-free if linearizing fails */ ++ myri10ge_unmap_tx_dma(mgp, tx, idx); + +- last_idx = (idx + 1) & tx->mask; +- idx = tx->req & tx->mask; +- tx->info[idx].skb = NULL; +- do { +- len = dma_unmap_len(&tx->info[idx], len); +- if (len) { +- if (tx->info[idx].skb != NULL) +- pci_unmap_single(mgp->pdev, +- dma_unmap_addr(&tx->info[idx], +- bus), len, +- PCI_DMA_TODEVICE); +- else +- pci_unmap_page(mgp->pdev, +- dma_unmap_addr(&tx->info[idx], +- bus), len, +- PCI_DMA_TODEVICE); +- dma_unmap_len_set(&tx->info[idx], len, 0); +- tx->info[idx].skb = NULL; +- } +- idx = (idx + 1) & tx->mask; +- } while (idx != last_idx); + if (skb_is_gso(skb)) { + netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); + goto drop; +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index f8135725bcf6..616b4e1dd44c 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -138,6 +138,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) + struct hv_netvsc_packet *packet; + int ret; + unsigned int i, num_pages, npg_data; ++ u32 skb_length = skb->len; + + /* Add multipages for skb->data and additional 2 for RNDIS */ + npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1) +@@ -208,7 +209,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) + ret = rndis_filter_send(net_device_ctx->device_ctx, + packet); + if (ret == 0) { +- net->stats.tx_bytes += skb->len; ++ net->stats.tx_bytes += skb_length; + net->stats.tx_packets++; + } else { + kfree(packet); +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index d0f165f2877b..052d1832d3fb 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -108,17 +108,15 @@ out: + return err; + } + ++/* Requires RTNL */ + static int macvtap_set_queue(struct net_device *dev, struct file 
*file, + struct macvtap_queue *q) + { + struct macvlan_dev *vlan = netdev_priv(dev); +- int err = -EBUSY; + +- rtnl_lock(); + if (vlan->numqueues == MAX_MACVTAP_QUEUES) +- goto out; ++ return -EBUSY; + +- err = 0; + rcu_assign_pointer(q->vlan, vlan); + rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); + sock_hold(&q->sk); +@@ -132,9 +130,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, + vlan->numvtaps++; + vlan->numqueues++; + +-out: +- rtnl_unlock(); +- return err; ++ return 0; + } + + static int macvtap_disable_queue(struct macvtap_queue *q) +@@ -450,11 +446,12 @@ static void macvtap_sock_destruct(struct sock *sk) + static int macvtap_open(struct inode *inode, struct file *file) + { + struct net *net = current->nsproxy->net_ns; +- struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); ++ struct net_device *dev; + struct macvtap_queue *q; +- int err; ++ int err = -ENODEV; + +- err = -ENODEV; ++ rtnl_lock(); ++ dev = dev_get_by_macvtap_minor(iminor(inode)); + if (!dev) + goto out; + +@@ -494,6 +491,7 @@ out: + if (dev) + dev_put(dev); + ++ rtnl_unlock(); + return err; + } + +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 5c245d1fc79c..75864dfaa1c2 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team) + { + if (!team->notify_peers.count || !netif_running(team->dev)) + return; +- atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); ++ atomic_add(team->notify_peers.count, &team->notify_peers.count_pending); + schedule_delayed_work(&team->notify_peers.dw, 0); + } + +@@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team) + { + if (!team->mcast_rejoin.count || !netif_running(team->dev)) + return; +- atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); ++ atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending); + schedule_delayed_work(&team->mcast_rejoin.dw, 0); + } + +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index aa2590a33754..f1735fb61362 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1228,7 +1228,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) + } else if (vxlan->flags & VXLAN_F_L3MISS) { + union vxlan_addr ipa = { + .sin.sin_addr.s_addr = tip, +- .sa.sa_family = AF_INET, ++ .sin.sin_family = AF_INET, + }; + + vxlan_ip_miss(dev, &ipa); +@@ -1389,7 +1389,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) + } else if (vxlan->flags & VXLAN_F_L3MISS) { + union vxlan_addr ipa = { + .sin6.sin6_addr = msg->target, +- .sa.sa_family = AF_INET6, ++ .sin6.sin6_family = AF_INET6, + }; + + vxlan_ip_miss(dev, &ipa); +@@ -1422,7 +1422,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) + if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { + union vxlan_addr ipa = { + .sin.sin_addr.s_addr = pip->daddr, +- .sa.sa_family = AF_INET, ++ .sin.sin_family = AF_INET, + }; + + vxlan_ip_miss(dev, &ipa); +@@ -1443,7 +1443,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) + if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { + union vxlan_addr ipa = { + .sin6.sin6_addr = pip6->daddr, +- .sa.sa_family = AF_INET6, ++ .sin6.sin6_family = AF_INET6, + }; + + vxlan_ip_miss(dev, &ipa); +diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c +index 6a5b7593ea42..d7ce2f12a907 100644 +--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c ++++ 
b/drivers/net/wireless/iwlwifi/dvm/rxon.c +@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) + /* recalculate basic rates */ + iwl_calc_basic_rates(priv, ctx); + ++ /* ++ * force CTS-to-self frames protection if RTS-CTS is not preferred ++ * one aggregation protection method ++ */ ++ if (!priv->hw_params.use_rts_for_aggregation) ++ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; ++ + if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || + !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; +@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, + else + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + ++ if (bss_conf->use_cts_prot) ++ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; ++ else ++ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; ++ + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); + + if (vif->type == NL80211_IFTYPE_AP || +diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +index 8188dcb512f0..e7a2af3ad05a 100644 +--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c ++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +@@ -316,6 +316,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { + {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ + {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ ++ {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */ + {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ + {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ + {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ +diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c +index cdb9f6de132a..562fa6b0cc62 100644 +--- a/drivers/nfc/microread/microread.c ++++ b/drivers/nfc/microread/microread.c +@@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, + targets->sens_res = + be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); + targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; +- memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], +- skb->data[MICROREAD_EMCF_A_LEN]); + targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; ++ if (targets->nfcid1_len > sizeof(targets->nfcid1)) { ++ r = -EINVAL; ++ goto exit_free; ++ } ++ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], ++ targets->nfcid1_len); + break; + case MICROREAD_GATE_ID_MREAD_ISO_A_3: + targets->supported_protocols = +@@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, + targets->sens_res = + be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); + targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; +- memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], +- skb->data[MICROREAD_EMCF_A3_LEN]); + targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; ++ if (targets->nfcid1_len > sizeof(targets->nfcid1)) { ++ r = -EINVAL; ++ goto exit_free; ++ } ++ memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], ++ targets->nfcid1_len); + break; + case MICROREAD_GATE_ID_MREAD_ISO_B: + targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index e3995612ea76..b69b23340a1e 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -717,11 +717,21 @@ 
__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + return NULL; + } + ++ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) { ++ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); ++ return NULL; ++ } ++ + task = conn->login_task; + } else { + if (session->state != ISCSI_STATE_LOGGED_IN) + return NULL; + ++ if (data_size != 0) { ++ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode); ++ return NULL; ++ } ++ + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); + BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); + +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c +index ed4af4708d9a..5f19cc975b27 100644 +--- a/drivers/spi/spi-omap2-mcspi.c ++++ b/drivers/spi/spi-omap2-mcspi.c +@@ -314,7 +314,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, + disable_fifo: + if (t->rx_buf != NULL) + chconf &= ~OMAP2_MCSPI_CHCONF_FFER; +- else ++ ++ if (t->tx_buf != NULL) + chconf &= ~OMAP2_MCSPI_CHCONF_FFET; + + mcspi_write_chconf0(spi, chconf); +diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c +index 7a94ddd42f59..8c4f2896cd0d 100644 +--- a/drivers/staging/iio/meter/ade7758_trigger.c ++++ b/drivers/staging/iio/meter/ade7758_trigger.c +@@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev) + ret = iio_trigger_register(st->trig); + + /* select default trigger */ +- indio_dev->trig = st->trig; ++ indio_dev->trig = iio_trigger_get(st->trig); + if (ret) + goto error_free_irq; + +diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig +index 2156a44d0740..3e0e607c1f29 100644 +--- a/drivers/staging/lustre/lustre/Kconfig ++++ b/drivers/staging/lustre/lustre/Kconfig +@@ -57,4 +57,5 @@ config LUSTRE_TRANSLATE_ERRNOS + config LUSTRE_LLITE_LLOOP + bool "Lustre virtual block device" + depends on LUSTRE_FS && BLOCK ++ depends on !PPC_64K_PAGES && !ARM64_64K_PAGES + default m +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index d2ff40680208..c60277e86e4b 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4511,6 +4511,7 @@ static void iscsit_logout_post_handler_diffcid( + { + struct iscsi_conn *l_conn; + struct iscsi_session *sess = conn->sess; ++ bool conn_found = false; + + if (!sess) + return; +@@ -4519,12 +4520,13 @@ static void iscsit_logout_post_handler_diffcid( + list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { + if (l_conn->cid == cid) { + iscsit_inc_conn_usage_count(l_conn); ++ conn_found = true; + break; + } + } + spin_unlock_bh(&sess->conn_lock); + +- if (!l_conn) ++ if (!conn_found) + return; + + if (l_conn->sock) +diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c +index 4d2e23fc76fd..43b7e6a616b8 100644 +--- a/drivers/target/iscsi/iscsi_target_parameters.c ++++ b/drivers/target/iscsi/iscsi_target_parameters.c +@@ -601,7 +601,7 @@ int iscsi_copy_param_list( + param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); + if (!param_list) { + pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); +- goto err_out; ++ return -1; + } + INIT_LIST_HEAD(¶m_list->param_list); + INIT_LIST_HEAD(¶m_list->extra_response_list); +diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c +index ab9096dc3849..148ffe4c232f 100644 +--- 
a/drivers/tty/serial/8250/8250_dma.c ++++ b/drivers/tty/serial/8250/8250_dma.c +@@ -192,21 +192,28 @@ int serial8250_request_dma(struct uart_8250_port *p) + + dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size, + &dma->rx_addr, GFP_KERNEL); +- if (!dma->rx_buf) { +- dma_release_channel(dma->rxchan); +- dma_release_channel(dma->txchan); +- return -ENOMEM; +- } ++ if (!dma->rx_buf) ++ goto err; + + /* TX buffer */ + dma->tx_addr = dma_map_single(dma->txchan->device->dev, + p->port.state->xmit.buf, + UART_XMIT_SIZE, + DMA_TO_DEVICE); ++ if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) { ++ dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, ++ dma->rx_buf, dma->rx_addr); ++ goto err; ++ } + + dev_dbg_ratelimited(p->port.dev, "got both dma channels\n"); + + return 0; ++err: ++ dma_release_channel(dma->rxchan); ++ dma_release_channel(dma->txchan); ++ ++ return -ENOMEM; + } + EXPORT_SYMBOL_GPL(serial8250_request_dma); + +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index f5df8b7067ad..6d402cf84cf1 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -1567,6 +1567,7 @@ pci_wch_ch353_setup(struct serial_private *priv, + #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 + #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a + #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e ++#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936 + + #define PCI_VENDOR_ID_SUNIX 0x1fd4 + #define PCI_DEVICE_ID_SUNIX_1999 0x1999 +@@ -1875,6 +1876,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { + .setup = sbs_setup, + .exit = sbs_exit, + }, ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_QRK_UART, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_default_setup, ++ }, + /* + * SBS Technologies, Inc., PMC-OCTALPRO 422 + */ +@@ -2450,6 +2458,7 @@ enum pci_board_num_t { + pbn_ADDIDATA_PCIe_4_3906250, + pbn_ADDIDATA_PCIe_8_3906250, + pbn_ce4100_1_115200, ++ pbn_qrk, + pbn_omegapci, + pbn_NETMOS9900_2s_115200, + pbn_brcm_trumanage, +@@ -3186,6 +3195,12 @@ static struct pciserial_board pci_boards[] = { + .base_baud = 921600, + .reg_shift = 2, + }, ++ [pbn_qrk] = { ++ .flags = FL_BASE0, ++ .num_ports = 1, ++ .base_baud = 2764800, ++ .reg_shift = 2, ++ }, + [pbn_omegapci] = { + .flags = FL_BASE0, + .num_ports = 8, +@@ -4854,6 +4869,12 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_ce4100_1_115200 }, + + /* ++ * Intel Quark x1000 ++ */ ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, ++ pbn_qrk }, ++ /* + * Cronyx Omega PCI + */ + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA, +diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c +index 2d51d852b474..ca1123d415c5 100644 +--- a/drivers/usb/chipidea/ci_hdrc_msm.c ++++ b/drivers/usb/chipidea/ci_hdrc_msm.c +@@ -20,13 +20,13 @@ + static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) + { + struct device *dev = ci->gadget.dev.parent; +- int val; + + switch (event) { + case CI_HDRC_CONTROLLER_RESET_EVENT: + dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n"); + writel(0, USB_AHBBURST); + writel(0, USB_AHBMODE); ++ usb_phy_init(ci->transceiver); + break; + case CI_HDRC_CONTROLLER_STOPPED_EVENT: + dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n"); +@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) + * Put the transceiver in non-driving mode. 
Otherwise host + * may not detect soft-disconnection. + */ +- val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL); +- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK; +- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; +- usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL); ++ usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN); + break; + default: + dev_dbg(dev, "unknown ci_hdrc event\n"); +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 721de375c543..d990898ed4dc 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -1957,8 +1957,10 @@ void usb_set_device_state(struct usb_device *udev, + || new_state == USB_STATE_SUSPENDED) + ; /* No change to wakeup settings */ + else if (new_state == USB_STATE_CONFIGURED) +- wakeup = udev->actconfig->desc.bmAttributes +- & USB_CONFIG_ATT_WAKEUP; ++ wakeup = (udev->quirks & ++ USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 : ++ udev->actconfig->desc.bmAttributes & ++ USB_CONFIG_ATT_WAKEUP; + else + wakeup = 0; + } +@@ -4732,9 +4734,10 @@ static void hub_events(void) + + hub = list_entry(tmp, struct usb_hub, event_list); + kref_get(&hub->kref); ++ hdev = hub->hdev; ++ usb_get_dev(hdev); + spin_unlock_irq(&hub_event_lock); + +- hdev = hub->hdev; + hub_dev = hub->intfdev; + intf = to_usb_interface(hub_dev); + dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", +@@ -4947,6 +4950,7 @@ static void hub_events(void) + usb_autopm_put_interface(intf); + loop_disconnected: + usb_unlock_device(hdev); ++ usb_put_dev(hdev); + kref_put(&hub->kref, hub_release); + + } /* end while (1) */ +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 6fd22252273c..347dce4aa76e 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -169,6 +169,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = { + { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0), + .driver_info = USB_QUIRK_RESET_RESUME }, + ++ /* ASUS Base Station(T100) */ ++ { USB_DEVICE(0x0b05, 0x17e0), .driver_info = ++ USB_QUIRK_IGNORE_REMOTE_WAKEUP }, ++ + { } /* terminating entry must be last */ + }; + +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 474162e9d01d..6adf845fe3a0 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -581,12 +581,6 @@ static int dwc3_remove(struct platform_device *pdev) + { + struct dwc3 *dwc = platform_get_drvdata(pdev); + +- usb_phy_set_suspend(dwc->usb2_phy, 1); +- usb_phy_set_suspend(dwc->usb3_phy, 1); +- +- pm_runtime_put(&pdev->dev); +- pm_runtime_disable(&pdev->dev); +- + dwc3_debugfs_exit(dwc); + + switch (dwc->dr_mode) { +@@ -607,8 +601,15 @@ static int dwc3_remove(struct platform_device *pdev) + + dwc3_event_buffers_cleanup(dwc); + dwc3_free_event_buffers(dwc); ++ ++ usb_phy_set_suspend(dwc->usb2_phy, 1); ++ usb_phy_set_suspend(dwc->usb3_phy, 1); ++ + dwc3_core_exit(dwc); + ++ pm_runtime_put_sync(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); ++ + return 0; + } + +diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c +index 7f7ea62e961b..2a0422b7c42f 100644 +--- a/drivers/usb/dwc3/dwc3-omap.c ++++ b/drivers/usb/dwc3/dwc3-omap.c +@@ -592,9 +592,9 @@ static int dwc3_omap_remove(struct platform_device *pdev) + if (omap->extcon_id_dev.edev) + extcon_unregister_interest(&omap->extcon_id_dev); + dwc3_omap_disable_irqs(omap); ++ device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +- device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); + + 
return 0; + } +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c +index 784f6242b70e..51b1f4e18c0d 100644 +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -968,8 +968,6 @@ rescan: + } + + qh->exception = 1; +- if (ehci->rh_state < EHCI_RH_RUNNING) +- qh->qh_state = QH_STATE_IDLE; + switch (qh->qh_state) { + case QH_STATE_LINKED: + WARN_ON(!list_empty(&qh->qtd_list)); +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index e8b4c56dcf62..cd478409cad3 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -470,7 +470,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg) + } + + /* Updates Link Status for super Speed port */ +-static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) ++static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, ++ u32 *status, u32 status_reg) + { + u32 pls = status_reg & PORT_PLS_MASK; + +@@ -509,7 +510,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) + * in which sometimes the port enters compliance mode + * caused by a delay on the host-device negotiation. + */ +- if (pls == USB_SS_PORT_LS_COMP_MOD) ++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && ++ (pls == USB_SS_PORT_LS_COMP_MOD)) + pls |= USB_PORT_STAT_CONNECTION; + } + +@@ -668,7 +670,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + } + /* Update Port Link State */ + if (hcd->speed == HCD_USB3) { +- xhci_hub_report_usb3_link_state(&status, raw_port_status); ++ xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status); + /* + * Verify if all USB3 Ports Have entered U0 already. + * Delete Compliance Mode Timer if so. +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 4483e6a307c0..837c333c827f 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1723,7 +1723,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + } + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); +- for (i = 0; i < num_ports; i++) { ++ for (i = 0; i < num_ports && xhci->rh_bw; i++) { + struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; + for (j = 0; j < XHCI_MAX_INTERVAL; j++) { + struct list_head *ep = &bwt->interval_bw[j].endpoints; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index e3d12f164430..de1901222c00 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -3925,13 +3925,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, + int ret; + + spin_lock_irqsave(&xhci->lock, flags); +- if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { ++ ++ virt_dev = xhci->devs[udev->slot_id]; ++ ++ /* ++ * virt_dev might not exists yet if xHC resumed from hibernate (S4) and ++ * xHC was re-initialized. Exit latency will be set later after ++ * hub_port_finish_reset() is done and xhci->devs[] are re-allocated ++ */ ++ ++ if (!virt_dev || max_exit_latency == virt_dev->current_mel) { + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; + } + + /* Attempt to issue an Evaluate Context command to change the MEL. 
*/ +- virt_dev = xhci->devs[udev->slot_id]; + command = xhci->lpm_command; + ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); + if (!ctrl_ctx) { +diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c +index de98906f786d..0aef801edbc1 100644 +--- a/drivers/usb/misc/sisusbvga/sisusb.c ++++ b/drivers/usb/misc/sisusbvga/sisusb.c +@@ -3248,6 +3248,7 @@ static const struct usb_device_id sisusb_table[] = { + { USB_DEVICE(0x0711, 0x0918) }, + { USB_DEVICE(0x0711, 0x0920) }, + { USB_DEVICE(0x0711, 0x0950) }, ++ { USB_DEVICE(0x0711, 0x5200) }, + { USB_DEVICE(0x182d, 0x021c) }, + { USB_DEVICE(0x182d, 0x0269) }, + { } +diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c +index e9cb1cb8abc7..d85a782d7d05 100644 +--- a/drivers/usb/phy/phy-tegra-usb.c ++++ b/drivers/usb/phy/phy-tegra-usb.c +@@ -881,8 +881,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, + return -ENOMEM; + } + +- tegra_phy->config = devm_kzalloc(&pdev->dev, +- sizeof(*tegra_phy->config), GFP_KERNEL); ++ tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config), ++ GFP_KERNEL); + if (!tegra_phy->config) { + dev_err(&pdev->dev, + "unable to allocate memory for USB UTMIP config\n"); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index e5ac744ac73f..95b32d0e2b26 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ ++ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ +@@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ ++ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ + { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ + { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ + { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index bb68ed5cd3bc..87d816bbf9e0 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -742,6 +742,7 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), + .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, + { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, ++ { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, +@@ -953,6 +954,8 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, + /* Infineon Devices */ + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, ++ /* GE Healthcare devices */ ++ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, + { } /* Terminating entry */ + }; + +diff --git 
a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 70b0b1d88ae9..5937b2d242f2 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -837,6 +837,12 @@ + #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ + + /* ++ * NOVITUS printers ++ */ ++#define NOVITUS_VID 0x1a28 ++#define NOVITUS_BONO_E_PID 0x6010 ++ ++/* + * RT Systems programming cables for various ham radios + */ + #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ +@@ -1385,3 +1391,9 @@ + * ekey biometric systems GmbH (http://ekey.net/) + */ + #define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ ++ ++/* ++ * GE Healthcare devices ++ */ ++#define GE_HEALTHCARE_VID 0x1901 ++#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9da566a3f5c8..e47aabe0c760 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -275,8 +275,12 @@ static void option_instat_callback(struct urb *urb); + #define ZTE_PRODUCT_MF622 0x0001 + #define ZTE_PRODUCT_MF628 0x0015 + #define ZTE_PRODUCT_MF626 0x0031 +-#define ZTE_PRODUCT_MC2718 0xffe8 + #define ZTE_PRODUCT_AC2726 0xfff1 ++#define ZTE_PRODUCT_CDMA_TECH 0xfffe ++#define ZTE_PRODUCT_AC8710T 0xffff ++#define ZTE_PRODUCT_MC2718 0xffe8 ++#define ZTE_PRODUCT_AD3812 0xffeb ++#define ZTE_PRODUCT_MC2716 0xffed + + #define BENQ_VENDOR_ID 0x04a5 + #define BENQ_PRODUCT_H10 0x4068 +@@ -494,6 +498,10 @@ static void option_instat_callback(struct urb *urb); + #define INOVIA_VENDOR_ID 0x20a6 + #define INOVIA_SEW858 0x1105 + ++/* VIA Telecom */ ++#define VIATELECOM_VENDOR_ID 0x15eb ++#define VIATELECOM_PRODUCT_CDS7 0x0001 ++ + /* some devices interfaces need special handling due to a number of reasons */ + enum option_blacklist_reason { + OPTION_BLACKLIST_NONE = 0, +@@ -527,10 +535,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = { + .reserved = BIT(4), + }; + ++static const struct option_blacklist_info zte_ad3812_z_blacklist = { ++ .sendsetup = BIT(0) | BIT(1) | BIT(2), ++}; ++ + static const struct option_blacklist_info zte_mc2718_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), + }; + ++static const struct option_blacklist_info zte_mc2716_z_blacklist = { ++ .sendsetup = BIT(1) | BIT(2) | BIT(3), ++}; ++ + static const struct option_blacklist_info huawei_cdc12_blacklist = { + .reserved = BIT(1) | BIT(2), + }; +@@ -1070,6 +1086,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ +@@ -1544,13 +1561,18 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) }, + +- /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */ ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, + + { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, + { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, +@@ -1724,6 +1746,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, ++ { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, option_ids); +@@ -1917,6 +1940,8 @@ static void option_instat_callback(struct urb *urb) + dev_dbg(dev, "%s: type %x req %x\n", __func__, + req_pkt->bRequestType, req_pkt->bRequest); + } ++ } else if (status == -ENOENT || status == -ESHUTDOWN) { ++ dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status); + } else + dev_err(dev, "%s: error %d\n", __func__, status); + +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index 6e09306b2a5e..81ab710c17ed 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -48,6 +48,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, ++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h +index 42bc082896ac..71fd9da1d6e7 100644 +--- a/drivers/usb/serial/pl2303.h ++++ b/drivers/usb/serial/pl2303.h +@@ -22,6 +22,7 @@ + #define PL2303_PRODUCT_ID_GPRS 0x0609 + #define PL2303_PRODUCT_ID_HCR331 0x331a + #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 ++#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 + + #define ATEN_VENDOR_ID 0x0557 + #define ATEN_VENDOR_ID2 0x0547 +diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c +index d84a3f31ae2d..d09a4e790892 100644 +--- a/drivers/usb/serial/sierra.c ++++ b/drivers/usb/serial/sierra.c +@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = { + /* Sierra Wireless HSPA Non-Composite Device */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, + { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ +- { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ ++ /* Sierra Wireless Direct IP modems */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF), ++ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist ++ }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 
0x68AA, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, + /* AT&T Direct IP LTE modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, +- { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ ++ /* Airprime/Sierra Wireless Direct IP modems */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, + +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index 52260afaa102..cb6eff2b41ed 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -764,29 +764,39 @@ static int usb_serial_probe(struct usb_interface *interface, + if (usb_endpoint_is_bulk_in(endpoint)) { + /* we found a bulk in endpoint */ + dev_dbg(ddev, "found bulk in on endpoint %d\n", i); +- bulk_in_endpoint[num_bulk_in] = endpoint; +- ++num_bulk_in; ++ if (num_bulk_in < MAX_NUM_PORTS) { ++ bulk_in_endpoint[num_bulk_in] = endpoint; ++ ++num_bulk_in; ++ } + } + + if (usb_endpoint_is_bulk_out(endpoint)) { + /* we found a bulk out endpoint */ + dev_dbg(ddev, "found bulk out on endpoint %d\n", i); +- bulk_out_endpoint[num_bulk_out] = endpoint; +- ++num_bulk_out; ++ if (num_bulk_out < MAX_NUM_PORTS) { ++ bulk_out_endpoint[num_bulk_out] = endpoint; ++ ++num_bulk_out; ++ } + } + + if (usb_endpoint_is_int_in(endpoint)) { + /* we found a interrupt in endpoint */ + dev_dbg(ddev, "found interrupt in on endpoint %d\n", i); +- interrupt_in_endpoint[num_interrupt_in] = endpoint; +- ++num_interrupt_in; ++ if (num_interrupt_in < MAX_NUM_PORTS) { ++ interrupt_in_endpoint[num_interrupt_in] = ++ endpoint; ++ ++num_interrupt_in; ++ } + } + + if (usb_endpoint_is_int_out(endpoint)) { + /* we found an interrupt out endpoint */ + dev_dbg(ddev, "found interrupt out on endpoint %d\n", i); +- interrupt_out_endpoint[num_interrupt_out] = endpoint; +- ++num_interrupt_out; ++ if (num_interrupt_out < MAX_NUM_PORTS) { ++ interrupt_out_endpoint[num_interrupt_out] = ++ endpoint; ++ ++num_interrupt_out; ++ } + } + } + +@@ -809,8 +819,10 @@ static int usb_serial_probe(struct usb_interface *interface, + if (usb_endpoint_is_int_in(endpoint)) { + /* we found a interrupt in endpoint */ + dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n"); +- interrupt_in_endpoint[num_interrupt_in] = endpoint; +- ++num_interrupt_in; ++ if (num_interrupt_in < MAX_NUM_PORTS) { ++ interrupt_in_endpoint[num_interrupt_in] = endpoint; ++ ++num_interrupt_in; ++ } + } + } + } +@@ -850,6 +862,11 @@ static int usb_serial_probe(struct usb_interface *interface, + num_ports = type->num_ports; + } + ++ if (num_ports > MAX_NUM_PORTS) { ++ dev_warn(ddev, "too many ports requested: %d\n", num_ports); ++ num_ports = MAX_NUM_PORTS; ++ } ++ + serial->num_ports = num_ports; + serial->num_bulk_in = num_bulk_in; + serial->num_bulk_out = num_bulk_out; +diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c +index eae2c873b39f..88dd32ce5224 100644 +--- a/drivers/usb/serial/zte_ev.c ++++ b/drivers/usb/serial/zte_ev.c +@@ -273,28 +273,8 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) + } + + static const struct usb_device_id id_table[] = { +- /* AC8710, AC8710T */ +- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, +- /* AC8700 */ +- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, + /* MG880 */ + { 
USB_DEVICE(0x19d2, 0xfffd) }, +- { USB_DEVICE(0x19d2, 0xfffc) }, +- { USB_DEVICE(0x19d2, 0xfffb) }, +- /* AC8710_V3 */ +- { USB_DEVICE(0x19d2, 0xfff6) }, +- { USB_DEVICE(0x19d2, 0xfff7) }, +- { USB_DEVICE(0x19d2, 0xfff8) }, +- { USB_DEVICE(0x19d2, 0xfff9) }, +- { USB_DEVICE(0x19d2, 0xffee) }, +- /* AC2716, MC2716 */ +- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, +- /* AD3812 */ +- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, +- { USB_DEVICE(0x19d2, 0xffec) }, +- { USB_DEVICE(0x05C6, 0x3197) }, +- { USB_DEVICE(0x05C6, 0x6000) }, +- { USB_DEVICE(0x05C6, 0x9008) }, + { }, + }; + MODULE_DEVICE_TABLE(usb, id_table); +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 042c83b01046..7f625306ea80 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -101,6 +101,12 @@ UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001, + "PhotoSmart R707", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY), + ++UNUSUAL_DEV( 0x03f3, 0x0001, 0x0000, 0x9999, ++ "Adaptec", ++ "USBConnect 2000", ++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, ++ US_FL_SCM_MULT_TARG ), ++ + /* Reported by Sebastian Kapfer + * and Olaf Hering (different bcd's, same vendor/product) + * for USB floppies that need the SINGLE_LUN enforcement. +@@ -741,6 +747,12 @@ UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SINGLE_LUN ), + ++UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100, ++ "Iomega", ++ "Jaz USB Adapter", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_SINGLE_LUN ), ++ + /* Reported by */ + UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, + "LaCie", +@@ -1113,6 +1125,18 @@ UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NOT_LOCKABLE), + ++UNUSUAL_DEV( 0x085a, 0x0026, 0x0100, 0x0133, ++ "Xircom", ++ "PortGear USB-SCSI (Mac USB Dock)", ++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, ++ US_FL_SCM_MULT_TARG ), ++ ++UNUSUAL_DEV( 0x085a, 0x0028, 0x0100, 0x0133, ++ "Xircom", ++ "PortGear USB to SCSI Converter", ++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, ++ US_FL_SCM_MULT_TARG ), ++ + /* Submitted by Jan De Luyck */ + UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000, + "CITIZEN", +@@ -1945,6 +1969,14 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), + ++/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) ++ * and Mac USB Dock USB-SCSI */ ++UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, ++ "Entrega Technologies", ++ "USB to SCSI Converter", ++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, ++ US_FL_SCM_MULT_TARG ), ++ + /* Reported by Robert Schedel + * Note: this is a 'super top' device like the above 14cd/6600 device */ + UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, +@@ -1967,6 +1999,12 @@ UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), + ++UNUSUAL_DEV( 0x1822, 0x0001, 0x0000, 0x9999, ++ "Ariston Technologies", ++ "iConnect USB to SCSI adapter", ++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, ++ US_FL_SCM_MULT_TARG ), ++ + /* Reported by Hans de Goede + * These Appotech controllers are found in Picture Frames, they provide a + * (buggy) emulation of a cdrom drive which contains the windows software +diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c +index 9209eafc75b1..41db3b182c05 100644 +--- a/drivers/uwb/lc-dev.c ++++ 
b/drivers/uwb/lc-dev.c +@@ -441,16 +441,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) + uwb_dev->mac_addr = *bce->mac_addr; + uwb_dev->dev_addr = bce->dev_addr; + dev_set_name(&uwb_dev->dev, "%s", macbuf); ++ ++ /* plug the beacon cache */ ++ bce->uwb_dev = uwb_dev; ++ uwb_dev->bce = bce; ++ uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ ++ + result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); + if (result < 0) { + dev_err(dev, "new device %s: cannot instantiate device\n", + macbuf); + goto error_dev_add; + } +- /* plug the beacon cache */ +- bce->uwb_dev = uwb_dev; +- uwb_dev->bce = bce; +- uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ ++ + dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", + macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, + dev_name(rc->uwb_dev.dev.parent)); +@@ -458,6 +461,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) + return; + + error_dev_add: ++ bce->uwb_dev = NULL; ++ uwb_bce_put(bce); + kfree(uwb_dev); + return; + } +diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c +index 624e8dc24532..602913d7ae03 100644 +--- a/drivers/xen/manage.c ++++ b/drivers/xen/manage.c +@@ -111,16 +111,11 @@ static void do_suspend(void) + + shutting_down = SHUTDOWN_SUSPEND; + +-#ifdef CONFIG_PREEMPT +- /* If the kernel is preemptible, we need to freeze all the processes +- to prevent them from being in the middle of a pagetable update +- during suspend. */ + err = freeze_processes(); + if (err) { + pr_err("%s: freeze failed %d\n", __func__, err); + goto out; + } +-#endif + + err = dpm_suspend_start(PMSG_FREEZE); + if (err) { +@@ -169,10 +164,8 @@ out_resume: + dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); + + out_thaw: +-#ifdef CONFIG_PREEMPT + thaw_processes(); + out: +-#endif + shutting_down = SHUTDOWN_INVALID; + } + #endif /* CONFIG_HIBERNATE_CALLBACKS */ +diff --git a/fs/aio.c b/fs/aio.c +index b732a9c32042..66bd7e4447ad 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -796,6 +796,9 @@ void exit_aio(struct mm_struct *mm) + unsigned i = 0; + + while (1) { ++ struct completion requests_done = ++ COMPLETION_INITIALIZER_ONSTACK(requests_done); ++ + rcu_read_lock(); + table = rcu_dereference(mm->ioctx_table); + +@@ -823,7 +826,10 @@ void exit_aio(struct mm_struct *mm) + */ + ctx->mmap_size = 0; + +- kill_ioctx(mm, ctx, NULL); ++ kill_ioctx(mm, ctx, &requests_done); ++ ++ /* Wait until all IO for the context are done. */ ++ wait_for_completion(&requests_done); + } + } + +diff --git a/fs/buffer.c b/fs/buffer.c +index b7888527f7c3..fe2182ec812d 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, + bh = page_buffers(page); + if (bh->b_size == size) { + end_block = init_page_buffers(page, bdev, +- index << sizebits, size); ++ (sector_t)index << sizebits, ++ size); + goto done; + } + if (!try_to_free_buffers(page)) +@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, + */ + spin_lock(&inode->i_mapping->private_lock); + link_dev_buffers(page, bh); +- end_block = init_page_buffers(page, bdev, index << sizebits, size); ++ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, ++ size); + spin_unlock(&inode->i_mapping->private_lock); + done: + ret = (block < end_block) ? 
1 : -ENXIO; +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index d13f77ea0034..cee6a796d596 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -387,6 +387,8 @@ struct smb_version_operations { + int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, + int); + int (*validate_negotiate)(const unsigned int, struct cifs_tcon *); ++ /* check if we need to issue closedir */ ++ bool (*dir_needs_close)(struct cifsFileInfo *); + }; + + struct smb_version_values { +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 892a1e947b5a..a2793c93d6ed 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -762,7 +762,7 @@ int cifs_closedir(struct inode *inode, struct file *file) + + cifs_dbg(FYI, "Freeing private data in close dir\n"); + spin_lock(&cifs_file_list_lock); +- if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) { ++ if (server->ops->dir_needs_close(cfile)) { + cfile->invalidHandle = true; + spin_unlock(&cifs_file_list_lock); + if (server->ops->close_dir) +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c +index 59edb8fd33aa..e327a9207ee1 100644 +--- a/fs/cifs/readdir.c ++++ b/fs/cifs/readdir.c +@@ -593,7 +593,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, + /* close and restart search */ + cifs_dbg(FYI, "search backing up - close and restart search\n"); + spin_lock(&cifs_file_list_lock); +- if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) { ++ if (server->ops->dir_needs_close(cfile)) { + cfile->invalidHandle = true; + spin_unlock(&cifs_file_list_lock); + if (server->ops->close_dir) +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index 58bd01efa05b..09b0323a7727 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -947,6 +947,12 @@ cifs_is_read_op(__u32 oplock) + return oplock == OPLOCK_READ; + } + ++static bool ++cifs_dir_needs_close(struct cifsFileInfo *cfile) ++{ ++ return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle; ++} ++ + struct smb_version_operations smb1_operations = { + .send_cancel = send_nt_cancel, + .compare_fids = cifs_compare_fids, +@@ -1014,6 +1020,7 @@ struct smb_version_operations smb1_operations = { + .push_mand_locks = cifs_push_mandatory_locks, + .query_mf_symlink = open_query_close_cifs_symlink, + .is_read_op = cifs_is_read_op, ++ .dir_needs_close = cifs_dir_needs_close, + #ifdef CONFIG_CIFS_XATTR + .query_all_EAs = CIFSSMBQAllEAs, + .set_EA = CIFSSMBSetEA, +diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c +index 824696fb24db..4768cf8be6e2 100644 +--- a/fs/cifs/smb2maperror.c ++++ b/fs/cifs/smb2maperror.c +@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { + {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"}, + {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"}, + {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"}, +- {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"}, ++ {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"}, + {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"}, + {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"}, + {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"}, +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 8956cf67299b..6f79cd867a2e 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -843,6 +843,12 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch) + return le32_to_cpu(lc->lcontext.LeaseState); + } + ++static bool ++smb2_dir_needs_close(struct cifsFileInfo *cfile) ++{ ++ return !cfile->invalidHandle; ++} ++ + struct smb_version_operations 
smb20_operations = { + .compare_fids = smb2_compare_fids, + .setup_request = smb2_setup_request, +@@ -913,6 +919,7 @@ struct smb_version_operations smb20_operations = { + .set_oplock_level = smb2_set_oplock_level, + .create_lease_buf = smb2_create_lease_buf, + .parse_lease_buf = smb2_parse_lease_buf, ++ .dir_needs_close = smb2_dir_needs_close, + }; + + struct smb_version_operations smb21_operations = { +@@ -985,6 +992,7 @@ struct smb_version_operations smb21_operations = { + .set_oplock_level = smb21_set_oplock_level, + .create_lease_buf = smb2_create_lease_buf, + .parse_lease_buf = smb2_parse_lease_buf, ++ .dir_needs_close = smb2_dir_needs_close, + }; + + struct smb_version_operations smb30_operations = { +@@ -1060,6 +1068,7 @@ struct smb_version_operations smb30_operations = { + .create_lease_buf = smb3_create_lease_buf, + .parse_lease_buf = smb3_parse_lease_buf, + .validate_negotiate = smb3_validate_negotiate, ++ .dir_needs_close = smb2_dir_needs_close, + }; + + struct smb_version_values smb20_values = { +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index fb0c67372a90..1f096f694030 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -2084,6 +2084,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, + rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base; + + if (rc) { ++ if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) { ++ srch_inf->endOfSearch = true; ++ rc = 0; ++ } + cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); + goto qdir_exit; + } +@@ -2121,11 +2125,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, + else + cifs_dbg(VFS, "illegal search buffer type\n"); + +- if (rsp->hdr.Status == STATUS_NO_MORE_FILES) +- srch_inf->endOfSearch = 1; +- else +- srch_inf->endOfSearch = 0; +- + return rc; + + qdir_exit: +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 55ebb8886014..758527413665 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + goto error_tgt_fput; + + /* Check if EPOLLWAKEUP is allowed */ +- if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) ++ if (ep_op_has_event(op) && (epds.events & EPOLLWAKEUP) && ++ !capable(CAP_BLOCK_SUSPEND)) + epds.events &= ~EPOLLWAKEUP; + + /* +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c +index 6bf06a07f3e0..223e1cb14345 100644 +--- a/fs/lockd/svc.c ++++ b/fs/lockd/svc.c +@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net) + + error = make_socks(serv, net); + if (error < 0) +- goto err_socks; ++ goto err_bind; + set_grace_period(net); + dprintk("lockd_up_net: per-net data created; net=%p\n", net); + return 0; + +-err_socks: +- svc_rpcb_cleanup(serv, net); + err_bind: + ln->nlmsvc_users--; + return error; +diff --git a/fs/namei.c b/fs/namei.c +index 227c78ae70b4..3ac674b793bf 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -642,24 +642,22 @@ static int complete_walk(struct nameidata *nd) + + static __always_inline void set_root(struct nameidata *nd) + { +- if (!nd->root.mnt) +- get_fs_root(current->fs, &nd->root); ++ get_fs_root(current->fs, &nd->root); + } + + static int link_path_walk(const char *, struct nameidata *); + +-static __always_inline void set_root_rcu(struct nameidata *nd) ++static __always_inline unsigned set_root_rcu(struct nameidata *nd) + { +- if (!nd->root.mnt) { +- struct fs_struct *fs = current->fs; +- unsigned seq; ++ struct fs_struct *fs = current->fs; ++ unsigned seq, res; + +- do { +- seq = 
read_seqcount_begin(&fs->seq); +- nd->root = fs->root; +- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); +- } while (read_seqcount_retry(&fs->seq, seq)); +- } ++ do { ++ seq = read_seqcount_begin(&fs->seq); ++ nd->root = fs->root; ++ res = __read_seqcount_begin(&nd->root.dentry->d_seq); ++ } while (read_seqcount_retry(&fs->seq, seq)); ++ return res; + } + + static void path_put_conditional(struct path *path, struct nameidata *nd) +@@ -859,7 +857,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p) + return PTR_ERR(s); + } + if (*s == '/') { +- set_root(nd); ++ if (!nd->root.mnt) ++ set_root(nd); + path_put(&nd->path); + nd->path = nd->root; + path_get(&nd->root); +@@ -1145,7 +1144,8 @@ static void follow_mount_rcu(struct nameidata *nd) + + static int follow_dotdot_rcu(struct nameidata *nd) + { +- set_root_rcu(nd); ++ if (!nd->root.mnt) ++ set_root_rcu(nd); + + while (1) { + if (nd->path.dentry == nd->root.dentry && +@@ -1247,7 +1247,8 @@ static void follow_mount(struct path *path) + + static void follow_dotdot(struct nameidata *nd) + { +- set_root(nd); ++ if (!nd->root.mnt) ++ set_root(nd); + + while(1) { + struct dentry *old = nd->path.dentry; +@@ -1876,7 +1877,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, + if (*name=='/') { + if (flags & LOOKUP_RCU) { + lock_rcu_walk(); +- set_root_rcu(nd); ++ nd->seq = set_root_rcu(nd); + } else { + set_root(nd); + path_get(&nd->root); +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c +index 55ebebec4d3b..531f1b48b46b 100644 +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -478,6 +478,16 @@ int nfs40_walk_client_list(struct nfs_client *new, + + spin_lock(&nn->nfs_client_lock); + list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { ++ ++ if (pos->rpc_ops != new->rpc_ops) ++ continue; ++ ++ if (pos->cl_proto != new->cl_proto) ++ continue; ++ ++ if (pos->cl_minorversion != new->cl_minorversion) ++ continue; ++ + /* If "pos" isn't marked ready, we can't trust the + * remaining fields in "pos" */ + if (pos->cl_cons_state > NFS_CS_READY) { +@@ -497,15 +507,6 @@ int nfs40_walk_client_list(struct nfs_client *new, + if (pos->cl_cons_state != NFS_CS_READY) + continue; + +- if (pos->rpc_ops != new->rpc_ops) +- continue; +- +- if (pos->cl_proto != new->cl_proto) +- continue; +- +- if (pos->cl_minorversion != new->cl_minorversion) +- continue; +- + if (pos->cl_clientid != new->cl_clientid) + continue; + +@@ -611,6 +612,16 @@ int nfs41_walk_client_list(struct nfs_client *new, + + spin_lock(&nn->nfs_client_lock); + list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { ++ ++ if (pos->rpc_ops != new->rpc_ops) ++ continue; ++ ++ if (pos->cl_proto != new->cl_proto) ++ continue; ++ ++ if (pos->cl_minorversion != new->cl_minorversion) ++ continue; ++ + /* If "pos" isn't marked ready, we can't trust the + * remaining fields in "pos", especially the client + * ID and serverowner fields. 
Wait for CREATE_SESSION +@@ -636,15 +647,6 @@ int nfs41_walk_client_list(struct nfs_client *new, + if (pos->cl_cons_state != NFS_CS_READY) + continue; + +- if (pos->rpc_ops != new->rpc_ops) +- continue; +- +- if (pos->cl_proto != new->cl_proto) +- continue; +- +- if (pos->cl_minorversion != new->cl_minorversion) +- continue; +- + if (!nfs4_match_clientids(pos, new)) + continue; + +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 067d8c90eb1a..609621532fc0 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -2544,23 +2544,23 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) + is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); + is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); + is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); +- /* Calculate the current open share mode */ +- calldata->arg.fmode = 0; +- if (is_rdonly || is_rdwr) +- calldata->arg.fmode |= FMODE_READ; +- if (is_wronly || is_rdwr) +- calldata->arg.fmode |= FMODE_WRITE; + /* Calculate the change in open mode */ ++ calldata->arg.fmode = 0; + if (state->n_rdwr == 0) { +- if (state->n_rdonly == 0) { +- call_close |= is_rdonly || is_rdwr; +- calldata->arg.fmode &= ~FMODE_READ; +- } +- if (state->n_wronly == 0) { +- call_close |= is_wronly || is_rdwr; +- calldata->arg.fmode &= ~FMODE_WRITE; +- } +- } ++ if (state->n_rdonly == 0) ++ call_close |= is_rdonly; ++ else if (is_rdonly) ++ calldata->arg.fmode |= FMODE_READ; ++ if (state->n_wronly == 0) ++ call_close |= is_wronly; ++ else if (is_wronly) ++ calldata->arg.fmode |= FMODE_WRITE; ++ } else if (is_rdwr) ++ calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; ++ ++ if (calldata->arg.fmode == 0) ++ call_close |= is_rdwr; ++ + if (!nfs4_valid_open_stateid(state)) + call_close = 0; + spin_unlock(&state->owner->so_lock); +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 2ffebf2081ce..27d7f2742592 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -113,7 +113,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c) + if (atomic_read(&c->io_count) == 0) + break; + ret = nfs_wait_bit_killable(&c->flags); +- } while (atomic_read(&c->io_count) != 0); ++ } while (atomic_read(&c->io_count) != 0 && !ret); + finish_wait(wq, &q.wait); + return ret; + } +diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c +index 7e350c562e0e..1e0bbae06ee7 100644 +--- a/fs/nilfs2/inode.c ++++ b/fs/nilfs2/inode.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + #include + #include "nilfs.h" +@@ -219,10 +220,10 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) + + static int nilfs_set_page_dirty(struct page *page) + { ++ struct inode *inode = page->mapping->host; + int ret = __set_page_dirty_nobuffers(page); + + if (page_has_buffers(page)) { +- struct inode *inode = page->mapping->host; + unsigned nr_dirty = 0; + struct buffer_head *bh, *head; + +@@ -245,6 +246,10 @@ static int nilfs_set_page_dirty(struct page *page) + + if (nr_dirty) + nilfs_set_file_dirty(inode, nr_dirty); ++ } else if (ret) { ++ unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); ++ ++ nilfs_set_file_dirty(inode, nr_dirty); + } + return ret; + } +diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c +index 238a5930cb3c..9d7e2b9659cb 100644 +--- a/fs/notify/fdinfo.c ++++ b/fs/notify/fdinfo.c +@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode) + { + struct { + struct file_handle handle; +- u8 pad[64]; ++ u8 pad[MAX_HANDLE_SZ]; + } f; + int size, ret, i; + +@@ -50,7 +50,7 @@ static int 
show_mark_fhandle(struct seq_file *m, struct inode *inode) + size = f.handle.handle_bytes >> 2; + + ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0); +- if ((ret == 255) || (ret == -ENOSPC)) { ++ if ((ret == FILEID_INVALID) || (ret < 0)) { + WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); + return 0; + } +diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c +index cf0f103963b1..673c9bf5debc 100644 +--- a/fs/ocfs2/dlm/dlmmaster.c ++++ b/fs/ocfs2/dlm/dlmmaster.c +@@ -650,12 +650,9 @@ void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, + clear_bit(bit, res->refmap); + } + +- +-void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, ++static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) + { +- assert_spin_locked(&res->spinlock); +- + res->inflight_locks++; + + mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, +@@ -663,6 +660,13 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, + __builtin_return_address(0)); + } + ++void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, ++ struct dlm_lock_resource *res) ++{ ++ assert_spin_locked(&res->spinlock); ++ __dlm_lockres_grab_inflight_ref(dlm, res); ++} ++ + void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) + { +@@ -852,10 +856,8 @@ lookup: + /* finally add the lockres to its hash bucket */ + __dlm_insert_lockres(dlm, res); + +- /* Grab inflight ref to pin the resource */ +- spin_lock(&res->spinlock); +- dlm_lockres_grab_inflight_ref(dlm, res); +- spin_unlock(&res->spinlock); ++ /* since this lockres is new it doesn't not require the spinlock */ ++ __dlm_lockres_grab_inflight_ref(dlm, res); + + /* get an extra ref on the mle in case this is a BLOCK + * if so, the creator of the BLOCK may try to put the last +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 062b7925bca0..47cacfd2c9af 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -1270,13 +1270,22 @@ update_time: + return 0; + } + ++/* ++ * Maximum length of linked list formed by ICB hierarchy. The chosen number is ++ * arbitrary - just that we hopefully don't limit any real use of rewritten ++ * inode on write-once media but avoid looping for too long on corrupted media. ++ */ ++#define UDF_MAX_ICB_NESTING 1024 ++ + static void __udf_read_inode(struct inode *inode) + { + struct buffer_head *bh = NULL; + struct fileEntry *fe; + uint16_t ident; + struct udf_inode_info *iinfo = UDF_I(inode); ++ unsigned int indirections = 0; + ++reread: + /* + * Set defaults, but the inode is still incomplete! 
+ * Note: get_new_inode() sets the following on a new inode: +@@ -1313,28 +1322,26 @@ static void __udf_read_inode(struct inode *inode) + ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, + &ident); + if (ident == TAG_IDENT_IE && ibh) { +- struct buffer_head *nbh = NULL; + struct kernel_lb_addr loc; + struct indirectEntry *ie; + + ie = (struct indirectEntry *)ibh->b_data; + loc = lelb_to_cpu(ie->indirectICB.extLocation); + +- if (ie->indirectICB.extLength && +- (nbh = udf_read_ptagged(inode->i_sb, &loc, 0, +- &ident))) { +- if (ident == TAG_IDENT_FE || +- ident == TAG_IDENT_EFE) { +- memcpy(&iinfo->i_location, +- &loc, +- sizeof(struct kernel_lb_addr)); +- brelse(bh); +- brelse(ibh); +- brelse(nbh); +- __udf_read_inode(inode); ++ if (ie->indirectICB.extLength) { ++ brelse(bh); ++ brelse(ibh); ++ memcpy(&iinfo->i_location, &loc, ++ sizeof(struct kernel_lb_addr)); ++ if (++indirections > UDF_MAX_ICB_NESTING) { ++ udf_err(inode->i_sb, ++ "too many ICBs in ICB hierarchy" ++ " (max %d supported)\n", ++ UDF_MAX_ICB_NESTING); ++ make_bad_inode(inode); + return; + } +- brelse(nbh); ++ goto reread; + } + } + brelse(ibh); +diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h +index 32ba45158d39..c32411b4a696 100644 +--- a/include/linux/hid-sensor-hub.h ++++ b/include/linux/hid-sensor-hub.h +@@ -21,6 +21,8 @@ + + #include + #include ++#include ++#include + + /** + * struct hid_sensor_hub_attribute_info - Attribute info +@@ -166,6 +168,7 @@ struct hid_sensor_common { + struct platform_device *pdev; + unsigned usage_id; + bool data_ready; ++ struct iio_trigger *trigger; + struct hid_sensor_hub_attribute_info poll; + struct hid_sensor_hub_attribute_info report_state; + struct hid_sensor_hub_attribute_info power_state; +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h +index 715c343f7c00..0bd3943d759f 100644 +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -90,7 +90,6 @@ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); + extern u16 vlan_dev_vlan_id(const struct net_device *dev); + + extern bool vlan_do_receive(struct sk_buff **skb); +-extern struct sk_buff *vlan_untag(struct sk_buff *skb); + + extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); + extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); +@@ -126,11 +125,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb) + return false; + } + +-static inline struct sk_buff *vlan_untag(struct sk_buff *skb) +-{ +- return skb; +-} +- + static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) + { + return 0; +diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h +index 369cf2cd5144..68f46cd5d514 100644 +--- a/include/linux/iio/trigger.h ++++ b/include/linux/iio/trigger.h +@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig) + put_device(&trig->dev); + } + +-static inline void iio_trigger_get(struct iio_trigger *trig) ++static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig) + { + get_device(&trig->dev); + __module_get(trig->ops->owner); ++ ++ return trig; + } + + /** +diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h +index d235e88cfd7c..8acbb7bfb0cf 100644 +--- a/include/linux/jiffies.h ++++ b/include/linux/jiffies.h +@@ -258,23 +258,11 @@ extern unsigned long preset_lpj; + #define SEC_JIFFIE_SC (32 - SHIFT_HZ) + #endif + #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) +-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) + #define 
SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) + + #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ + TICK_NSEC -1) / (u64)TICK_NSEC)) +-#define USEC_CONVERSION \ +- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ +- TICK_NSEC -1) / (u64)TICK_NSEC)) +-/* +- * USEC_ROUND is used in the timeval to jiffie conversion. See there +- * for more details. It is the scaled resolution rounding value. Note +- * that it is a 64-bit value. Since, when it is applied, we are already +- * in jiffies (albit scaled), it is nothing but the bits we will shift +- * off. +- */ +-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) + /* + * The maximum jiffie value is (MAX_INT >> 1). Here we translate that + * into seconds. The 64-bit case will overflow if we are not careful, +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index 9995165ff3d0..2960dab8b6fc 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -2394,6 +2394,7 @@ extern struct sk_buff *skb_segment(struct sk_buff *skb, + netdev_features_t features); + + unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); ++struct sk_buff *skb_vlan_untag(struct sk_buff *skb); + + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *buffer) +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h +index 55a17b188daa..32e0f5c04e72 100644 +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -41,4 +41,7 @@ + */ + #define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080 + ++/* device generates spurious wakeup, ignore remote wakeup capability */ ++#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200 ++ + #endif /* __LINUX_USB_QUIRKS_H */ +diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h +index 502073a53dd3..b483abd34493 100644 +--- a/include/linux/vga_switcheroo.h ++++ b/include/linux/vga_switcheroo.h +@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev); + void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); + + int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); ++void vga_switcheroo_fini_domain_pm_ops(struct device *dev); + int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); + #else + +@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return + static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} + + static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } ++static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} + static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } + + #endif +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h +index 594521ba0d43..eff358e6945d 100644 +--- a/include/linux/workqueue.h ++++ b/include/linux/workqueue.h +@@ -455,7 +455,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, + alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ + 1, (name)) + #define create_singlethread_workqueue(name) \ +- alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) ++ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) + + extern void 
destroy_workqueue(struct workqueue_struct *wq); + +diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h +index 6781258d0b67..3d4c034b99f0 100644 +--- a/include/media/videobuf2-core.h ++++ b/include/media/videobuf2-core.h +@@ -321,6 +321,9 @@ struct v4l2_fh; + * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued + * @alloc_ctx: memory type/allocator-specific contexts for each plane + * @streaming: current streaming state ++ * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for ++ * buffers. Only set for capture queues if qbuf has not yet been ++ * called since poll() needs to return POLLERR in that situation. + * @fileio: file io emulator internal data, used only if emulator is active + */ + struct vb2_queue { +@@ -353,6 +356,7 @@ struct vb2_queue { + unsigned int plane_sizes[VIDEO_MAX_PLANES]; + + unsigned int streaming:1; ++ unsigned int waiting_for_buffers:1; + + struct vb2_fileio_data *fileio; + }; +diff --git a/include/net/dst.h b/include/net/dst.h +index 3c4c944096c9..9c123761efc1 100644 +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -468,6 +468,7 @@ extern void dst_init(void); + /* Flags for xfrm_lookup flags argument. */ + enum { + XFRM_LOOKUP_ICMP = 1 << 0, ++ XFRM_LOOKUP_QUEUE = 1 << 1, + }; + + struct flowi; +@@ -478,7 +479,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, + int flags) + { + return dst_orig; +-} ++} ++ ++static inline struct dst_entry *xfrm_lookup_route(struct net *net, ++ struct dst_entry *dst_orig, ++ const struct flowi *fl, ++ struct sock *sk, ++ int flags) ++{ ++ return dst_orig; ++} + + static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) + { +@@ -490,6 +500,10 @@ extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig + const struct flowi *fl, struct sock *sk, + int flags); + ++struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, ++ const struct flowi *fl, struct sock *sk, ++ int flags); ++ + /* skb attached with this dst needs transformation if dst->xfrm is valid */ + static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) + { +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h +index de2c78529afa..0a8f6f961baa 100644 +--- a/include/net/inet_connection_sock.h ++++ b/include/net/inet_connection_sock.h +@@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops { + void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); + int (*bind_conflict)(const struct sock *sk, + const struct inet_bind_bucket *tb, bool relax); ++ void (*mtu_reduced)(struct sock *sk); + }; + + /** inet_connection_sock - INET connection oriented sock +diff --git a/include/net/regulatory.h b/include/net/regulatory.h +index f17ed590d64a..23a019668705 100644 +--- a/include/net/regulatory.h ++++ b/include/net/regulatory.h +@@ -78,7 +78,7 @@ struct regulatory_request { + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; +- char alpha2[2]; ++ char alpha2[3]; + u8 dfs_region; + bool intersect; + bool processed; +diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h +index 832f2191489c..c3f0cd9ff233 100644 +--- a/include/net/sctp/command.h ++++ b/include/net/sctp/command.h +@@ -116,7 +116,7 @@ typedef enum { + * analysis of the state functions, but in reality just taken from + * thin air in the hopes othat we don't trigger a kernel panic. 
+ */ +-#define SCTP_MAX_NUM_COMMANDS 14 ++#define SCTP_MAX_NUM_COMMANDS 20 + + typedef union { + __s32 i32; +diff --git a/include/net/sock.h b/include/net/sock.h +index def541a583de..3899018a6b21 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -938,7 +938,6 @@ struct proto { + struct sk_buff *skb); + + void (*release_cb)(struct sock *sk); +- void (*mtu_reduced)(struct sock *sk); + + /* Keeping track of sk's, looking them up, and port selection methods. */ + void (*hash)(struct sock *sk); +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 31c48908ae32..da22d3a23a32 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -454,6 +454,7 @@ extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); + */ + + extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); ++void tcp_v4_mtu_reduced(struct sock *sk); + extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); + extern struct sock * tcp_create_openreq_child(struct sock *sk, + struct request_sock *req, +diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h +index a63c14607f86..f2765c1ca32a 100644 +--- a/include/uapi/linux/xattr.h ++++ b/include/uapi/linux/xattr.h +@@ -13,7 +13,7 @@ + #ifndef _UAPI_LINUX_XATTR_H + #define _UAPI_LINUX_XATTR_H + +-#ifdef __UAPI_DEF_XATTR ++#if __UAPI_DEF_XATTR + #define __USE_KERNEL_XATTR_DEFS + + #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ +diff --git a/init/Kconfig b/init/Kconfig +index d42dc7c6ba64..734aa018e06c 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1408,6 +1408,7 @@ config FUTEX + + config HAVE_FUTEX_CMPXCHG + bool ++ depends on FUTEX + help + Architectures should select this if futex_atomic_cmpxchg_inatomic() + is implemented and always working. This removes a couple of runtime +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 624befa90019..cf2413f6ce7f 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -1503,6 +1503,11 @@ retry: + */ + if (ctx->is_active) { + raw_spin_unlock_irq(&ctx->lock); ++ /* ++ * Reload the task pointer, it might have been changed by ++ * a concurrent perf_event_context_sched_out(). ++ */ ++ task = ctx->task; + goto retry; + } + +@@ -1937,6 +1942,11 @@ retry: + */ + if (ctx->is_active) { + raw_spin_unlock_irq(&ctx->lock); ++ /* ++ * Reload the task pointer, it might have been changed by ++ * a concurrent perf_event_context_sched_out(). 
++ */ ++ task = ctx->task; + goto retry; + } + +@@ -7755,8 +7765,10 @@ int perf_event_init_task(struct task_struct *child) + + for_each_task_context_nr(ctxn) { + ret = perf_event_init_context(child, ctxn); +- if (ret) ++ if (ret) { ++ perf_event_free_task(child); + return ret; ++ } + } + + return 0; +diff --git a/kernel/fork.c b/kernel/fork.c +index 29a1b0283d3b..5b486126147f 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1331,7 +1331,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, + goto bad_fork_cleanup_policy; + retval = audit_alloc(p); + if (retval) +- goto bad_fork_cleanup_policy; ++ goto bad_fork_cleanup_perf; + /* copy all the process information */ + retval = copy_semundo(clone_flags, p); + if (retval) +@@ -1532,8 +1532,9 @@ bad_fork_cleanup_semundo: + exit_sem(p); + bad_fork_cleanup_audit: + audit_free(p); +-bad_fork_cleanup_policy: ++bad_fork_cleanup_perf: + perf_event_free_task(p); ++bad_fork_cleanup_policy: + #ifdef CONFIG_NUMA + mpol_put(p->mempolicy); + bad_fork_cleanup_cgroup: +diff --git a/kernel/kcmp.c b/kernel/kcmp.c +index e30ac0fe61c3..0aa69ea1d8fd 100644 +--- a/kernel/kcmp.c ++++ b/kernel/kcmp.c +@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type) + */ + static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type) + { +- long ret; ++ long t1, t2; + +- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type); ++ t1 = kptr_obfuscate((long)v1, type); ++ t2 = kptr_obfuscate((long)v2, type); + +- return (ret < 0) | ((ret > 0) << 1); ++ return (t1 < t2) | ((t1 > t2) << 1); + } + + /* The caller must have pinned the task */ +diff --git a/kernel/power/main.c b/kernel/power/main.c +index 1d1bf630e6e9..3ae41cdd804a 100644 +--- a/kernel/power/main.c ++++ b/kernel/power/main.c +@@ -293,12 +293,12 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, + { + char *s = buf; + #ifdef CONFIG_SUSPEND +- int i; ++ suspend_state_t i; ++ ++ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) ++ if (pm_states[i].state) ++ s += sprintf(s,"%s ", pm_states[i].label); + +- for (i = 0; i < PM_SUSPEND_MAX; i++) { +- if (pm_states[i] && valid_state(i)) +- s += sprintf(s,"%s ", pm_states[i]); +- } + #endif + #ifdef CONFIG_HIBERNATION + s += sprintf(s, "%s\n", "disk"); +@@ -314,7 +314,7 @@ static suspend_state_t decode_state(const char *buf, size_t n) + { + #ifdef CONFIG_SUSPEND + suspend_state_t state = PM_SUSPEND_MIN; +- const char * const *s; ++ struct pm_sleep_state *s; + #endif + char *p; + int len; +@@ -328,8 +328,9 @@ static suspend_state_t decode_state(const char *buf, size_t n) + + #ifdef CONFIG_SUSPEND + for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) +- if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) +- return state; ++ if (s->state && len == strlen(s->label) ++ && !strncmp(buf, s->label, len)) ++ return s->state; + #endif + + return PM_SUSPEND_ON; +@@ -447,8 +448,8 @@ static ssize_t autosleep_show(struct kobject *kobj, + + #ifdef CONFIG_SUSPEND + if (state < PM_SUSPEND_MAX) +- return sprintf(buf, "%s\n", valid_state(state) ? +- pm_states[state] : "error"); ++ return sprintf(buf, "%s\n", pm_states[state].state ? 
++ pm_states[state].label : "error"); + #endif + #ifdef CONFIG_HIBERNATION + return sprintf(buf, "disk\n"); +diff --git a/kernel/power/power.h b/kernel/power/power.h +index 7d4b7ffb3c1d..f770cad3666c 100644 +--- a/kernel/power/power.h ++++ b/kernel/power/power.h +@@ -175,17 +175,20 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *, + unsigned int, char *); + + #ifdef CONFIG_SUSPEND ++struct pm_sleep_state { ++ const char *label; ++ suspend_state_t state; ++}; ++ + /* kernel/power/suspend.c */ +-extern const char *const pm_states[]; ++extern struct pm_sleep_state pm_states[]; + +-extern bool valid_state(suspend_state_t state); + extern int suspend_devices_and_enter(suspend_state_t state); + #else /* !CONFIG_SUSPEND */ + static inline int suspend_devices_and_enter(suspend_state_t state) + { + return -ENOSYS; + } +-static inline bool valid_state(suspend_state_t state) { return false; } + #endif /* !CONFIG_SUSPEND */ + + #ifdef CONFIG_PM_TEST_SUSPEND +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c +index 62ee437b5c7e..5455d5c3c149 100644 +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -29,10 +29,10 @@ + + #include "power.h" + +-const char *const pm_states[PM_SUSPEND_MAX] = { +- [PM_SUSPEND_FREEZE] = "freeze", +- [PM_SUSPEND_STANDBY] = "standby", +- [PM_SUSPEND_MEM] = "mem", ++struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = { ++ [PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE }, ++ [PM_SUSPEND_STANDBY] = { .label = "standby", }, ++ [PM_SUSPEND_MEM] = { .label = "mem", }, + }; + + static const struct platform_suspend_ops *suspend_ops; +@@ -62,42 +62,34 @@ void freeze_wake(void) + } + EXPORT_SYMBOL_GPL(freeze_wake); + ++static bool valid_state(suspend_state_t state) ++{ ++ /* ++ * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level ++ * support and need to be valid to the low level ++ * implementation, no valid callback implies that none are valid. ++ */ ++ return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); ++} ++ + /** + * suspend_set_ops - Set the global suspend method table. + * @ops: Suspend operations to use. + */ + void suspend_set_ops(const struct platform_suspend_ops *ops) + { ++ suspend_state_t i; ++ + lock_system_sleep(); ++ + suspend_ops = ops; ++ for (i = PM_SUSPEND_STANDBY; i <= PM_SUSPEND_MEM; i++) ++ pm_states[i].state = valid_state(i) ? i : 0; ++ + unlock_system_sleep(); + } + EXPORT_SYMBOL_GPL(suspend_set_ops); + +-bool valid_state(suspend_state_t state) +-{ +- if (state == PM_SUSPEND_FREEZE) { +-#ifdef CONFIG_PM_DEBUG +- if (pm_test_level != TEST_NONE && +- pm_test_level != TEST_FREEZER && +- pm_test_level != TEST_DEVICES && +- pm_test_level != TEST_PLATFORM) { +- printk(KERN_WARNING "Unsupported pm_test mode for " +- "freeze state, please choose " +- "none/freezer/devices/platform.\n"); +- return false; +- } +-#endif +- return true; +- } +- /* +- * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel +- * support and need to be valid to the lowlevel +- * implementation, no valid callback implies that none are valid. +- */ +- return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); +-} +- + /** + * suspend_valid_only_mem - Generic memory-only valid callback. 
+ * +@@ -324,9 +316,17 @@ static int enter_state(suspend_state_t state) + { + int error; + +- if (!valid_state(state)) +- return -ENODEV; +- ++ if (state == PM_SUSPEND_FREEZE) { ++#ifdef CONFIG_PM_DEBUG ++ if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) { ++ pr_warning("PM: Unsupported test mode for freeze state," ++ "please choose none/freezer/devices/platform.\n"); ++ return -EAGAIN; ++ } ++#endif ++ } else if (!valid_state(state)) { ++ return -EINVAL; ++ } + if (!mutex_trylock(&pm_mutex)) + return -EBUSY; + +@@ -337,7 +337,7 @@ static int enter_state(suspend_state_t state) + sys_sync(); + printk("done.\n"); + +- pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); ++ pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label); + error = suspend_prepare(state); + if (error) + goto Unlock; +@@ -345,7 +345,7 @@ static int enter_state(suspend_state_t state) + if (suspend_test(TEST_FREEZER)) + goto Finish; + +- pr_debug("PM: Entering %s sleep\n", pm_states[state]); ++ pr_debug("PM: Entering %s sleep\n", pm_states[state].label); + pm_restrict_gfp_mask(); + error = suspend_devices_and_enter(state); + pm_restore_gfp_mask(); +diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c +index 9b2a1d58558d..269b097e78ea 100644 +--- a/kernel/power/suspend_test.c ++++ b/kernel/power/suspend_test.c +@@ -92,13 +92,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) + } + + if (state == PM_SUSPEND_MEM) { +- printk(info_test, pm_states[state]); ++ printk(info_test, pm_states[state].label); + status = pm_suspend(state); + if (status == -ENODEV) + state = PM_SUSPEND_STANDBY; + } + if (state == PM_SUSPEND_STANDBY) { +- printk(info_test, pm_states[state]); ++ printk(info_test, pm_states[state].label); + status = pm_suspend(state); + } + if (status < 0) +@@ -136,18 +136,16 @@ static char warn_bad_state[] __initdata = + + static int __init setup_test_suspend(char *value) + { +- unsigned i; ++ suspend_state_t i; + + /* "=mem" ==> "mem" */ + value++; +- for (i = 0; i < PM_SUSPEND_MAX; i++) { +- if (!pm_states[i]) +- continue; +- if (strcmp(pm_states[i], value) != 0) +- continue; +- test_state = (__force suspend_state_t) i; +- return 0; +- } ++ for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) ++ if (!strcmp(pm_states[i].label, value)) { ++ test_state = pm_states[i].state; ++ return 0; ++ } ++ + printk(warn_bad_state, value); + return 0; + } +@@ -164,8 +162,8 @@ static int __init test_suspend(void) + /* PM is initialized by now; is that state testable? */ + if (test_state == PM_SUSPEND_ON) + goto done; +- if (!valid_state(test_state)) { +- printk(warn_bad_state, pm_states[test_state]); ++ if (!pm_states[test_state].state) { ++ printk(warn_bad_state, pm_states[test_state].label); + goto done; + } + +diff --git a/kernel/time.c b/kernel/time.c +index 7c7964c33ae7..3c49ab45f822 100644 +--- a/kernel/time.c ++++ b/kernel/time.c +@@ -496,17 +496,20 @@ EXPORT_SYMBOL(usecs_to_jiffies); + * that a remainder subtract here would not do the right thing as the + * resolution values don't fall on second boundries. I.e. the line: + * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. ++ * Note that due to the small error in the multiplier here, this ++ * rounding is incorrect for sufficiently large values of tv_nsec, but ++ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're ++ * OK. + * + * Rather, we just shift the bits off the right. 
+ * + * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec + * value to a scaled second value. + */ +-unsigned long +-timespec_to_jiffies(const struct timespec *value) ++static unsigned long ++__timespec_to_jiffies(unsigned long sec, long nsec) + { +- unsigned long sec = value->tv_sec; +- long nsec = value->tv_nsec + TICK_NSEC - 1; ++ nsec = nsec + TICK_NSEC - 1; + + if (sec >= MAX_SEC_IN_JIFFIES){ + sec = MAX_SEC_IN_JIFFIES; +@@ -517,6 +520,13 @@ timespec_to_jiffies(const struct timespec *value) + (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; + + } ++ ++unsigned long ++timespec_to_jiffies(const struct timespec *value) ++{ ++ return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); ++} ++ + EXPORT_SYMBOL(timespec_to_jiffies); + + void +@@ -533,31 +543,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) + } + EXPORT_SYMBOL(jiffies_to_timespec); + +-/* Same for "timeval" ++/* ++ * We could use a similar algorithm to timespec_to_jiffies (with a ++ * different multiplier for usec instead of nsec). But this has a ++ * problem with rounding: we can't exactly add TICK_NSEC - 1 to the ++ * usec value, since it's not necessarily integral. + * +- * Well, almost. The problem here is that the real system resolution is +- * in nanoseconds and the value being converted is in micro seconds. +- * Also for some machines (those that use HZ = 1024, in-particular), +- * there is a LARGE error in the tick size in microseconds. +- +- * The solution we use is to do the rounding AFTER we convert the +- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off. +- * Instruction wise, this should cost only an additional add with carry +- * instruction above the way it was done above. ++ * We could instead round in the intermediate scaled representation ++ * (i.e. in units of 1/2^(large scale) jiffies) but that's also ++ * perilous: the scaling introduces a small positive error, which ++ * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1 ++ * units to the intermediate before shifting) leads to accidental ++ * overflow and overestimates. ++ * ++ * At the cost of one additional multiplication by a constant, just ++ * use the timespec implementation. 
+ */ + unsigned long + timeval_to_jiffies(const struct timeval *value) + { +- unsigned long sec = value->tv_sec; +- long usec = value->tv_usec; +- +- if (sec >= MAX_SEC_IN_JIFFIES){ +- sec = MAX_SEC_IN_JIFFIES; +- usec = 0; +- } +- return (((u64)sec * SEC_CONVERSION) + +- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >> +- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; ++ return __timespec_to_jiffies(value->tv_sec, ++ value->tv_usec * NSEC_PER_USEC); + } + EXPORT_SYMBOL(timeval_to_jiffies); + +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index fe75444ae7ec..cd45a0727a16 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid) + static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, + ktime_t now) + { ++ unsigned long flags; + struct k_itimer *ptr = container_of(alarm, struct k_itimer, + it.alarm.alarmtimer); +- if (posix_timer_event(ptr, 0) != 0) +- ptr->it_overrun++; ++ enum alarmtimer_restart result = ALARMTIMER_NORESTART; ++ ++ spin_lock_irqsave(&ptr->it_lock, flags); ++ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) { ++ if (posix_timer_event(ptr, 0) != 0) ++ ptr->it_overrun++; ++ } + + /* Re-add periodic timers */ + if (ptr->it.alarm.interval.tv64) { + ptr->it_overrun += alarm_forward(alarm, now, + ptr->it.alarm.interval); +- return ALARMTIMER_RESTART; ++ result = ALARMTIMER_RESTART; + } +- return ALARMTIMER_NORESTART; ++ spin_unlock_irqrestore(&ptr->it_lock, flags); ++ ++ return result; + } + + /** +@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer) + * @new_timer: k_itimer pointer + * @cur_setting: itimerspec data to fill + * +- * Copies the itimerspec data out from the k_itimer ++ * Copies out the current itimerspec data + */ + static void alarm_timer_get(struct k_itimer *timr, + struct itimerspec *cur_setting) + { +- memset(cur_setting, 0, sizeof(struct itimerspec)); ++ ktime_t relative_expiry_time = ++ alarm_expires_remaining(&(timr->it.alarm.alarmtimer)); ++ ++ if (ktime_to_ns(relative_expiry_time) > 0) { ++ cur_setting->it_value = ktime_to_timespec(relative_expiry_time); ++ } else { ++ cur_setting->it_value.tv_sec = 0; ++ cur_setting->it_value.tv_nsec = 0; ++ } + +- cur_setting->it_interval = +- ktime_to_timespec(timr->it.alarm.interval); +- cur_setting->it_value = +- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires); +- return; ++ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval); + } + + /** +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 65da8249bae6..21ee379e3e4b 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, + work = &cpu_buffer->irq_work; + } + +- work->waiters_pending = true; + poll_wait(filp, &work->waiters, poll_table); ++ work->waiters_pending = true; ++ /* ++ * There's a tight race between setting the waiters_pending and ++ * checking if the ring buffer is empty. Once the waiters_pending bit ++ * is set, the next event will wake the task up, but we can get stuck ++ * if there's only a single event in. ++ * ++ * FIXME: Ideally, we need a memory barrier on the writer side as well, ++ * but adding a memory barrier to all events will cause too much of a ++ * performance hit in the fast path. We only need a memory barrier when ++ * the buffer goes from empty to having content. 
But as this race is ++ * extremely small, and it's not a problem if another event comes in, we ++ * will fix it later. ++ */ ++ smp_mb(); + + if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || + (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) +@@ -3358,7 +3372,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) + iter->head = cpu_buffer->reader_page->read; + + iter->cache_reader_page = iter->head_page; +- iter->cache_read = iter->head; ++ iter->cache_read = cpu_buffer->read; + + if (iter->head) + iter->read_stamp = cpu_buffer->read_stamp; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 2ee53749eb48..10532dd43abc 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -1740,21 +1740,24 @@ static int __split_huge_page_map(struct page *page, + if (pmd) { + pgtable = pgtable_trans_huge_withdraw(mm, pmd); + pmd_populate(mm, &_pmd, pgtable); ++ if (pmd_write(*pmd)) ++ BUG_ON(page_mapcount(page) != 1); + + haddr = address; + for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { + pte_t *pte, entry; + BUG_ON(PageCompound(page+i)); ++ /* ++ * Note that pmd_numa is not transferred deliberately ++ * to avoid any possibility that pte_numa leaks to ++ * a PROT_NONE VMA by accident. ++ */ + entry = mk_pte(page + i, vma->vm_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + if (!pmd_write(*pmd)) + entry = pte_wrprotect(entry); +- else +- BUG_ON(page_mapcount(page) != 1); + if (!pmd_young(*pmd)) + entry = pte_mkold(entry); +- if (pmd_numa(*pmd)) +- entry = pte_mknuma(entry); + pte = pte_offset_map(&_pmd, haddr); + BUG_ON(!pte_none(*pte)); + set_pte_at(mm, haddr, pte, entry); +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index cc61c7a7d6a1..3650036bb910 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -1198,14 +1198,14 @@ static struct page *new_page(struct page *page, unsigned long start, int **x) + break; + vma = vma->vm_next; + } +- /* +- * queue_pages_range() confirms that @page belongs to some vma, +- * so vma shouldn't be NULL. 
+- */ +- BUG_ON(!vma); + +- if (PageHuge(page)) ++ if (PageHuge(page)) { ++ BUG_ON(!vma); + return alloc_huge_page_noerr(vma, address, 1); ++ } ++ /* ++ * if !vma, alloc_page_vma() will use task or system default policy ++ */ + return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + } + #else +diff --git a/mm/migrate.c b/mm/migrate.c +index 96d4d814ae2f..d5c84b0a5243 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -164,8 +164,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, + pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); + if (pte_swp_soft_dirty(*ptep)) + pte = pte_mksoft_dirty(pte); ++ ++ /* Recheck VMA as permissions can change since migration started */ + if (is_write_migration_entry(entry)) +- pte = pte_mkwrite(pte); ++ pte = maybe_mkwrite(pte, vma); ++ + #ifdef CONFIG_HUGETLB_PAGE + if (PageHuge(new)) { + pte = pte_mkhuge(pte); +diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c +index 3707c71ae4cd..51108165f829 100644 +--- a/mm/percpu-vm.c ++++ b/mm/percpu-vm.c +@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, + int page_start, int page_end) + { + const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; +- unsigned int cpu; ++ unsigned int cpu, tcpu; + int i; + + for_each_possible_cpu(cpu) { +@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, + struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; + + *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); +- if (!*pagep) { +- pcpu_free_pages(chunk, pages, populated, +- page_start, page_end); +- return -ENOMEM; +- } ++ if (!*pagep) ++ goto err; + } + } + return 0; ++ ++err: ++ while (--i >= page_start) ++ __free_page(pages[pcpu_page_idx(cpu, i)]); ++ ++ for_each_possible_cpu(tcpu) { ++ if (tcpu == cpu) ++ break; ++ for (i = page_start; i < page_end; i++) ++ __free_page(pages[pcpu_page_idx(tcpu, i)]); ++ } ++ return -ENOMEM; + } + + /** +@@ -263,6 +272,7 @@ err: + __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), + page_end - page_start); + } ++ pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); + return err; + } + +diff --git a/mm/percpu.c b/mm/percpu.c +index 25e2ea52db82..9bc1bf914cc8 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -1910,6 +1910,8 @@ void __init setup_per_cpu_areas(void) + + if (pcpu_setup_first_chunk(ai, fc) < 0) + panic("Failed to initialize percpu areas."); ++ ++ pcpu_free_alloc_info(ai); + } + + #endif /* CONFIG_SMP */ +diff --git a/mm/shmem.c b/mm/shmem.c +index ab05681f41cd..e9502a67e300 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2106,8 +2106,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct + + if (new_dentry->d_inode) { + (void) shmem_unlink(new_dir, new_dentry); +- if (they_are_dirs) ++ if (they_are_dirs) { ++ drop_nlink(new_dentry->d_inode); + drop_nlink(old_dir); ++ } + } else if (they_are_dirs) { + drop_nlink(old_dir); + inc_nlink(new_dir); +diff --git a/mm/slab.c b/mm/slab.c +index eb4078c7d183..c180fbb8460b 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -2222,7 +2222,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) + int + __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) + { +- size_t left_over, slab_size, ralign; ++ size_t left_over, slab_size; ++ size_t ralign = BYTES_PER_WORD; + gfp_t gfp; + int err; + size_t size = cachep->size; +@@ -2255,14 +2256,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) + size &= ~(BYTES_PER_WORD - 1); + } + +- /* +- * Redzoning and user store require 
word alignment or possibly larger. +- * Note this will be overridden by architecture or caller mandated +- * alignment if either is greater than BYTES_PER_WORD. +- */ +- if (flags & SLAB_STORE_USER) +- ralign = BYTES_PER_WORD; +- + if (flags & SLAB_RED_ZONE) { + ralign = REDZONE_ALIGN; + /* If redzoning, ensure that the second redzone is suitably +diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c +index 7e57135c7cc4..5d56e05d83dd 100644 +--- a/net/8021q/vlan_core.c ++++ b/net/8021q/vlan_core.c +@@ -106,59 +106,6 @@ u16 vlan_dev_vlan_id(const struct net_device *dev) + } + EXPORT_SYMBOL(vlan_dev_vlan_id); + +-static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) +-{ +- if (skb_cow(skb, skb_headroom(skb)) < 0) { +- kfree_skb(skb); +- return NULL; +- } +- +- memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); +- skb->mac_header += VLAN_HLEN; +- return skb; +-} +- +-struct sk_buff *vlan_untag(struct sk_buff *skb) +-{ +- struct vlan_hdr *vhdr; +- u16 vlan_tci; +- +- if (unlikely(vlan_tx_tag_present(skb))) { +- /* vlan_tci is already set-up so leave this for another time */ +- return skb; +- } +- +- skb = skb_share_check(skb, GFP_ATOMIC); +- if (unlikely(!skb)) +- goto err_free; +- +- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) +- goto err_free; +- +- vhdr = (struct vlan_hdr *) skb->data; +- vlan_tci = ntohs(vhdr->h_vlan_TCI); +- __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); +- +- skb_pull_rcsum(skb, VLAN_HLEN); +- vlan_set_encap_proto(skb, vhdr); +- +- skb = vlan_reorder_header(skb); +- if (unlikely(!skb)) +- goto err_free; +- +- skb_reset_network_header(skb); +- skb_reset_transport_header(skb); +- skb_reset_mac_len(skb); +- +- return skb; +- +-err_free: +- kfree_skb(skb); +- return NULL; +-} +-EXPORT_SYMBOL(vlan_untag); +- +- + /* + * vlan info and vid list + */ +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h +index de50e79b9c34..f02acd7c5472 100644 +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -309,6 +309,9 @@ struct br_input_skb_cb { + int igmp; + int mrouters_only; + #endif ++#ifdef CONFIG_BRIDGE_VLAN_FILTERING ++ bool vlan_filtered; ++#endif + }; + + #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c +index 12ce54c0e8ed..f0db99f57deb 100644 +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -136,7 +136,7 @@ static struct sk_buff *br_vlan_untag(struct sk_buff *skb) + } + + skb->vlan_tci = 0; +- skb = vlan_untag(skb); ++ skb = skb_vlan_untag(skb); + if (skb) + skb->vlan_tci = 0; + +@@ -149,7 +149,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, + { + u16 vid; + +- if (!br->vlan_enabled) ++ /* If this packet was not filtered at input, let it pass */ ++ if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) + goto out; + + /* At this point, we know that the frame was filtered and contains +@@ -194,8 +195,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, + /* If VLAN filtering is disabled on the bridge, all packets are + * permitted. + */ +- if (!br->vlan_enabled) ++ if (!br->vlan_enabled) { ++ BR_INPUT_SKB_CB(skb)->vlan_filtered = false; + return true; ++ } + + /* If there are no vlan in the permitted list, all packets are + * rejected. 
+@@ -203,6 +206,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, + if (!v) + goto drop; + ++ BR_INPUT_SKB_CB(skb)->vlan_filtered = true; ++ + err = br_vlan_get_tag(skb, vid); + if (!*vid) { + u16 pvid = br_get_pvid(v); +@@ -247,7 +252,8 @@ bool br_allowed_egress(struct net_bridge *br, + { + u16 vid; + +- if (!br->vlan_enabled) ++ /* If this packet was not filtered at input, let it pass */ ++ if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) + return true; + + if (!v) +@@ -266,6 +272,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) + struct net_bridge *br = p->br; + struct net_port_vlans *v; + ++ /* If filtering was disabled at input, let it pass. */ + if (!br->vlan_enabled) + return true; + +diff --git a/net/core/dev.c b/net/core/dev.c +index ef2f239cc322..4b1f8d02c68f 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3517,7 +3517,7 @@ another_round: + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || + skb->protocol == cpu_to_be16(ETH_P_8021AD)) { +- skb = vlan_untag(skb); ++ skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + goto unlock; + } +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index 9b40f234b802..9d42f3bc5ee9 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -785,7 +785,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) + } + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { +- skb = vlan_untag(skb); ++ skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + goto out; + } +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 070c51506eb1..93ad6c5b2d77 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -739,7 +739,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, + (nla_total_size(sizeof(struct ifla_vf_mac)) + + nla_total_size(sizeof(struct ifla_vf_vlan)) + + nla_total_size(sizeof(struct ifla_vf_tx_rate)) + +- nla_total_size(sizeof(struct ifla_vf_spoofchk))); ++ nla_total_size(sizeof(struct ifla_vf_spoofchk)) + ++ nla_total_size(sizeof(struct ifla_vf_link_state))); + return size; + } else + return 0; +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 174ebd563868..a8cf33868f9c 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -62,6 +62,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -3552,3 +3553,55 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) + return shinfo->gso_size; + } + EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); ++ ++static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) ++{ ++ if (skb_cow(skb, skb_headroom(skb)) < 0) { ++ kfree_skb(skb); ++ return NULL; ++ } ++ ++ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); ++ skb->mac_header += VLAN_HLEN; ++ return skb; ++} ++ ++struct sk_buff *skb_vlan_untag(struct sk_buff *skb) ++{ ++ struct vlan_hdr *vhdr; ++ u16 vlan_tci; ++ ++ if (unlikely(vlan_tx_tag_present(skb))) { ++ /* vlan_tci is already set-up so leave this for another time */ ++ return skb; ++ } ++ ++ skb = skb_share_check(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) ++ goto err_free; ++ ++ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) ++ goto err_free; ++ ++ vhdr = (struct vlan_hdr *)skb->data; ++ vlan_tci = ntohs(vhdr->h_vlan_TCI); ++ __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); ++ ++ skb_pull_rcsum(skb, VLAN_HLEN); ++ vlan_set_encap_proto(skb, vhdr); ++ ++ skb = skb_reorder_vlan_header(skb); ++ if (unlikely(!skb)) ++ goto err_free; ++ ++ skb_reset_network_header(skb); ++ skb_reset_transport_header(skb); ++ 
skb_reset_mac_len(skb); ++ ++ return skb; ++ ++err_free: ++ kfree_skb(skb); ++ return NULL; ++} ++EXPORT_SYMBOL(skb_vlan_untag); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 9089c4f2965c..f7fe946e534c 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2270,9 +2270,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, + return rt; + + if (flp4->flowi4_proto) +- rt = (struct rtable *) xfrm_lookup(net, &rt->dst, +- flowi4_to_flowi(flp4), +- sk, 0); ++ rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, ++ flowi4_to_flowi(flp4), ++ sk, 0); + + return rt; + } +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 95f67671f56e..172cd999290c 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -2628,7 +2628,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) + */ + static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) + { +- struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + bool recovered = !before(tp->snd_una, tp->high_seq); + +@@ -2654,12 +2653,9 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) + + if (recovered) { + /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ +- icsk->icsk_retransmits = 0; + tcp_try_undo_recovery(sk); + return; + } +- if (flag & FLAG_DATA_ACKED) +- icsk->icsk_retransmits = 0; + if (tcp_is_reno(tp)) { + /* A Reno DUPACK means new data in F-RTO step 2.b above are + * delivered. Lower inflight to clock out (re)tranmissions. +@@ -3348,8 +3344,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) + tcp_rearm_rto(sk); + +- if (after(ack, prior_snd_una)) ++ if (after(ack, prior_snd_una)) { + flag |= FLAG_SND_UNA_ADVANCED; ++ icsk->icsk_retransmits = 0; ++ } + + prior_fackets = tp->fackets_out; + prior_in_flight = tcp_packets_in_flight(tp); +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 5031f68b545d..45f370302e4d 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tcp_v4_connect); + * It can be called through tcp_release_cb() if socket was owned by user + * at the time tcp_v4_err() was called to handle ICMP message. 
+ */ +-static void tcp_v4_mtu_reduced(struct sock *sk) ++void tcp_v4_mtu_reduced(struct sock *sk) + { + struct dst_entry *dst; + struct inet_sock *inet = inet_sk(sk); +@@ -299,6 +299,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk) + tcp_simple_retransmit(sk); + } /* else let the usual retransmit timer handle it */ + } ++EXPORT_SYMBOL(tcp_v4_mtu_reduced); + + static void do_redirect(struct sk_buff *skb, struct sock *sk) + { +@@ -2117,6 +2118,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = { + .compat_setsockopt = compat_ip_setsockopt, + .compat_getsockopt = compat_ip_getsockopt, + #endif ++ .mtu_reduced = tcp_v4_mtu_reduced, + }; + EXPORT_SYMBOL(ipv4_specific); + +@@ -2796,7 +2798,6 @@ struct proto tcp_prot = { + .sendpage = tcp_sendpage, + .backlog_rcv = tcp_v4_do_rcv, + .release_cb = tcp_release_cb, +- .mtu_reduced = tcp_v4_mtu_reduced, + .hash = inet_hash, + .unhash = inet_unhash, + .get_port = inet_csk_get_port, +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 0cce660cf7dd..4c0e55f14f2e 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -785,7 +785,7 @@ void tcp_release_cb(struct sock *sk) + __sock_put(sk); + } + if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { +- sk->sk_prot->mtu_reduced(sk); ++ inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); + __sock_put(sk); + } + } +@@ -2045,9 +2045,7 @@ void tcp_send_loss_probe(struct sock *sk) + if (WARN_ON(!skb || !tcp_skb_pcount(skb))) + goto rearm_timer; + +- /* Probe with zero data doesn't trigger fast recovery. */ +- if (skb->len > 0) +- err = __tcp_retransmit_skb(sk, skb); ++ err = __tcp_retransmit_skb(sk, skb); + + /* Record snd_nxt for loss detection. */ + if (likely(!err)) +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 994d73cc2fe0..99988b86f6af 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -790,7 +790,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) + encap_limit = t->parms.encap_limit; + + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); +- fl6.flowi6_proto = IPPROTO_IPIP; ++ fl6.flowi6_proto = IPPROTO_GRE; + + dsfield = ipv4_get_dsfield(iph); + +@@ -840,7 +840,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) + encap_limit = t->parms.encap_limit; + + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); +- fl6.flowi6_proto = IPPROTO_IPV6; ++ fl6.flowi6_proto = IPPROTO_GRE; + + dsfield = ipv6_get_dsfield(ipv6h); + if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index e5e59c36cfc5..602533d9cb97 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -994,7 +994,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + if (can_sleep) + fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; + +- return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); ++ return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + } + EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); + +@@ -1030,7 +1030,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + if (can_sleep) + fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; + +- return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); ++ return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + } + EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); + +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index e46fcde5a79b..ebdf18bbcc02 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -101,19 +101,19 @@ static struct ip_tunnel 
*ipip6_tunnel_lookup(struct net *net, + for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { + if (local == t->parms.iph.saddr && + remote == t->parms.iph.daddr && +- (!dev || !t->parms.link || dev->iflink == t->parms.link) && ++ (!dev || !t->parms.link || dev->ifindex == t->parms.link) && + (t->dev->flags & IFF_UP)) + return t; + } + for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { + if (remote == t->parms.iph.daddr && +- (!dev || !t->parms.link || dev->iflink == t->parms.link) && ++ (!dev || !t->parms.link || dev->ifindex == t->parms.link) && + (t->dev->flags & IFF_UP)) + return t; + } + for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { + if (local == t->parms.iph.saddr && +- (!dev || !t->parms.link || dev->iflink == t->parms.link) && ++ (!dev || !t->parms.link || dev->ifindex == t->parms.link) && + (t->dev->flags & IFF_UP)) + return t; + } +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 5c71501fc917..3058c4a89b3b 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1651,6 +1651,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, + #endif ++ .mtu_reduced = tcp_v6_mtu_reduced, + }; + + #ifdef CONFIG_TCP_MD5SIG +@@ -1682,6 +1683,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, + #endif ++ .mtu_reduced = tcp_v4_mtu_reduced, + }; + + #ifdef CONFIG_TCP_MD5SIG +@@ -1919,7 +1921,6 @@ struct proto tcpv6_prot = { + .sendpage = tcp_sendpage, + .backlog_rcv = tcp_v6_do_rcv, + .release_cb = tcp_release_cb, +- .mtu_reduced = tcp_v6_mtu_reduced, + .hash = tcp_v6_hash, + .unhash = inet_unhash, + .get_port = inet_csk_get_port, +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c +index 164fa9dcd97d..c3ae2411650c 100644 +--- a/net/l2tp/l2tp_ppp.c ++++ b/net/l2tp/l2tp_ppp.c +@@ -756,7 +756,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + /* If PMTU discovery was enabled, use the MTU that was discovered */ + dst = sk_dst_get(tunnel->sock); + if (dst != NULL) { +- u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); ++ u32 pmtu = dst_mtu(dst); ++ + if (pmtu != 0) + session->mtu = session->mru = pmtu - + PPPOL2TP_HEADER_OVERHEAD; +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 591d990a06e7..5fa4ee07dd7a 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -4340,8 +4340,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, + rcu_read_unlock(); + + if (bss->wmm_used && bss->uapsd_supported && +- (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) && +- sdata->wmm_acm != 0xff) { ++ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { + assoc_data->uapsd = true; + ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; + } else { +diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c +index d585626d7676..d9a7f006b74e 100644 +--- a/net/netfilter/ipvs/ip_vs_conn.c ++++ b/net/netfilter/ipvs/ip_vs_conn.c +@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data) + ip_vs_control_del(cp); + + if (cp->flags & IP_VS_CONN_F_NFCT) { +- ip_vs_conn_drop_conntrack(cp); + /* Do not access conntracks during subsys cleanup + * because nf_conntrack_find_get can not be used after + * conntrack cleanup for the net. 
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index e2d38e5318d4..f7a758fae8e5 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { + { + .hook = ip_vs_local_reply6, + .owner = THIS_MODULE, +- .pf = NFPROTO_IPV4, ++ .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_OUT, + .priority = NF_IP6_PRI_NAT_DST + 1, + }, +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index 7f0e1cf2d7e8..1692e7534759 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, + iph->nexthdr = IPPROTO_IPV6; + iph->payload_len = old_iph->payload_len; + be16_add_cpu(&iph->payload_len, sizeof(*old_iph)); +- iph->priority = old_iph->priority; + memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); ++ ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph)); + iph->daddr = cp->daddr.in6; + iph->saddr = saddr; + iph->hop_limit = old_iph->hop_limit; +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index d9a2598a5190..2a4f35e7b5c0 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -204,7 +204,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, + if (nskb) { + nskb->dev = dev; + nskb->protocol = htons((u16) sk->sk_protocol); +- ++ skb_reset_network_header(nskb); + ret = dev_queue_xmit(nskb); + if (unlikely(ret > 0)) + ret = net_xmit_errno(ret); +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 65cfaa816075..07c4ae308bad 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + + static int make_writable(struct sk_buff *skb, int write_len) + { ++ if (!pskb_may_pull(skb, write_len)) ++ return -ENOMEM; ++ + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + +@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) + + vlan_set_encap_proto(skb, vhdr); + skb->mac_header += VLAN_HLEN; ++ if (skb_network_offset(skb) < ETH_HLEN) ++ skb_set_network_header(skb, ETH_HLEN); + skb_reset_mac_len(skb); + + return 0; +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 88cfbc189558..a84612585fc8 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -565,6 +565,7 @@ static void init_prb_bdqc(struct packet_sock *po, + p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); + p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; + ++ p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); + prb_init_ft_ops(p1, req_u); + prb_setup_retire_blk_timer(po, tx_ring); + prb_open_block(p1, pbd); +@@ -1814,6 +1815,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, + if ((int)snaplen < 0) + snaplen = 0; + } ++ } else if (unlikely(macoff + snaplen > ++ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { ++ u32 nval; ++ ++ nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; ++ pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. 
macoff=%u\n", ++ snaplen, nval, macoff); ++ snaplen = nval; ++ if (unlikely((int)snaplen < 0)) { ++ snaplen = 0; ++ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; ++ } + } + spin_lock(&sk->sk_receive_queue.lock); + h.raw = packet_current_rx_frame(po, skb, +@@ -3610,6 +3623,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + goto out; + if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) + goto out; ++ if (po->tp_version >= TPACKET_V3 && ++ (int)(req->tp_block_size - ++ BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) ++ goto out; + if (unlikely(req->tp_frame_size < po->tp_hdrlen + + po->tp_reserve)) + goto out; +diff --git a/net/packet/internal.h b/net/packet/internal.h +index 1035fa2d909c..ca086c0c2c08 100644 +--- a/net/packet/internal.h ++++ b/net/packet/internal.h +@@ -29,6 +29,7 @@ struct tpacket_kbdq_core { + char *pkblk_start; + char *pkblk_end; + int kblk_size; ++ unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 5dcfe8ca7f69..1dbcc6a4d800 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -1776,9 +1776,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, + /* Update the content of current association. */ + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); +- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, +- SCTP_STATE(SCTP_STATE_ESTABLISHED)); +- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); ++ if (sctp_state(asoc, SHUTDOWN_PENDING) && ++ (sctp_sstate(asoc->base.sk, CLOSING) || ++ sock_flag(asoc->base.sk, SOCK_DEAD))) { ++ /* if were currently in SHUTDOWN_PENDING, but the socket ++ * has been closed by user, don't transition to ESTABLISHED. ++ * Instead trigger SHUTDOWN bundled with COOKIE_ACK. 
++ */ ++ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); ++ return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, ++ SCTP_ST_CHUNK(0), NULL, ++ commands); ++ } else { ++ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, ++ SCTP_STATE(SCTP_STATE_ESTABLISHED)); ++ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); ++ } + return SCTP_DISPOSITION_CONSUME; + + nomem_ev: +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index c2853bbf8059..c3ef31a96de9 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -6797,6 +6797,9 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) + void *hdr = ((void **)skb->cb)[1]; + struct nlattr *data = ((void **)skb->cb)[2]; + ++ /* clear CB data for netlink core to own from now on */ ++ memset(skb->cb, 0, sizeof(skb->cb)); ++ + nla_nest_end(skb, data); + genlmsg_end(skb, hdr); + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0, +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 76e1873811d4..6b07a5913383 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -46,6 +46,11 @@ static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); + static struct dst_entry *xfrm_policy_sk_bundles; + static DEFINE_RWLOCK(xfrm_policy_lock); + ++struct xfrm_flo { ++ struct dst_entry *dst_orig; ++ u8 flags; ++}; ++ + static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); + static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] + __read_mostly; +@@ -1875,13 +1880,14 @@ static int xdst_queue_output(struct sk_buff *skb) + } + + static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, +- struct dst_entry *dst, ++ struct xfrm_flo *xflo, + const struct flowi *fl, + int num_xfrms, + u16 family) + { + int err; + struct net_device *dev; ++ struct dst_entry *dst; + struct dst_entry *dst1; + struct xfrm_dst *xdst; + +@@ -1889,10 +1895,13 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, + if (IS_ERR(xdst)) + return xdst; + +- if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 || ++ if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || ++ net->xfrm.sysctl_larval_drop || ++ num_xfrms <= 0 || + (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP)) + return xdst; + ++ dst = xflo->dst_orig; + dst1 = &xdst->u.dst; + dst_hold(dst); + xdst->route = dst; +@@ -1934,7 +1943,7 @@ static struct flow_cache_object * + xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, + struct flow_cache_object *oldflo, void *ctx) + { +- struct dst_entry *dst_orig = (struct dst_entry *)ctx; ++ struct xfrm_flo *xflo = (struct xfrm_flo *)ctx; + struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; + struct xfrm_dst *xdst, *new_xdst; + int num_pols = 0, num_xfrms = 0, i, err, pol_dead; +@@ -1975,7 +1984,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, + goto make_dummy_bundle; + } + +- new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); ++ new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, ++ xflo->dst_orig); + if (IS_ERR(new_xdst)) { + err = PTR_ERR(new_xdst); + if (err != -EAGAIN) +@@ -2009,7 +2019,7 @@ make_dummy_bundle: + /* We found policies, but there's no bundles to instantiate: + * either because the policy blocks, has no transformations or + * we could not build template (no xfrm_states).*/ +- xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family); ++ xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family); + if (IS_ERR(xdst)) { + xfrm_pols_put(pols, num_pols); + return ERR_CAST(xdst); +@@ 
-2109,13 +2119,18 @@ restart: + } + + if (xdst == NULL) { ++ struct xfrm_flo xflo; ++ ++ xflo.dst_orig = dst_orig; ++ xflo.flags = flags; ++ + /* To accelerate a bit... */ + if ((dst_orig->flags & DST_NOXFRM) || + !net->xfrm.policy_count[XFRM_POLICY_OUT]) + goto nopol; + + flo = flow_cache_lookup(net, fl, family, dir, +- xfrm_bundle_lookup, dst_orig); ++ xfrm_bundle_lookup, &xflo); + if (flo == NULL) + goto nopol; + if (IS_ERR(flo)) { +@@ -2143,7 +2158,7 @@ restart: + xfrm_pols_put(pols, drop_pols); + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); + +- return make_blackhole(net, family, dst_orig); ++ return ERR_PTR(-EREMOTE); + } + if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { + DECLARE_WAITQUEUE(wait, current); +@@ -2215,6 +2230,23 @@ dropdst: + } + EXPORT_SYMBOL(xfrm_lookup); + ++/* Callers of xfrm_lookup_route() must ensure a call to dst_output(). ++ * Otherwise we may send out blackholed packets. ++ */ ++struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, ++ const struct flowi *fl, ++ struct sock *sk, int flags) ++{ ++ struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, ++ flags | XFRM_LOOKUP_QUEUE); ++ ++ if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) ++ return make_blackhole(net, dst_orig->ops->family, dst_orig); ++ ++ return dst; ++} ++EXPORT_SYMBOL(xfrm_lookup_route); ++ + static inline int + xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) + { +@@ -2480,7 +2512,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) + + skb_dst_force(skb); + +- dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); ++ dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); + if (IS_ERR(dst)) { + res = 0; + dst = NULL; +diff --git a/sound/core/info.c b/sound/core/info.c +index e79baa11b60e..08070e1eefeb 100644 +--- a/sound/core/info.c ++++ b/sound/core/info.c +@@ -679,7 +679,7 @@ int snd_info_card_free(struct snd_card *card) + * snd_info_get_line - read one line from the procfs buffer + * @buffer: the procfs buffer + * @line: the buffer to store +- * @len: the max. buffer size - 1 ++ * @len: the max. buffer size + * + * Reads one line from the buffer and stores the string. 
+ * +@@ -699,7 +699,7 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) + buffer->stop = 1; + if (c == '\n') + break; +- if (len) { ++ if (len > 1) { + len--; + *line++ = c; + } +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index a2104671f51d..e1ef106c8a6f 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -1783,14 +1783,16 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, + { + struct snd_pcm_hw_params *params = arg; + snd_pcm_format_t format; +- int channels, width; ++ int channels; ++ ssize_t frame_size; + + params->fifo_size = substream->runtime->hw.fifo_size; + if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { + format = params_format(params); + channels = params_channels(params); +- width = snd_pcm_format_physical_width(format); +- params->fifo_size /= width * channels; ++ frame_size = snd_pcm_format_size(format, channels); ++ if (frame_size > 0) ++ params->fifo_size /= (unsigned)frame_size; + } + return 0; + } +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index fde381d02afd..131d7d459cb6 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -3232,6 +3232,7 @@ enum { + CXT_FIXUP_HEADPHONE_MIC_PIN, + CXT_FIXUP_HEADPHONE_MIC, + CXT_FIXUP_GPIO1, ++ CXT_FIXUP_ASPIRE_DMIC, + }; + + static void cxt_fixup_stereo_dmic(struct hda_codec *codec, +@@ -3386,6 +3387,12 @@ static const struct hda_fixup cxt_fixups[] = { + { } + }, + }, ++ [CXT_FIXUP_ASPIRE_DMIC] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = cxt_fixup_stereo_dmic, ++ .chained = true, ++ .chain_id = CXT_FIXUP_GPIO1, ++ }, + }; + + static const struct snd_pci_quirk cxt5051_fixups[] = { +@@ -3395,7 +3402,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = { + + static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), +- SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1), ++ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index e1f2c9a6d67d..0b94d48331f3 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -323,6 +323,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) + case 0x10ec0885: + case 0x10ec0887: + /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ ++ case 0x10ec0900: + alc889_coef_init(codec); + break; + case 0x10ec0888: +@@ -2261,6 +2262,7 @@ static int patch_alc882(struct hda_codec *codec) + switch (codec->vendor_id) { + case 0x10ec0882: + case 0x10ec0885: ++ case 0x10ec0900: + break; + default: + /* ALC883 and variants */ +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 3c90447c5810..38b47b7b9cb6 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -548,8 +548,8 @@ static void stac_init_power_map(struct hda_codec *codec) + if (snd_hda_jack_tbl_get(codec, nid)) + continue; + if (def_conf == AC_JACK_PORT_COMPLEX && +- !(spec->vref_mute_led_nid == nid || +- is_jack_detectable(codec, nid))) { ++ spec->vref_mute_led_nid != nid && ++ is_jack_detectable(codec, nid)) { + snd_hda_jack_detect_enable_callback(codec, nid, + 
STAC_PWR_EVENT, + jack_update_power); +@@ -4198,11 +4198,18 @@ static int stac_parse_auto_config(struct hda_codec *codec) + return err; + } + +- stac_init_power_map(codec); +- + return 0; + } + ++static int stac_build_controls(struct hda_codec *codec) ++{ ++ int err = snd_hda_gen_build_controls(codec); ++ ++ if (err < 0) ++ return err; ++ stac_init_power_map(codec); ++ return 0; ++} + + static int stac_init(struct hda_codec *codec) + { +@@ -4336,7 +4343,7 @@ static void stac_set_power_state(struct hda_codec *codec, hda_nid_t fg, + #endif /* CONFIG_PM */ + + static const struct hda_codec_ops stac_patch_ops = { +- .build_controls = snd_hda_gen_build_controls, ++ .build_controls = stac_build_controls, + .build_pcms = snd_hda_gen_build_pcms, + .init = stac_init, + .free = stac_free, +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c +index 32ddb7fe5034..aab16a73a204 100644 +--- a/sound/soc/davinci/davinci-mcasp.c ++++ b/sound/soc/davinci/davinci-mcasp.c +@@ -632,8 +632,17 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev, + { + u32 fmt; + u32 tx_rotate = (word_length / 4) & 0x7; +- u32 rx_rotate = (32 - word_length) / 4; + u32 mask = (1ULL << word_length) - 1; ++ /* ++ * For captured data we should not rotate, inversion and masking is ++ * enoguh to get the data to the right position: ++ * Format data from bus after reverse (XRBUF) ++ * S16_LE: |LSB|MSB|xxx|xxx| |xxx|xxx|MSB|LSB| ++ * S24_3LE: |LSB|DAT|MSB|xxx| |xxx|MSB|DAT|LSB| ++ * S24_LE: |LSB|DAT|MSB|xxx| |xxx|MSB|DAT|LSB| ++ * S32_LE: |LSB|DAT|DAT|MSB| |MSB|DAT|DAT|LSB| ++ */ ++ u32 rx_rotate = 0; + + /* + * if s BCLK-to-LRCLK ratio has been configured via the set_clkdiv()