From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by finch.gentoo.org (Postfix) with ESMTPS id 27D3A139694 for ; Fri, 10 Feb 2017 12:29:17 +0000 (UTC) Received: from pigeon.gentoo.org (localhost [127.0.0.1]) by pigeon.gentoo.org (Postfix) with SMTP id 4A74CE0CAD; Fri, 10 Feb 2017 12:29:16 +0000 (UTC) Received: from smtp.gentoo.org (mail.gentoo.org [IPv6:2001:470:ea4a:1:5054:ff:fec7:86e4]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by pigeon.gentoo.org (Postfix) with ESMTPS id A6878E0CAD for ; Fri, 10 Feb 2017 12:29:15 +0000 (UTC) Received: from oystercatcher.gentoo.org (oystercatcher.gentoo.org [148.251.78.52]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.gentoo.org (Postfix) with ESMTPS id 784803413AD for ; Fri, 10 Feb 2017 12:29:13 +0000 (UTC) Received: from localhost.localdomain (localhost [IPv6:::1]) by oystercatcher.gentoo.org (Postfix) with ESMTP id EFF9C3FB8 for ; Fri, 10 Feb 2017 12:29:10 +0000 (UTC) From: "Mike Pagano" To: gentoo-commits@lists.gentoo.org Content-Transfer-Encoding: 8bit Content-type: text/plain; charset=UTF-8 Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano" Message-ID: <1486729743.b6c5778cf4dcc0d419dd96295f8dddee7c9e4c60.mpagano@gentoo> Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: / X-VCS-Repository: proj/linux-patches X-VCS-Files: 0000_README 1104_linux-3.10.105.patch X-VCS-Directories: / X-VCS-Committer: mpagano X-VCS-Committer-Name: Mike Pagano X-VCS-Revision: b6c5778cf4dcc0d419dd96295f8dddee7c9e4c60 X-VCS-Branch: 3.10 Date: Fri, 10 Feb 2017 12:29:10 +0000 (UTC) Precedence: bulk List-Post: List-Help: List-Unsubscribe: List-Subscribe: List-Id: Gentoo Linux mail X-BeenThere: gentoo-commits@lists.gentoo.org X-Archives-Salt: bb252772-f5fd-4293-ae16-e324e25506a1 X-Archives-Hash: 10ddf7e08507b7cccbaf217cb76cd8b7 commit: b6c5778cf4dcc0d419dd96295f8dddee7c9e4c60 Author: Mike Pagano gentoo org> AuthorDate: Fri Feb 10 12:29:03 2017 +0000 Commit: Mike Pagano gentoo org> CommitDate: Fri Feb 10 12:29:03 2017 +0000 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b6c5778c Linux patch 3.10.105. 0000_README | 4 + 1104_linux-3.10.105.patch | 10841 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 10845 insertions(+) diff --git a/0000_README b/0000_README index 271c499..1216723 100644 --- a/0000_README +++ b/0000_README @@ -458,6 +458,10 @@ Patch: 1103_linux-3.10.104.patch From: http://www.kernel.org Desc: Linux 3.10.104 +Patch: 1104_linux-3.10.105.patch +From: http://www.kernel.org +Desc: Linux 3.10.105 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. 
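The bulk of this release is a cross-architecture hardening of the user-copy helpers (the alpha, arc, avr32, blackfin, cris, frv, ia64, m32r, metag, microblaze, mips, mn10300, openrisc, parisc, powerpc, s390, score, sh and sparc hunks below): whenever copy_from_user() cannot complete, either because access_ok() rejects the source range or because the copy faults partway through, the uncopied part of the kernel destination buffer is now zeroed, so a short read can no longer leak stale kernel memory back to userspace. A minimal userspace model of that contract, using hypothetical range_ok() and raw_copy() stand-ins for the per-arch access_ok()/__copy_user() primitives (not kernel APIs), could look like this:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for access_ok(): reject only a NULL source here. */
	static int range_ok(const void *from, size_t n)
	{
		return from != NULL;
	}

	/* Stand-in for the arch copy primitive: returns bytes NOT copied. */
	static size_t raw_copy(void *to, const void *from, size_t n)
	{
		memcpy(to, from, n);
		return 0;
	}

	static size_t copy_from_user_model(void *to, const void *from, size_t n)
	{
		size_t res = n;

		if (range_ok(from, n))
			res = raw_copy(to, from, n);
		/* Zero whatever was not copied so no stale data leaks out. */
		if (res)
			memset((char *)to + (n - res), 0, res);
		return res;
	}

	int main(void)
	{
		char dst[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
		size_t left = copy_from_user_model(dst, NULL, sizeof(dst));

		/* Validation failed: left == 8 and dst is fully zeroed. */
		printf("uncopied: %zu, dst[0]: %d\n", left, dst[0]);
		return 0;
	}

The same zero-on-failure rule is applied to the __get_user() paths (m32r, microblaze, s390, sh64 and score below), which now initialize the destination variable to 0 before attempting the fault-prone load.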
diff --git a/1104_linux-3.10.105.patch b/1104_linux-3.10.105.patch new file mode 100644 index 0000000..677e884 --- /dev/null +++ b/1104_linux-3.10.105.patch @@ -0,0 +1,10841 @@ +diff --git a/Makefile b/Makefile +index f6a2cbd438a1..80e180e1e4a2 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 10 +-SUBLEVEL = 104 ++SUBLEVEL = 105 + EXTRAVERSION = + NAME = TOSSUG Baby Fish + +diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h +index 766fdfde2b7a..6e9d27ad5103 100644 +--- a/arch/alpha/include/asm/uaccess.h ++++ b/arch/alpha/include/asm/uaccess.h +@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) + return __cu_len; + } + +-extern inline long +-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) +-{ +- if (__access_ok((unsigned long)validate, len, get_fs())) +- len = __copy_tofrom_user_nocheck(to, from, len); +- return len; +-} +- + #define __copy_to_user(to,from,n) \ + ({ \ + __chk_user_ptr(to); \ +@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali + #define __copy_to_user_inatomic __copy_to_user + #define __copy_from_user_inatomic __copy_from_user + +- + extern inline long + copy_to_user(void __user *to, const void *from, long n) + { +- return __copy_tofrom_user((__force void *)to, from, n, to); ++ if (likely(__access_ok((unsigned long)to, n, get_fs()))) ++ n = __copy_tofrom_user_nocheck((__force void *)to, from, n); ++ return n; + } + + extern inline long + copy_from_user(void *to, const void __user *from, long n) + { +- return __copy_tofrom_user(to, (__force void *)from, n, from); ++ if (likely(__access_ok((unsigned long)from, n, get_fs()))) ++ n = __copy_tofrom_user_nocheck(to, (__force void *)from, n); ++ else ++ memset(to, 0, n); ++ return n; + } + + extern void __do_clear_user(void); +diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h +index 30c9baffa96f..08770c750696 100644 +--- a/arch/arc/include/asm/uaccess.h ++++ b/arch/arc/include/asm/uaccess.h +@@ -83,7 +83,10 @@ + "2: ;nop\n" \ + " .section .fixup, \"ax\"\n" \ + " .align 4\n" \ +- "3: mov %0, %3\n" \ ++ "3: # return -EFAULT\n" \ ++ " mov %0, %3\n" \ ++ " # zero out dst ptr\n" \ ++ " mov %1, 0\n" \ + " j 2b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ +@@ -101,7 +104,11 @@ + "2: ;nop\n" \ + " .section .fixup, \"ax\"\n" \ + " .align 4\n" \ +- "3: mov %0, %3\n" \ ++ "3: # return -EFAULT\n" \ ++ " mov %0, %3\n" \ ++ " # zero out dst ptr\n" \ ++ " mov %1, 0\n" \ ++ " mov %R1, 0\n" \ + " j 2b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ +diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c +index 6763654239a2..0823087dc9c0 100644 +--- a/arch/arc/kernel/signal.c ++++ b/arch/arc/kernel/signal.c +@@ -80,13 +80,14 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) + int err; + + err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); +- if (!err) +- set_current_blocked(&set); +- +- err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), ++ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), + sizeof(sf->uc.uc_mcontext.regs.scratch)); ++ if (err) ++ return err; + +- return err; ++ set_current_blocked(&set); ++ ++ return 0; + } + + static inline int is_do_ss_needed(unsigned int magic) +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S +index 032a8d987148..9fef67ab1692 100644 +--- 
a/arch/arm/boot/compressed/head.S ++++ b/arch/arm/boot/compressed/head.S +@@ -715,7 +715,7 @@ __armv7_mmu_cache_on: + orrne r0, r0, #1 @ MMU enabled + movne r1, #0xfffffffd @ domain 0 = client + bic r6, r6, #1 << 31 @ 32-bit translation system +- bic r6, r6, #3 << 0 @ use only ttbr0 ++ bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 + mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer + mcrne p15, 0, r1, c3, c0, 0 @ load domain access control + mcrne p15, 0, r6, c2, c0, 2 @ load ttb control +diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c +index e57d7e5bf96a..932125a20877 100644 +--- a/arch/arm/common/sa1111.c ++++ b/arch/arm/common/sa1111.c +@@ -872,9 +872,9 @@ struct sa1111_save_data { + + #ifdef CONFIG_PM + +-static int sa1111_suspend(struct platform_device *dev, pm_message_t state) ++static int sa1111_suspend_noirq(struct device *dev) + { +- struct sa1111 *sachip = platform_get_drvdata(dev); ++ struct sa1111 *sachip = dev_get_drvdata(dev); + struct sa1111_save_data *save; + unsigned long flags; + unsigned int val; +@@ -937,9 +937,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state) + * restored by their respective drivers, and must be called + * via LDM after this function. + */ +-static int sa1111_resume(struct platform_device *dev) ++static int sa1111_resume_noirq(struct device *dev) + { +- struct sa1111 *sachip = platform_get_drvdata(dev); ++ struct sa1111 *sachip = dev_get_drvdata(dev); + struct sa1111_save_data *save; + unsigned long flags, id; + void __iomem *base; +@@ -955,7 +955,7 @@ static int sa1111_resume(struct platform_device *dev) + id = sa1111_readl(sachip->base + SA1111_SKID); + if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { + __sa1111_remove(sachip); +- platform_set_drvdata(dev, NULL); ++ dev_set_drvdata(dev, NULL); + kfree(save); + return 0; + } +@@ -1006,8 +1006,8 @@ static int sa1111_resume(struct platform_device *dev) + } + + #else +-#define sa1111_suspend NULL +-#define sa1111_resume NULL ++#define sa1111_suspend_noirq NULL ++#define sa1111_resume_noirq NULL + #endif + + static int sa1111_probe(struct platform_device *pdev) +@@ -1041,6 +1041,11 @@ static int sa1111_remove(struct platform_device *pdev) + return 0; + } + ++static struct dev_pm_ops sa1111_pm_ops = { ++ .suspend_noirq = sa1111_suspend_noirq, ++ .resume_noirq = sa1111_resume_noirq, ++}; ++ + /* + * Not sure if this should be on the system bus or not yet. + * We really want some way to register a system device at +@@ -1053,11 +1058,10 @@ static int sa1111_remove(struct platform_device *pdev) + static struct platform_driver sa1111_device_driver = { + .probe = sa1111_probe, + .remove = sa1111_remove, +- .suspend = sa1111_suspend, +- .resume = sa1111_resume, + .driver = { + .name = "sa1111", + .owner = THIS_MODULE, ++ .pm = &sa1111_pm_ops, + }, + }; + +diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c +index 5859c8bc727c..c2a6d8432005 100644 +--- a/arch/arm/kernel/devtree.c ++++ b/arch/arm/kernel/devtree.c +@@ -90,6 +90,8 @@ void __init arm_dt_init_cpu_maps(void) + return; + + for_each_child_of_node(cpus, cpu) { ++ const __be32 *cell; ++ int prop_bytes; + u32 hwid; + + if (of_node_cmp(cpu->type, "cpu")) +@@ -101,17 +103,23 @@ void __init arm_dt_init_cpu_maps(void) + * properties is considered invalid to build the + * cpu_logical_map. 
+ */ +- if (of_property_read_u32(cpu, "reg", &hwid)) { ++ cell = of_get_property(cpu, "reg", &prop_bytes); ++ if (!cell || prop_bytes < sizeof(*cell)) { + pr_debug(" * %s missing reg property\n", + cpu->full_name); + return; + } + + /* +- * 8 MSBs must be set to 0 in the DT since the reg property ++ * Bits n:24 must be set to 0 in the DT since the reg property + * defines the MPIDR[23:0]. + */ +- if (hwid & ~MPIDR_HWID_BITMASK) ++ do { ++ hwid = be32_to_cpu(*cell++); ++ prop_bytes -= sizeof(*cell); ++ } while (!hwid && prop_bytes > 0); ++ ++ if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) + return; + + /* +diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c +index 9db3e98e8b85..4983b1149ec2 100644 +--- a/arch/arm/mach-sa1100/generic.c ++++ b/arch/arm/mach-sa1100/generic.c +@@ -30,6 +30,7 @@ + + #include + #include ++#include + + #include "generic.h" + +@@ -133,6 +134,7 @@ static void sa1100_power_off(void) + + void sa11x0_restart(char mode, const char *cmd) + { ++ clear_reset_status(RESET_STATUS_ALL); + if (mode == 's') { + /* Jump into ROM at address 0 */ + soft_restart(0); +diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h +index fe32c0e4ac01..e647e6d7b875 100644 +--- a/arch/arm64/include/asm/elf.h ++++ b/arch/arm64/include/asm/elf.h +@@ -126,6 +126,7 @@ extern unsigned long randomize_et_dyn(unsigned long base); + + #define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT); + ++/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ + #define ARCH_DLINFO \ + do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h +index 0defa0728a9b..c3cab6f87de4 100644 +--- a/arch/arm64/include/asm/spinlock.h ++++ b/arch/arm64/include/asm/spinlock.h +@@ -200,4 +200,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) + #define arch_read_relax(lock) cpu_relax() + #define arch_write_relax(lock) cpu_relax() + ++/* ++ * Accesses appearing in program order before a spin_lock() operation ++ * can be reordered with accesses inside the critical section, by virtue ++ * of arch_spin_lock being constructed using acquire semantics. ++ * ++ * In cases where this is problematic (e.g. try_to_wake_up), an ++ * smp_mb__before_spinlock() can restore the required ordering. 
++ */ ++#define smp_mb__before_spinlock() smp_mb() ++ + #endif /* __ASM_SPINLOCK_H */ +diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h +index 22d6d8885854..4cf0c17787a8 100644 +--- a/arch/arm64/include/uapi/asm/auxvec.h ++++ b/arch/arm64/include/uapi/asm/auxvec.h +@@ -19,4 +19,6 @@ + /* vDSO location */ + #define AT_SYSINFO_EHDR 33 + ++#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ ++ + #endif +diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c +index f4726dc054b3..49e6e3046105 100644 +--- a/arch/arm64/kernel/debug-monitors.c ++++ b/arch/arm64/kernel/debug-monitors.c +@@ -276,8 +276,10 @@ int kernel_active_single_step(void) + /* ptrace API */ + void user_enable_single_step(struct task_struct *task) + { +- set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); +- set_regs_spsr_ss(task_pt_regs(task)); ++ struct thread_info *ti = task_thread_info(task); ++ ++ if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP)) ++ set_regs_spsr_ss(task_pt_regs(task)); + } + + void user_disable_single_step(struct task_struct *task) +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index 7cd589ebca2a..5d515e629d0d 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -490,7 +490,7 @@ el0_inv: + mov x0, sp + mov x1, #BAD_SYNC + mrs x2, esr_el1 +- b bad_mode ++ b bad_el0_sync + ENDPROC(el0_sync) + + .align 6 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index f30852d28590..488a7b36d48e 100644 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -307,16 +307,33 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) + } + + /* +- * bad_mode handles the impossible case in the exception vector. ++ * bad_mode handles the impossible case in the exception vector. This is always ++ * fatal. + */ + asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + { +- siginfo_t info; +- void __user *pc = (void __user *)instruction_pointer(regs); + console_verbose(); + + pr_crit("Bad mode in %s handler detected, code 0x%08x\n", + handler[reason], esr); ++ ++ die("Oops - bad mode", regs, 0); ++ local_irq_disable(); ++ panic("bad mode"); ++} ++ ++/* ++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous ++ * exceptions taken from EL0. Unlike bad_mode, this returns. 
++ */ ++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ++{ ++ siginfo_t info; ++ void __user *pc = (void __user *)instruction_pointer(regs); ++ console_verbose(); ++ ++ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x\n", ++ smp_processor_id(), esr); + __show_regs(regs); + + info.si_signo = SIGILL; +@@ -324,7 +341,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + info.si_code = ILL_ILLOPC; + info.si_addr = pc; + +- arm64_notify_die("Oops - bad mode", regs, &info, 0); ++ force_sig_info(info.si_signo, &info, current); + } + + void __pte_error(const char *file, int line, unsigned long val) +diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h +index 245b2ee213c9..a0a9b8c31041 100644 +--- a/arch/avr32/include/asm/uaccess.h ++++ b/arch/avr32/include/asm/uaccess.h +@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from, + + extern __kernel_size_t copy_to_user(void __user *to, const void *from, + __kernel_size_t n); +-extern __kernel_size_t copy_from_user(void *to, const void __user *from, ++extern __kernel_size_t ___copy_from_user(void *to, const void __user *from, + __kernel_size_t n); + + static inline __kernel_size_t __copy_to_user(void __user *to, const void *from, +@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to, + { + return __copy_user(to, (const void __force *)from, n); + } ++static inline __kernel_size_t copy_from_user(void *to, ++ const void __user *from, ++ __kernel_size_t n) ++{ ++ size_t res = ___copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; ++} + + #define __copy_to_user_inatomic __copy_to_user + #define __copy_from_user_inatomic __copy_from_user +diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c +index d93ead02daed..7c6cf14f0985 100644 +--- a/arch/avr32/kernel/avr32_ksyms.c ++++ b/arch/avr32/kernel/avr32_ksyms.c +@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page); + /* + * Userspace access stuff. + */ +-EXPORT_SYMBOL(copy_from_user); ++EXPORT_SYMBOL(___copy_from_user); + EXPORT_SYMBOL(copy_to_user); + EXPORT_SYMBOL(__copy_user); + EXPORT_SYMBOL(strncpy_from_user); +diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S +index ea59c04b07de..075373471da1 100644 +--- a/arch/avr32/lib/copy_user.S ++++ b/arch/avr32/lib/copy_user.S +@@ -23,13 +23,13 @@ + */ + .text + .align 1 +- .global copy_from_user +- .type copy_from_user, @function +-copy_from_user: ++ .global ___copy_from_user ++ .type ___copy_from_user, @function ++___copy_from_user: + branch_if_kernel r8, __copy_user + ret_if_privileged r8, r11, r10, r10 + rjmp __copy_user +- .size copy_from_user, . - copy_from_user ++ .size ___copy_from_user, . 
- ___copy_from_user + + .global copy_to_user + .type copy_to_user, @function +diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c +index 903c7d81d0d5..a8e208eaf2a4 100644 +--- a/arch/avr32/mach-at32ap/pio.c ++++ b/arch/avr32/mach-at32ap/pio.c +@@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev) + struct resource *regs; + struct pio_device *pio; + +- if (pdev->id > MAX_NR_PIO_DEVICES) { ++ if (pdev->id >= MAX_NR_PIO_DEVICES) { + dev_err(&pdev->dev, "only %d PIO devices supported\n", + MAX_NR_PIO_DEVICES); + return; +diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h +index 57701c3b8a59..a992a788409c 100644 +--- a/arch/blackfin/include/asm/uaccess.h ++++ b/arch/blackfin/include/asm/uaccess.h +@@ -177,11 +177,12 @@ static inline int bad_user_access_length(void) + static inline unsigned long __must_check + copy_from_user(void *to, const void __user *from, unsigned long n) + { +- if (access_ok(VERIFY_READ, from, n)) ++ if (likely(access_ok(VERIFY_READ, from, n))) { + memcpy(to, (const void __force *)from, n); +- else +- return n; +- return 0; ++ return 0; ++ } ++ memset(to, 0, n); ++ return n; + } + + static inline unsigned long __must_check +diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h +index 914540801c5e..93bfa8acc38b 100644 +--- a/arch/cris/include/asm/uaccess.h ++++ b/arch/cris/include/asm/uaccess.h +@@ -176,30 +176,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon + extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); + extern unsigned long __do_clear_user(void __user *to, unsigned long n); + +-static inline unsigned long +-__generic_copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_user(to,from,n); +- return n; +-} +- +-static inline unsigned long +-__generic_copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_user_zeroing(to,from,n); +- return n; +-} +- +-static inline unsigned long +-__generic_clear_user(void __user *to, unsigned long n) +-{ +- if (access_ok(VERIFY_WRITE, to, n)) +- return __do_clear_user(to,n); +- return n; +-} +- + static inline long + __strncpy_from_user(char *dst, const char __user *src, long count) + { +@@ -262,7 +238,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) + else if (n == 24) + __asm_copy_from_user_24(to, from, ret); + else +- ret = __generic_copy_from_user(to, from, n); ++ ret = __copy_user_zeroing(to, from, n); + + return ret; + } +@@ -312,7 +288,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) + else if (n == 24) + __asm_copy_to_user_24(to, from, ret); + else +- ret = __generic_copy_to_user(to, from, n); ++ ret = __copy_user(to, from, n); + + return ret; + } +@@ -344,26 +320,43 @@ __constant_clear_user(void __user *to, unsigned long n) + else if (n == 24) + __asm_clear_24(to, ret); + else +- ret = __generic_clear_user(to, n); ++ ret = __do_clear_user(to, n); + + return ret; + } + + +-#define clear_user(to, n) \ +-(__builtin_constant_p(n) ? 
\ +- __constant_clear_user(to, n) : \ +- __generic_clear_user(to, n)) ++static inline size_t clear_user(void __user *to, size_t n) ++{ ++ if (unlikely(!access_ok(VERIFY_WRITE, to, n))) ++ return n; ++ if (__builtin_constant_p(n)) ++ return __constant_clear_user(to, n); ++ else ++ return __do_clear_user(to, n); ++} + +-#define copy_from_user(to, from, n) \ +-(__builtin_constant_p(n) ? \ +- __constant_copy_from_user(to, from, n) : \ +- __generic_copy_from_user(to, from, n)) ++static inline size_t copy_from_user(void *to, const void __user *from, size_t n) ++{ ++ if (unlikely(!access_ok(VERIFY_READ, from, n))) { ++ memset(to, 0, n); ++ return n; ++ } ++ if (__builtin_constant_p(n)) ++ return __constant_copy_from_user(to, from, n); ++ else ++ return __copy_user_zeroing(to, from, n); ++} + +-#define copy_to_user(to, from, n) \ +-(__builtin_constant_p(n) ? \ +- __constant_copy_to_user(to, from, n) : \ +- __generic_copy_to_user(to, from, n)) ++static inline size_t copy_to_user(void __user *to, const void *from, size_t n) ++{ ++ if (unlikely(!access_ok(VERIFY_WRITE, to, n))) ++ return n; ++ if (__builtin_constant_p(n)) ++ return __constant_copy_to_user(to, from, n); ++ else ++ return __copy_user(to, from, n); ++} + + /* We let the __ versions of copy_from/to_user inline, because they're often + * used in fast paths and have only a small space overhead. +diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h +index 0b67ec5b4414..3a74137eeef8 100644 +--- a/arch/frv/include/asm/uaccess.h ++++ b/arch/frv/include/asm/uaccess.h +@@ -263,19 +263,25 @@ do { \ + extern long __memset_user(void *dst, unsigned long count); + extern long __memcpy_user(void *dst, const void *src, unsigned long count); + +-#define clear_user(dst,count) __memset_user(____force(dst), (count)) ++#define __clear_user(dst,count) __memset_user(____force(dst), (count)) + #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n)) + #define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n)) + + #else + +-#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) ++#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) + #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0) + #define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0) + + #endif + +-#define __clear_user clear_user ++static inline unsigned long __must_check ++clear_user(void __user *to, unsigned long n) ++{ ++ if (likely(__access_ok(to, n))) ++ n = __clear_user(to, n); ++ return n; ++} + + static inline unsigned long __must_check + __copy_to_user(void __user *to, const void *from, unsigned long n) +diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h +index e4127e4d6a5b..25fc9049db8a 100644 +--- a/arch/hexagon/include/asm/uaccess.h ++++ b/arch/hexagon/include/asm/uaccess.h +@@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, + { + long res = __strnlen_user(src, n); + +- /* return from strnlen can't be zero -- that would be rubbish. 
*/ ++ if (unlikely(!res)) ++ return -EFAULT; + + if (res > n) { + copy_from_user(dst, src, n); +diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h +index 449c8c0fa2bd..810926c56e31 100644 +--- a/arch/ia64/include/asm/uaccess.h ++++ b/arch/ia64/include/asm/uaccess.h +@@ -262,17 +262,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) + __cu_len; \ + }) + +-#define copy_from_user(to, from, n) \ +-({ \ +- void *__cu_to = (to); \ +- const void __user *__cu_from = (from); \ +- long __cu_len = (n); \ +- \ +- __chk_user_ptr(__cu_from); \ +- if (__access_ok(__cu_from, __cu_len, get_fs())) \ +- __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ +- __cu_len; \ +-}) ++static inline unsigned long ++copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if (likely(__access_ok(from, n, get_fs()))) ++ n = __copy_user((__force void __user *) to, from, n); ++ else ++ memset(to, 0, n); ++ return n; ++} + + #define __copy_in_user(to, from, size) __copy_user((to), (from), (size)) + +diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h +index 1c7047bea200..a26d28d59ae6 100644 +--- a/arch/m32r/include/asm/uaccess.h ++++ b/arch/m32r/include/asm/uaccess.h +@@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs); + #define __get_user_nocheck(x,ptr,size) \ + ({ \ + long __gu_err = 0; \ +- unsigned long __gu_val; \ ++ unsigned long __gu_val = 0; \ + might_sleep(); \ + __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ +diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h +index 307ecd2bd9a1..d7d6b9e53e44 100644 +--- a/arch/metag/include/asm/atomic.h ++++ b/arch/metag/include/asm/atomic.h +@@ -38,6 +38,7 @@ + #define atomic_dec(v) atomic_sub(1, (v)) + + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) ++#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) + + #define smp_mb__before_atomic_dec() barrier() + #define smp_mb__after_atomic_dec() barrier() +@@ -46,8 +47,6 @@ + + #endif + +-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) +- + #include + + #endif /* __ASM_METAG_ATOMIC_H */ +diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h +index 0748b0a97986..7841f2290385 100644 +--- a/arch/metag/include/asm/uaccess.h ++++ b/arch/metag/include/asm/uaccess.h +@@ -199,8 +199,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to, + static inline unsigned long + copy_from_user(void *to, const void __user *from, unsigned long n) + { +- if (access_ok(VERIFY_READ, from, n)) ++ if (likely(access_ok(VERIFY_READ, from, n))) + return __copy_user_zeroing(to, from, n); ++ memset(to, 0, n); + return n; + } + +diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h +index 04e49553bdf9..5488a1a71665 100644 +--- a/arch/microblaze/include/asm/uaccess.h ++++ b/arch/microblaze/include/asm/uaccess.h +@@ -226,7 +226,7 @@ extern long __user_bad(void); + + #define __get_user(x, ptr) \ + ({ \ +- unsigned long __gu_val; \ ++ unsigned long __gu_val = 0; \ + /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \ + long __gu_err; \ + switch (sizeof(*(ptr))) { \ +@@ -371,10 +371,13 @@ extern long __user_bad(void); + static inline long copy_from_user(void *to, + const void __user *from, unsigned long n) + { ++ unsigned long res = n; + might_sleep(); +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_from_user(to, from, n); +- 
return n; ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ res = __copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; + } + + #define __copy_to_user(to, from, n) \ +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h +index 883a162083af..05863e3ee2e7 100644 +--- a/arch/mips/include/asm/kvm_host.h ++++ b/arch/mips/include/asm/kvm_host.h +@@ -375,7 +375,10 @@ struct kvm_vcpu_arch { + /* Host KSEG0 address of the EI/DI offset */ + void *kseg0_commpage; + +- u32 io_gpr; /* GPR used as IO source/target */ ++ /* Resume PC after MMIO completion */ ++ unsigned long io_pc; ++ /* GPR used as IO source/target */ ++ u32 io_gpr; + + /* Used to calibrate the virutal count register for the guest */ + int32_t host_cp0_count; +@@ -386,8 +389,6 @@ struct kvm_vcpu_arch { + /* Bitmask of pending exceptions to be cleared */ + unsigned long pending_exceptions_clr; + +- unsigned long pending_load_cause; +- + /* Save/Restore the entryhi register when are are preempted/scheduled back in */ + unsigned long preempt_entryhi; + +diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h +index 5e6cd0947393..a288de2199d8 100644 +--- a/arch/mips/include/asm/ptrace.h ++++ b/arch/mips/include/asm/ptrace.h +@@ -73,7 +73,7 @@ static inline int is_syscall_success(struct pt_regs *regs) + + static inline long regs_return_value(struct pt_regs *regs) + { +- if (is_syscall_success(regs)) ++ if (is_syscall_success(regs) || !user_mode(regs)) + return regs->regs[2]; + else + return -regs->regs[2]; +diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h +index f3fa3750f577..e09339df2232 100644 +--- a/arch/mips/include/asm/uaccess.h ++++ b/arch/mips/include/asm/uaccess.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + /* + * The fs value determines whether argument validity checking should be +@@ -938,6 +939,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ ++ } else { \ ++ memset(__cu_to, 0, __cu_len); \ + } \ + __cu_len; \ + }) +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c +index 9f7643874fba..716285497e0e 100644 +--- a/arch/mips/kvm/kvm_mips_emul.c ++++ b/arch/mips/kvm/kvm_mips_emul.c +@@ -254,15 +254,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + +- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { ++ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { ++ kvm_clear_c0_guest_status(cop0, ST0_ERL); ++ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); ++ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { + kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, + kvm_read_c0_guest_epc(cop0)); + kvm_clear_c0_guest_status(cop0, ST0_EXL); + vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); + +- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { +- kvm_clear_c0_guest_status(cop0, ST0_ERL); +- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); + } else { + printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", + vcpu->arch.pc); +@@ -310,6 +310,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) + return er; + } + ++/** ++ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. ++ * @vcpu: VCPU with changed mappings. ++ * @tlb: TLB entry being removed. 
++ * ++ * This is called to indicate a single change in guest MMU mappings, so that we ++ * can arrange TLB flushes on this and other CPUs. ++ */ ++static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, ++ struct kvm_mips_tlb *tlb) ++{ ++ int cpu, i; ++ bool user; ++ ++ /* No need to flush for entries which are already invalid */ ++ if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V)) ++ return; ++ /* User address space doesn't need flushing for KSeg2/3 changes */ ++ user = tlb->tlb_hi < KVM_GUEST_KSEG0; ++ ++ preempt_disable(); ++ ++ /* ++ * Probe the shadow host TLB for the entry being overwritten, if one ++ * matches, invalidate it ++ */ ++ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); ++ ++ /* Invalidate the whole ASID on other CPUs */ ++ cpu = smp_processor_id(); ++ for_each_possible_cpu(i) { ++ if (i == cpu) ++ continue; ++ if (user) ++ vcpu->arch.guest_user_asid[i] = 0; ++ vcpu->arch.guest_kernel_asid[i] = 0; ++ } ++ ++ preempt_enable(); ++} ++ + /* Write Guest TLB Entry @ Index */ + enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) + { +@@ -331,10 +372,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) + } + + tlb = &vcpu->arch.guest_tlb[index]; +-#if 1 +- /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ +- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +-#endif ++ ++ kvm_mips_invalidate_guest_tlb(vcpu, tlb); + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); +@@ -373,10 +412,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) + + tlb = &vcpu->arch.guest_tlb[index]; + +-#if 1 +- /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ +- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +-#endif ++ kvm_mips_invalidate_guest_tlb(vcpu, tlb); + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); +@@ -419,6 +455,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, + int32_t rt, rd, copz, sel, co_bit, op; + uint32_t pc = vcpu->arch.pc; + unsigned long curr_pc; ++ int cpu, i; + + /* + * Update PC and hold onto current PC in case there is +@@ -538,8 +575,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, + ASID_MASK, + vcpu->arch.gprs[rt] & ASID_MASK); + ++ preempt_disable(); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); ++ cpu = smp_processor_id(); ++ for_each_possible_cpu(i) ++ if (i != cpu) { ++ vcpu->arch.guest_user_asid[i] = 0; ++ vcpu->arch.guest_kernel_asid[i] = 0; ++ } ++ preempt_enable(); + } + kvm_write_c0_guest_entryhi(cop0, + vcpu->arch.gprs[rt]); +@@ -773,6 +818,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) + { + enum emulation_result er = EMULATE_DO_MMIO; ++ unsigned long curr_pc; + int32_t op, base, rt, offset; + uint32_t bytes; + +@@ -781,7 +827,18 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + offset = inst & 0xffff; + op = (inst >> 26) & 0x3f; + +- vcpu->arch.pending_load_cause = cause; ++ /* ++ * Find the resume PC now while we have safe and easy access to the ++ * prior branch instruction, and save it for ++ * kvm_mips_complete_mmio_load() to restore later. 
++ */ ++ curr_pc = vcpu->arch.pc; ++ er = update_pc(vcpu, cause); ++ if (er == EMULATE_FAIL) ++ return er; ++ vcpu->arch.io_pc = vcpu->arch.pc; ++ vcpu->arch.pc = curr_pc; ++ + vcpu->arch.io_gpr = rt; + + switch (op) { +@@ -1610,7 +1667,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) + { + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + enum emulation_result er = EMULATE_DONE; +- unsigned long curr_pc; + + if (run->mmio.len > sizeof(*gpr)) { + printk("Bad MMIO length: %d", run->mmio.len); +@@ -1618,14 +1674,8 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) + goto done; + } + +- /* +- * Update PC and hold onto current PC in case there is +- * an error and we want to rollback the PC +- */ +- curr_pc = vcpu->arch.pc; +- er = update_pc(vcpu, vcpu->arch.pending_load_cause); +- if (er == EMULATE_FAIL) +- return er; ++ /* Restore saved resume PC */ ++ vcpu->arch.pc = vcpu->arch.io_pc; + + switch (run->mmio.len) { + case 4: +@@ -1647,12 +1697,6 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) + break; + } + +- if (vcpu->arch.pending_load_cause & CAUSEF_BD) +- kvm_debug +- ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", +- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, +- vcpu->mmio_needed); +- + done: + return er; + } +diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c +index c72a06936781..2046e1c385d4 100644 +--- a/arch/mips/mti-malta/malta-setup.c ++++ b/arch/mips/mti-malta/malta-setup.c +@@ -36,6 +36,9 @@ + #include + #endif + ++#define ROCIT_CONFIG_GEN0 0x1f403000 ++#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7) ++ + extern void malta_be_init(void); + extern int malta_be_handler(struct pt_regs *regs, int is_fixup); + +@@ -108,6 +111,8 @@ static void __init fd_activate(void) + static int __init plat_enable_iocoherency(void) + { + int supported = 0; ++ u32 cfg; ++ + if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { + if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { + BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; +@@ -130,7 +135,8 @@ static int __init plat_enable_iocoherency(void) + } else if (gcmp_niocu() != 0) { + /* Nothing special needs to be done to enable coherency */ + pr_info("CMP IOCU detected\n"); +- if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { ++ cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); ++ if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) { + pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); + return 0; + } +diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h +index d7966e0f7698..b9855e4f0ccd 100644 +--- a/arch/mn10300/include/asm/uaccess.h ++++ b/arch/mn10300/include/asm/uaccess.h +@@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; }; + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + "3:\n\t" \ ++ " mov 0,%1\n" \ + " mov %3,%0\n" \ + " jmp 2b\n" \ + " .previous\n" \ +diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c +index 7826e6c364e7..ce8899e5e171 100644 +--- a/arch/mn10300/lib/usercopy.c ++++ b/arch/mn10300/lib/usercopy.c +@@ -9,7 +9,7 @@ + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +-#include ++#include + + unsigned long + __generic_copy_to_user(void *to, const void *from, unsigned long n) +@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) + { + if (access_ok(VERIFY_READ, from, n)) + __copy_user_zeroing(to, from, n); ++ else ++ memset(to, 0, n); + return n; + } + +diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h +index ab2e7a198a4c..d441480a4af4 100644 +--- a/arch/openrisc/include/asm/uaccess.h ++++ b/arch/openrisc/include/asm/uaccess.h +@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size); + static inline unsigned long + copy_from_user(void *to, const void *from, unsigned long n) + { +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_tofrom_user(to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user(to, from, n - over) + over; +- } +- return n; ++ unsigned long res = n; ++ ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ res = __copy_tofrom_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; + } + + static inline unsigned long + copy_to_user(void *to, const void *from, unsigned long n) + { +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_tofrom_user(to, from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, from, n - over) + over; +- } ++ if (likely(access_ok(VERIFY_WRITE, to, n))) ++ n = __copy_tofrom_user(to, from, n); + return n; + } + +@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size); + static inline __must_check unsigned long + clear_user(void *addr, unsigned long size) + { +- +- if (access_ok(VERIFY_WRITE, addr, size)) +- return __clear_user(addr, size); +- if ((unsigned long)addr < TASK_SIZE) { +- unsigned long over = (unsigned long)addr + size - TASK_SIZE; +- return __clear_user(addr, size - over) + over; +- } ++ if (likely(access_ok(VERIFY_WRITE, addr, size))) ++ size = __clear_user(addr, size); + return size; + } + +diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h +index e0a82358517e..9bbddafb0da3 100644 +--- a/arch/parisc/include/asm/uaccess.h ++++ b/arch/parisc/include/asm/uaccess.h +@@ -9,6 +9,8 @@ + #include + #include + ++#include ++ + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 + +@@ -246,13 +248,14 @@ static inline unsigned long __must_check copy_from_user(void *to, + unsigned long n) + { + int sz = __compiletime_object_size(to); +- int ret = -EFAULT; ++ unsigned long ret = n; + + if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) + ret = __copy_from_user(to, from, n); + else + copy_from_user_overflow(); +- ++ if (unlikely(ret)) ++ memset(to + (n - ret), 0, ret); + return ret; + } + +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index e767ab733e32..69caa82c50d3 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -106,8 +106,6 @@ linux_gateway_entry: + mtsp %r0,%sr4 /* get kernel space into sr4 */ + mtsp %r0,%sr5 /* get kernel space into sr5 */ + mtsp %r0,%sr6 /* get kernel space into sr6 */ +- mfsp %sr7,%r1 /* save user sr7 */ +- mtsp %r1,%sr3 /* and store it in sr3 */ + + #ifdef CONFIG_64BIT + /* for now we can *always* set the W bit on entry to the syscall +@@ -133,6 +131,14 @@ linux_gateway_entry: + depdi 0, 31, 32, %r21 + 1: + #endif ++ 
++ /* We use a rsm/ssm pair to prevent sr3 from being clobbered ++ * by external interrupts. ++ */ ++ mfsp %sr7,%r1 /* save user sr7 */ ++ rsm PSW_SM_I, %r0 /* disable interrupts */ ++ mtsp %r1,%sr3 /* and store it in sr3 */ ++ + mfctl %cr30,%r1 + xor %r1,%r30,%r30 /* ye olde xor trick */ + xor %r1,%r30,%r1 +@@ -147,6 +153,7 @@ linux_gateway_entry: + */ + + mtsp %r0,%sr7 /* get kernel space into sr7 */ ++ ssm PSW_SM_I, %r0 /* enable interrupts */ + STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ + mfctl %cr30,%r1 /* get task ptr in %r1 */ + LDREG TI_TASK(%r1),%r1 +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index 4db49590acf5..1d47060f488b 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -323,30 +323,17 @@ extern unsigned long __copy_tofrom_user(void __user *to, + static inline unsigned long copy_from_user(void *to, + const void __user *from, unsigned long n) + { +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) ++ if (likely(access_ok(VERIFY_READ, from, n))) + return __copy_tofrom_user((__force void __user *)to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user((__force void __user *)to, from, +- n - over) + over; +- } ++ memset(to, 0, n); + return n; + } + + static inline unsigned long copy_to_user(void __user *to, + const void *from, unsigned long n) + { +- unsigned long over; +- + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_tofrom_user(to, (__force void __user *)from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, (__force void __user *)from, +- n - over) + over; +- } + return n; + } + +@@ -437,10 +424,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) + might_sleep(); + if (likely(access_ok(VERIFY_WRITE, addr, size))) + return __clear_user(addr, size); +- if ((unsigned long)addr < TASK_SIZE) { +- unsigned long over = (unsigned long)addr + size - TASK_SIZE; +- return __clear_user(addr, size - over) + over; +- } + return size; + } + +diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c +index 48fbc2b97e95..4047d8a035f7 100644 +--- a/arch/powerpc/kernel/nvram_64.c ++++ b/arch/powerpc/kernel/nvram_64.c +@@ -280,7 +280,7 @@ int __init nvram_remove_partition(const char *name, int sig, + + /* Make partition a free partition */ + part->header.signature = NVRAM_SIG_FREE; +- strncpy(part->header.name, "wwwwwwwwwwww", 12); ++ memset(part->header.name, 'w', 12); + part->header.checksum = nvram_checksum(&part->header); + rc = nvram_write_header(part); + if (rc <= 0) { +@@ -298,8 +298,8 @@ int __init nvram_remove_partition(const char *name, int sig, + } + if (prev) { + prev->header.length += part->header.length; +- prev->header.checksum = nvram_checksum(&part->header); +- rc = nvram_write_header(part); ++ prev->header.checksum = nvram_checksum(&prev->header); ++ rc = nvram_write_header(prev); + if (rc <= 0) { + printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); + return rc; +diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S +index 79796de11737..3263ee23170d 100644 +--- a/arch/powerpc/kernel/vdso64/datapage.S ++++ b/arch/powerpc/kernel/vdso64/datapage.S +@@ -57,7 +57,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) + bl V_LOCAL_FUNC(__get_datapage) + mtlr r12 + addi r3,r3,CFG_SYSCALL_MAP64 +- cmpli cr0,r4,0 ++ 
cmpldi cr0,r4,0 + crclr cr0*4+so + beqlr + li r0,__NR_syscalls +diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S +index a76b4af37ef2..382021324883 100644 +--- a/arch/powerpc/kernel/vdso64/gettimeofday.S ++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S +@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) + bne cr0,99f + + li r3,0 +- cmpli cr0,r4,0 ++ cmpldi cr0,r4,0 + crclr cr0*4+so + beqlr + lis r5,CLOCK_REALTIME_RES@h +diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S +index d73a59014900..be94e1be4ae3 100644 +--- a/arch/powerpc/lib/copyuser_64.S ++++ b/arch/powerpc/lib/copyuser_64.S +@@ -336,6 +336,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) + addi r3,r3,8 + 171: + 177: ++179: + addi r3,r3,8 + 370: + 372: +@@ -350,7 +351,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) + 173: + 174: + 175: +-179: + 181: + 184: + 186: +diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S +index 17aa6dfceb34..e507f5e733f3 100644 +--- a/arch/powerpc/mm/slb_low.S ++++ b/arch/powerpc/mm/slb_low.S +@@ -110,7 +110,12 @@ BEGIN_FTR_SECTION + END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) + b slb_finish_load_1T + +-0: ++0: /* ++ * For userspace addresses, make sure this is region 0. ++ */ ++ cmpdi r9, 0 ++ bne 8f ++ + /* when using slices, we extract the psize off the slice bitmaps + * and then we need to get the sllp encoding off the mmu_psize_defs + * array. +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c +index 0473d31b3a4d..d93c6cab18bf 100644 +--- a/arch/powerpc/platforms/powernv/pci.c ++++ b/arch/powerpc/platforms/powernv/pci.c +@@ -176,8 +176,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) + pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); + + for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { +- if ((data->pestA[i] >> 63) == 0 && +- (data->pestB[i] >> 63) == 0) ++ if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && ++ (be64_to_cpu(data->pestB[i]) >> 63) == 0) + continue; + pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]); + pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index 9c33ed4e666f..b6017ace1515 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -164,28 +164,28 @@ extern int __put_user_bad(void) __attribute__((noreturn)); + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: { \ +- unsigned char __x; \ ++ unsigned char __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 2: { \ +- unsigned short __x; \ ++ unsigned short __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 4: { \ +- unsigned int __x; \ ++ unsigned int __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ + break; \ + }; \ + case 8: { \ +- unsigned long long __x; \ ++ unsigned long long __x = 0; \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ +diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h +index ab66ddde777b..69326dfb894d 100644 +--- a/arch/score/include/asm/uaccess.h ++++ b/arch/score/include/asm/uaccess.h +@@ -158,7 +158,7 @@ do { \ + __get_user_asm(val, "lw", ptr); \ + 
break; \ + case 8: \ +- if ((copy_from_user((void *)&val, ptr, 8)) == 0) \ ++ if (__copy_from_user((void *)&val, ptr, 8) == 0) \ + __gu_err = 0; \ + else \ + __gu_err = -EFAULT; \ +@@ -183,6 +183,8 @@ do { \ + \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_user_common((x), size, __gu_ptr); \ ++ else \ ++ (x) = 0; \ + \ + __gu_err; \ + }) +@@ -196,6 +198,7 @@ do { \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:li %0, %4\n" \ ++ "li %1, 0\n" \ + "j 2b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ +@@ -293,35 +296,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); + static inline unsigned long + copy_from_user(void *to, const void *from, unsigned long len) + { +- unsigned long over; ++ unsigned long res = len; + +- if (access_ok(VERIFY_READ, from, len)) +- return __copy_tofrom_user(to, from, len); ++ if (likely(access_ok(VERIFY_READ, from, len))) ++ res = __copy_tofrom_user(to, from, len); + +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + len - TASK_SIZE; +- return __copy_tofrom_user(to, from, len - over) + over; +- } +- return len; ++ if (unlikely(res)) ++ memset(to + (len - res), 0, res); ++ ++ return res; + } + + static inline unsigned long + copy_to_user(void *to, const void *from, unsigned long len) + { +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, len)) +- return __copy_tofrom_user(to, from, len); ++ if (likely(access_ok(VERIFY_WRITE, to, len))) ++ len = __copy_tofrom_user(to, from, len); + +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + len - TASK_SIZE; +- return __copy_tofrom_user(to, from, len - over) + over; +- } + return len; + } + +-#define __copy_from_user(to, from, len) \ +- __copy_tofrom_user((to), (from), (len)) ++static inline unsigned long ++__copy_from_user(void *to, const void *from, unsigned long len) ++{ ++ unsigned long left = __copy_tofrom_user(to, from, len); ++ if (unlikely(left)) ++ memset(to + (len - left), 0, left); ++ return left; ++} + + #define __copy_to_user(to, from, len) \ + __copy_tofrom_user((to), (from), (len)) +@@ -335,17 +337,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len) + static inline unsigned long + __copy_from_user_inatomic(void *to, const void *from, unsigned long len) + { +- return __copy_from_user(to, from, len); ++ return __copy_tofrom_user(to, from, len); + } + +-#define __copy_in_user(to, from, len) __copy_from_user(to, from, len) ++#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len) + + static inline unsigned long + copy_in_user(void *to, const void *from, unsigned long len) + { + if (access_ok(VERIFY_READ, from, len) && + access_ok(VERFITY_WRITE, to, len)) +- return copy_from_user(to, from, len); ++ return __copy_tofrom_user(to, from, len); + } + + /* +diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h +index 9486376605f4..c04cc18ae9cd 100644 +--- a/arch/sh/include/asm/uaccess.h ++++ b/arch/sh/include/asm/uaccess.h +@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) + __kernel_size_t __copy_size = (__kernel_size_t) n; + + if (__copy_size && __access_ok(__copy_from, __copy_size)) +- return __copy_user(to, from, __copy_size); ++ __copy_size = __copy_user(to, from, __copy_size); ++ ++ if (unlikely(__copy_size)) ++ memset(to + (n - __copy_size), 0, __copy_size); + + return __copy_size; + } +diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h +index 2e07e0f40c6a..a2f9d0531328 100644 
+--- a/arch/sh/include/asm/uaccess_64.h ++++ b/arch/sh/include/asm/uaccess_64.h +@@ -24,6 +24,7 @@ + #define __get_user_size(x,ptr,size,retval) \ + do { \ + retval = 0; \ ++ x = 0; \ + switch (size) { \ + case 1: \ + retval = __get_user_asm_b((void *)&x, \ +diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h +index 53a28dd59f59..01f602858de1 100644 +--- a/arch/sparc/include/asm/uaccess_32.h ++++ b/arch/sparc/include/asm/uaccess_32.h +@@ -265,8 +265,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un + { + if (n && __access_ok((unsigned long) from, n)) + return __copy_user((__force void __user *) to, from, n); +- else ++ else { ++ memset(to, 0, n); + return n; ++ } + } + + static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) +diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c +index 5ac397ec6986..9df6d0d6d187 100644 +--- a/arch/tile/kernel/time.c ++++ b/arch/tile/kernel/time.c +@@ -215,8 +215,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) + */ + unsigned long long sched_clock(void) + { +- return clocksource_cyc2ns(get_cycles(), +- sched_clock_mult, SCHED_CLOCK_SHIFT); ++ return mult_frac(get_cycles(), ++ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); + } + + int setup_profiling_timer(unsigned int multiplier) +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 7194d9f094bc..349cf190d236 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -7,7 +7,7 @@ + targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo + + KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 +-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC ++KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) + KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING + cflags-$(CONFIG_X86_32) := -march=i386 + cflags-$(CONFIG_X86_64) := -mcmodel=small +@@ -20,6 +20,18 @@ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n + + LDFLAGS := -m elf_$(UTS_MACHINE) ++ifeq ($(CONFIG_RELOCATABLE),y) ++# If kernel is relocatable, build compressed kernel as PIE. ++ifeq ($(CONFIG_X86_32),y) ++LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) ++else ++# To build 64-bit compressed kernel as PIE, we disable relocation ++# overflow check to avoid relocation overflow error with a new linker ++# command-line option, -z noreloc-overflow. ++LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ ++ && echo "-z noreloc-overflow -pie --no-dynamic-linker") ++endif ++endif + LDFLAGS_vmlinux := -T + + hostprogs-y := mkpiggy +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 3b28eff9b90b..104d7e46a6c2 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -30,6 +30,34 @@ + #include + #include + ++/* ++ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X ++ * relocation to get the symbol address in PIC. When the compressed x86 ++ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X ++ * relocations to their fixed symbol addresses. However, when the ++ * compressed x86 kernel is loaded at a different address, it leads ++ * to the following load failure: ++ * ++ * Failed to allocate space for phdrs ++ * ++ * during the decompression stage. 
++ * ++ * If the compressed x86 kernel is relocatable at run-time, it should be ++ * compiled with -fPIE, instead of -fPIC, if possible and should be built as ++ * Position Independent Executable (PIE) so that linker won't optimize ++ * R_386_GOT32X relocation to its fixed symbol address. Older ++ * linkers generate R_386_32 relocations against locally defined symbols, ++ * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less ++ * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle ++ * R_386_32 relocations when relocating the kernel. To generate ++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as ++ * hidden: ++ */ ++ .hidden _bss ++ .hidden _ebss ++ .hidden _got ++ .hidden _egot ++ + __HEAD + ENTRY(startup_32) + #ifdef CONFIG_EFI_STUB +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index 92059b8f3f7b..6ac508a75ae5 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -34,6 +34,14 @@ + #include + #include + ++/* ++ * Locally defined symbols should be marked hidden: ++ */ ++ .hidden _bss ++ .hidden _ebss ++ .hidden _got ++ .hidden _egot ++ + __HEAD + .code32 + ENTRY(startup_32) +diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h +index 68c05398bba9..7aadd3cea843 100644 +--- a/arch/x86/include/asm/hugetlb.h ++++ b/arch/x86/include/asm/hugetlb.h +@@ -4,6 +4,7 @@ + #include + #include + ++#define hugepages_supported() cpu_has_pse + + static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index 50a7fc0f824a..fb3285805beb 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -17,7 +17,14 @@ + + static inline void __native_flush_tlb(void) + { ++ /* ++ * If current->mm == NULL then we borrow a mm which may change during a ++ * task switch and therefore we must not be preempted while we write CR3 ++ * back: ++ */ ++ preempt_disable(); + native_write_cr3(native_read_cr3()); ++ preempt_enable(); + } + + static inline void __native_flush_tlb_global_irq_disabled(void) +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 5ee26875baea..995c49aa1a19 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -381,7 +381,7 @@ do { \ + asm volatile("1: mov"itype" %1,%"rtype"0\n" \ + "2:\n" \ + _ASM_EXTABLE_EX(1b, 2b) \ +- : ltype(x) : "m" (__m(addr))) ++ : ltype(x) : "m" (__m(addr)), "0" (0)) + + #define __put_user_nocheck(x, ptr, size) \ + ({ \ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 9620d18cb638..3cd8bfc3c4b6 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -1581,6 +1581,9 @@ void __init enable_IR_x2apic(void) + int ret, x2apic_enabled = 0; + int hardware_init_ret; + ++ if (skip_ioapic_setup) ++ return; ++ + /* Make sure irq_remap_ops are initialized */ + setup_irq_remapping_ops(); + +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S +index 8060c8b95b3a..b7e330c57a49 100644 +--- a/arch/x86/kernel/head_32.S ++++ b/arch/x86/kernel/head_32.S +@@ -586,7 +586,7 @@ early_idt_handler_common: + movl %eax,%ds + movl %eax,%es + +- cmpl $(__KERNEL_CS),32(%esp) ++ cmpw $(__KERNEL_CS),32(%esp) + jne 10f + + leal 28(%esp),%eax # Pointer to %eip +diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c +index cd6de64cc480..8baf3acd7074 100644 +--- 
a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -46,12 +46,12 @@ void _paravirt_nop(void) + } + + /* identity function, which can be inlined */ +-u32 _paravirt_ident_32(u32 x) ++u32 notrace _paravirt_ident_32(u32 x) + { + return x; + } + +-u64 _paravirt_ident_64(u64 x) ++u64 notrace _paravirt_ident_64(u64 x) + { + return x; + } +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 335fe70967a8..7e9ca58ae875 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -366,6 +366,7 @@ struct nested_vmx { + struct list_head vmcs02_pool; + int vmcs02_num; + u64 vmcs01_tsc_offset; ++ bool change_vmcs01_virtual_x2apic_mode; + /* L2 must run next, and mustn't decide to exit to L1. */ + bool nested_run_pending; + /* +@@ -6702,6 +6703,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) + { + u32 sec_exec_control; + ++ /* Postpone execution until vmcs01 is the current VMCS. */ ++ if (is_guest_mode(vcpu)) { ++ to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; ++ return; ++ } ++ + /* + * There is not point to enable virtualize x2apic without enable + * apicv +@@ -8085,6 +8092,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) + /* Update TSC_OFFSET if TSC was changed while L2 ran */ + vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); + ++ if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { ++ vmx->nested.change_vmcs01_virtual_x2apic_mode = false; ++ vmx_set_virtual_x2apic_mode(vcpu, ++ vcpu->arch.apic_base & X2APIC_ENABLE); ++ } ++ + /* This is needed for same reason as it was needed in prepare_vmcs02 */ + vmx->host_rsp = 0; + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 8e57771d4bfd..b70b67bde90d 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -178,7 +178,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn) + struct kvm_shared_msrs *locals + = container_of(urn, struct kvm_shared_msrs, urn); + struct kvm_shared_msr_values *values; ++ unsigned long flags; + ++ /* ++ * Disabling irqs at this point since the following code could be ++ * interrupted and executed through kvm_arch_hardware_disable() ++ */ ++ local_irq_save(flags); ++ if (locals->registered) { ++ locals->registered = false; ++ user_return_notifier_unregister(urn); ++ } ++ local_irq_restore(flags); + for (slot = 0; slot < shared_msrs_global.nr; ++slot) { + values = &locals->values[slot]; + if (values->host != values->curr) { +@@ -186,8 +197,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn) + values->curr = values->host; + } + } +- locals->registered = false; +- user_return_notifier_unregister(urn); + } + + static void shared_msr_update(unsigned slot, u32 msr) +@@ -3182,6 +3191,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, + }; + case KVM_SET_VAPIC_ADDR: { + struct kvm_vapic_addr va; ++ int idx; + + r = -EINVAL; + if (!irqchip_in_kernel(vcpu->kvm)) +@@ -3189,7 +3199,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, + r = -EFAULT; + if (copy_from_user(&va, argp, sizeof va)) + goto out; ++ idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); ++ srcu_read_unlock(&vcpu->kvm->srcu, idx); + break; + } + case KVM_X86_SETUP_MCE: { +@@ -6509,11 +6521,13 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) + + void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) + { ++ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; ++ + kvmclock_reset(vcpu); + +- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); + fx_free(vcpu); + kvm_x86_ops->vcpu_free(vcpu); ++ 
free_cpumask_var(wbinvd_dirty_mask); + } + + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, +diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c +index 657438858e83..7f0c8da7ecea 100644 +--- a/arch/x86/mm/pat.c ++++ b/arch/x86/mm/pat.c +@@ -505,11 +505,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + return 1; + + while (cursor < to) { +- if (!devmem_is_allowed(pfn)) { +- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n", +- current->comm, from, to - 1); ++ if (!devmem_is_allowed(pfn)) + return 0; +- } + cursor += PAGE_SIZE; + pfn++; + } +diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h +index 7d01b8c56c00..1da6bb44f94f 100644 +--- a/arch/x86/um/asm/barrier.h ++++ b/arch/x86/um/asm/barrier.h +@@ -51,11 +51,7 @@ + + #else /* CONFIG_SMP */ + +-#define smp_mb() barrier() +-#define smp_rmb() barrier() +-#define smp_wmb() barrier() +-#define smp_read_barrier_depends() do { } while (0) +-#define set_mb(var, value) do { var = value; barrier(); } while (0) ++#include + + #endif /* CONFIG_SMP */ + +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c +index fdc3ba28ca38..53b061c9ad7e 100644 +--- a/arch/x86/xen/mmu.c ++++ b/arch/x86/xen/mmu.c +@@ -1187,7 +1187,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr, + + /* NOTE: The loop is more greedy than the cleanup_highmap variant. + * We include the PMD passed in on _both_ boundaries. */ +- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE)); ++ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); + pmd++, vaddr += PMD_SIZE) { + if (pmd_none(*pmd)) + continue; +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index 69111c5c352c..ddb0ebb89f47 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -2812,7 +2812,6 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) + if (time_before(jiffies, rq_fifo_time(rq))) + rq = NULL; + +- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + return rq; + } + +@@ -3186,6 +3185,9 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) + { + unsigned int max_dispatch; + ++ if (cfq_cfqq_must_dispatch(cfqq)) ++ return true; ++ + /* + * Drain async requests before we start sync IO + */ +@@ -3277,15 +3279,20 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + ++ rq = cfq_check_fifo(cfqq); ++ if (rq) ++ cfq_mark_cfqq_must_dispatch(cfqq); ++ + if (!cfq_may_dispatch(cfqd, cfqq)) + return false; + + /* + * follow expired path, else get first next available + */ +- rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; ++ else ++ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + + /* + * insert request into driver dispatch list +@@ -3794,7 +3801,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + * if the new request is sync, but the currently running queue is + * not, let the sync request have priority. 
+ */ +- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) ++ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) + return true; + + if (new_cfqq->cfqg != cfqq->cfqg) +diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c +index ebcec7439a1a..2b6dd7401632 100644 +--- a/crypto/ablkcipher.c ++++ b/crypto/ablkcipher.c +@@ -379,6 +379,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, + } + crt->base = __crypto_ablkcipher_cast(tfm); + crt->ivsize = alg->ivsize; ++ crt->has_setkey = alg->max_keysize; + + return 0; + } +@@ -460,6 +461,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, + crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; + crt->base = __crypto_ablkcipher_cast(tfm); + crt->ivsize = alg->ivsize; ++ crt->has_setkey = alg->max_keysize; + + return 0; + } +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index 1aaa555fab56..68ec1ac4104a 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type) + goto unlock; + + type->ops->owner = THIS_MODULE; ++ if (type->ops_nokey) ++ type->ops_nokey->owner = THIS_MODULE; + node->type = type; + list_add(&node->list, &alg_types); + err = 0; +@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock) + } + EXPORT_SYMBOL_GPL(af_alg_release); + ++void af_alg_release_parent(struct sock *sk) ++{ ++ struct alg_sock *ask = alg_sk(sk); ++ unsigned int nokey = ask->nokey_refcnt; ++ bool last = nokey && !ask->refcnt; ++ ++ sk = ask->parent; ++ ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ ask->nokey_refcnt -= nokey; ++ if (!last) ++ last = !--ask->refcnt; ++ release_sock(sk); ++ ++ if (last) ++ sock_put(sk); ++} ++EXPORT_SYMBOL_GPL(af_alg_release_parent); ++ + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + { + struct sock *sk = sock->sk; +@@ -132,6 +154,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_alg *sa = (void *)uaddr; + const struct af_alg_type *type; + void *private; ++ int err; + + if (sock->state == SS_CONNECTED) + return -EINVAL; +@@ -157,16 +180,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + return PTR_ERR(private); + } + ++ err = -EBUSY; + lock_sock(sk); ++ if (ask->refcnt | ask->nokey_refcnt) ++ goto unlock; + + swap(ask->type, type); + swap(ask->private, private); + ++ err = 0; ++ ++unlock: + release_sock(sk); + + alg_do_release(type, private); + +- return 0; ++ return err; + } + + static int alg_setkey(struct sock *sk, char __user *ukey, +@@ -199,11 +228,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; +- int err = -ENOPROTOOPT; ++ int err = -EBUSY; + + lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock; ++ + type = ask->type; + ++ err = -ENOPROTOOPT; + if (level != SOL_ALG || !type) + goto unlock; + +@@ -228,6 +261,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + struct alg_sock *ask = alg_sk(sk); + const struct af_alg_type *type; + struct sock *sk2; ++ unsigned int nokey; + int err; + + lock_sock(sk); +@@ -247,18 +281,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + security_sk_clone(sk, sk2); + + err = type->accept(ask->private, sk2); ++ ++ nokey = err == -ENOKEY; ++ if (nokey && type->accept_nokey) ++ err = type->accept_nokey(ask->private, sk2); ++ + if (err) + goto unlock; + + sk2->sk_family = PF_ALG; + +- 
sock_hold(sk); ++ if (nokey || !ask->refcnt++) ++ sock_hold(sk); ++ ask->nokey_refcnt += nokey; + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; ++ alg_sk(sk2)->nokey_refcnt = nokey; + + newsock->ops = type->ops; + newsock->state = SS_CONNECTED; + ++ if (nokey) ++ newsock->ops = type->ops_nokey; ++ + err = 0; + + unlock: +diff --git a/crypto/ahash.c b/crypto/ahash.c +index bcd5efc7eb4c..781a8a73a7ff 100644 +--- a/crypto/ahash.c ++++ b/crypto/ahash.c +@@ -370,6 +370,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) + struct ahash_alg *alg = crypto_ahash_alg(hash); + + hash->setkey = ahash_nosetkey; ++ hash->has_setkey = false; + hash->export = ahash_no_export; + hash->import = ahash_no_import; + +@@ -382,8 +383,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) + hash->finup = alg->finup ?: ahash_def_finup; + hash->digest = alg->digest; + +- if (alg->setkey) ++ if (alg->setkey) { + hash->setkey = alg->setkey; ++ hash->has_setkey = true; ++ } + if (alg->export) + hash->export = alg->export; + if (alg->import) +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index c542c0d88afd..d11d431251f7 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -34,6 +34,11 @@ struct hash_ctx { + struct ahash_request req; + }; + ++struct algif_hash_tfm { ++ struct crypto_ahash *hash; ++ bool has_key; ++}; ++ + static int hash_sendmsg(struct kiocb *unused, struct socket *sock, + struct msghdr *msg, size_t ignored) + { +@@ -248,19 +253,151 @@ static struct proto_ops algif_hash_ops = { + .accept = hash_accept, + }; + ++static int hash_check_key(struct socket *sock) ++{ ++ int err = 0; ++ struct sock *psk; ++ struct alg_sock *pask; ++ struct algif_hash_tfm *tfm; ++ struct sock *sk = sock->sk; ++ struct alg_sock *ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock_child; ++ ++ psk = ask->parent; ++ pask = alg_sk(ask->parent); ++ tfm = pask->private; ++ ++ err = -ENOKEY; ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING); ++ if (!tfm->has_key) ++ goto unlock; ++ ++ if (!pask->refcnt++) ++ sock_hold(psk); ++ ++ ask->refcnt = 1; ++ sock_put(psk); ++ ++ err = 0; ++ ++unlock: ++ release_sock(psk); ++unlock_child: ++ release_sock(sk); ++ ++ return err; ++} ++ ++static int hash_sendmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t size) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_sendmsg(unused, sock, msg, size); ++} ++ ++static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page, ++ int offset, size_t size, int flags) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_sendpage(sock, page, offset, size, flags); ++} ++ ++static int hash_recvmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t ignored, int flags) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_recvmsg(unused, sock, msg, ignored, flags); ++} ++ ++static int hash_accept_nokey(struct socket *sock, struct socket *newsock, ++ int flags) ++{ ++ int err; ++ ++ err = hash_check_key(sock); ++ if (err) ++ return err; ++ ++ return hash_accept(sock, newsock, flags); ++} ++ ++static struct proto_ops algif_hash_ops_nokey = { ++ .family = PF_ALG, ++ ++ .connect = sock_no_connect, ++ .socketpair = sock_no_socketpair, ++ .getname = sock_no_getname, ++ .ioctl = sock_no_ioctl, ++ .listen = sock_no_listen, ++ .shutdown = sock_no_shutdown, ++ .getsockopt = sock_no_getsockopt, ++ .mmap = 
sock_no_mmap, ++ .bind = sock_no_bind, ++ .setsockopt = sock_no_setsockopt, ++ .poll = sock_no_poll, ++ ++ .release = af_alg_release, ++ .sendmsg = hash_sendmsg_nokey, ++ .sendpage = hash_sendpage_nokey, ++ .recvmsg = hash_recvmsg_nokey, ++ .accept = hash_accept_nokey, ++}; ++ + static void *hash_bind(const char *name, u32 type, u32 mask) + { +- return crypto_alloc_ahash(name, type, mask); ++ struct algif_hash_tfm *tfm; ++ struct crypto_ahash *hash; ++ ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); ++ if (!tfm) ++ return ERR_PTR(-ENOMEM); ++ ++ hash = crypto_alloc_ahash(name, type, mask); ++ if (IS_ERR(hash)) { ++ kfree(tfm); ++ return ERR_CAST(hash); ++ } ++ ++ tfm->hash = hash; ++ ++ return tfm; + } + + static void hash_release(void *private) + { +- crypto_free_ahash(private); ++ struct algif_hash_tfm *tfm = private; ++ ++ crypto_free_ahash(tfm->hash); ++ kfree(tfm); + } + + static int hash_setkey(void *private, const u8 *key, unsigned int keylen) + { +- return crypto_ahash_setkey(private, key, keylen); ++ struct algif_hash_tfm *tfm = private; ++ int err; ++ ++ err = crypto_ahash_setkey(tfm->hash, key, keylen); ++ tfm->has_key = !err; ++ ++ return err; + } + + static void hash_sock_destruct(struct sock *sk) +@@ -274,12 +411,14 @@ static void hash_sock_destruct(struct sock *sk) + af_alg_release_parent(sk); + } + +-static int hash_accept_parent(void *private, struct sock *sk) ++static int hash_accept_parent_nokey(void *private, struct sock *sk) + { + struct hash_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); +- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private); +- unsigned ds = crypto_ahash_digestsize(private); ++ struct algif_hash_tfm *tfm = private; ++ struct crypto_ahash *hash = tfm->hash; ++ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); ++ unsigned ds = crypto_ahash_digestsize(hash); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) +@@ -299,7 +438,7 @@ static int hash_accept_parent(void *private, struct sock *sk) + + ask->private = ctx; + +- ahash_request_set_tfm(&ctx->req, private); ++ ahash_request_set_tfm(&ctx->req, hash); + ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + +@@ -308,12 +447,24 @@ static int hash_accept_parent(void *private, struct sock *sk) + return 0; + } + ++static int hash_accept_parent(void *private, struct sock *sk) ++{ ++ struct algif_hash_tfm *tfm = private; ++ ++ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) ++ return -ENOKEY; ++ ++ return hash_accept_parent_nokey(private, sk); ++} ++ + static const struct af_alg_type algif_type_hash = { + .bind = hash_bind, + .release = hash_release, + .setkey = hash_setkey, + .accept = hash_accept_parent, ++ .accept_nokey = hash_accept_parent_nokey, + .ops = &algif_hash_ops, ++ .ops_nokey = &algif_hash_ops_nokey, + .name = "hash", + .owner = THIS_MODULE + }; +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index 83187f497c7c..ea05c531db26 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -31,6 +31,11 @@ struct skcipher_sg_list { + struct scatterlist sg[0]; + }; + ++struct skcipher_tfm { ++ struct crypto_ablkcipher *skcipher; ++ bool has_key; ++}; ++ + struct skcipher_ctx { + struct list_head tsgl; + struct af_alg_sgl rsgl; +@@ -441,13 +446,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, + char __user *from = iov->iov_base; + + while (seglen) { +- sgl = list_first_entry(&ctx->tsgl, +- struct skcipher_sg_list, list); +- sg = sgl->sg; +- +- while (!sg->length) +- 
sg++; +- + used = ctx->used; + if (!used) { + err = skcipher_wait_for_data(sk, flags); +@@ -469,6 +467,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, + if (!used) + goto free; + ++ sgl = list_first_entry(&ctx->tsgl, ++ struct skcipher_sg_list, list); ++ sg = sgl->sg; ++ ++ while (!sg->length) ++ sg++; ++ + ablkcipher_request_set_crypt(&ctx->req, sg, + ctx->rsgl.sg, used, + ctx->iv); +@@ -544,19 +549,139 @@ static struct proto_ops algif_skcipher_ops = { + .poll = skcipher_poll, + }; + ++static int skcipher_check_key(struct socket *sock) ++{ ++ int err = 0; ++ struct sock *psk; ++ struct alg_sock *pask; ++ struct skcipher_tfm *tfm; ++ struct sock *sk = sock->sk; ++ struct alg_sock *ask = alg_sk(sk); ++ ++ lock_sock(sk); ++ if (ask->refcnt) ++ goto unlock_child; ++ ++ psk = ask->parent; ++ pask = alg_sk(ask->parent); ++ tfm = pask->private; ++ ++ err = -ENOKEY; ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING); ++ if (!tfm->has_key) ++ goto unlock; ++ ++ if (!pask->refcnt++) ++ sock_hold(psk); ++ ++ ask->refcnt = 1; ++ sock_put(psk); ++ ++ err = 0; ++ ++unlock: ++ release_sock(psk); ++unlock_child: ++ release_sock(sk); ++ ++ return err; ++} ++ ++static int skcipher_sendmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t size) ++{ ++ int err; ++ ++ err = skcipher_check_key(sock); ++ if (err) ++ return err; ++ ++ return skcipher_sendmsg(unused, sock, msg, size); ++} ++ ++static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, ++ int offset, size_t size, int flags) ++{ ++ int err; ++ ++ err = skcipher_check_key(sock); ++ if (err) ++ return err; ++ ++ return skcipher_sendpage(sock, page, offset, size, flags); ++} ++ ++static int skcipher_recvmsg_nokey(struct kiocb *unused, struct socket *sock, ++ struct msghdr *msg, size_t ignored, int flags) ++{ ++ int err; ++ ++ err = skcipher_check_key(sock); ++ if (err) ++ return err; ++ ++ return skcipher_recvmsg(unused, sock, msg, ignored, flags); ++} ++ ++static struct proto_ops algif_skcipher_ops_nokey = { ++ .family = PF_ALG, ++ ++ .connect = sock_no_connect, ++ .socketpair = sock_no_socketpair, ++ .getname = sock_no_getname, ++ .ioctl = sock_no_ioctl, ++ .listen = sock_no_listen, ++ .shutdown = sock_no_shutdown, ++ .getsockopt = sock_no_getsockopt, ++ .mmap = sock_no_mmap, ++ .bind = sock_no_bind, ++ .accept = sock_no_accept, ++ .setsockopt = sock_no_setsockopt, ++ ++ .release = af_alg_release, ++ .sendmsg = skcipher_sendmsg_nokey, ++ .sendpage = skcipher_sendpage_nokey, ++ .recvmsg = skcipher_recvmsg_nokey, ++ .poll = skcipher_poll, ++}; ++ + static void *skcipher_bind(const char *name, u32 type, u32 mask) + { +- return crypto_alloc_ablkcipher(name, type, mask); ++ struct skcipher_tfm *tfm; ++ struct crypto_ablkcipher *skcipher; ++ ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); ++ if (!tfm) ++ return ERR_PTR(-ENOMEM); ++ ++ skcipher = crypto_alloc_ablkcipher(name, type, mask); ++ if (IS_ERR(skcipher)) { ++ kfree(tfm); ++ return ERR_CAST(skcipher); ++ } ++ ++ tfm->skcipher = skcipher; ++ ++ return tfm; + } + + static void skcipher_release(void *private) + { +- crypto_free_ablkcipher(private); ++ struct skcipher_tfm *tfm = private; ++ ++ crypto_free_ablkcipher(tfm->skcipher); ++ kfree(tfm); + } + + static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) + { +- return crypto_ablkcipher_setkey(private, key, keylen); ++ struct skcipher_tfm *tfm = private; ++ int err; ++ ++ err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen); ++ tfm->has_key = !err; ++ 
++ return err; + } + + static void skcipher_sock_destruct(struct sock *sk) +@@ -571,24 +696,25 @@ static void skcipher_sock_destruct(struct sock *sk) + af_alg_release_parent(sk); + } + +-static int skcipher_accept_parent(void *private, struct sock *sk) ++static int skcipher_accept_parent_nokey(void *private, struct sock *sk) + { + struct skcipher_ctx *ctx; + struct alg_sock *ask = alg_sk(sk); +- unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private); ++ struct skcipher_tfm *tfm = private; ++ struct crypto_ablkcipher *skcipher = tfm->skcipher; ++ unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher); + + ctx = sock_kmalloc(sk, len, GFP_KERNEL); + if (!ctx) + return -ENOMEM; +- +- ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private), ++ ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher), + GFP_KERNEL); + if (!ctx->iv) { + sock_kfree_s(sk, ctx, len); + return -ENOMEM; + } + +- memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private)); ++ memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher)); + + INIT_LIST_HEAD(&ctx->tsgl); + ctx->len = len; +@@ -600,21 +726,33 @@ static int skcipher_accept_parent(void *private, struct sock *sk) + + ask->private = ctx; + +- ablkcipher_request_set_tfm(&ctx->req, private); ++ ablkcipher_request_set_tfm(&ctx->req, skcipher); + ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, +- af_alg_complete, &ctx->completion); ++ af_alg_complete, &ctx->completion); + + sk->sk_destruct = skcipher_sock_destruct; + + return 0; + } + ++static int skcipher_accept_parent(void *private, struct sock *sk) ++{ ++ struct skcipher_tfm *tfm = private; ++ ++ if (!tfm->has_key && crypto_ablkcipher_has_setkey(tfm->skcipher)) ++ return -ENOKEY; ++ ++ return skcipher_accept_parent_nokey(private, sk); ++} ++ + static const struct af_alg_type algif_type_skcipher = { + .bind = skcipher_bind, + .release = skcipher_release, + .setkey = skcipher_setkey, + .accept = skcipher_accept_parent, ++ .accept_nokey = skcipher_accept_parent_nokey, + .ops = &algif_skcipher_ops, ++ .ops_nokey = &algif_skcipher_ops_nokey, + .name = "skcipher", + .owner = THIS_MODULE + }; +diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c +index a79e7e9ab86e..39b09f25e3f5 100644 +--- a/crypto/blkcipher.c ++++ b/crypto/blkcipher.c +@@ -238,6 +238,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, + return blkcipher_walk_done(desc, walk, -EINVAL); + } + ++ bsize = min(walk->blocksize, n); ++ + walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | + BLKCIPHER_WALK_DIFF); + if (!scatterwalk_aligned(&walk->in, alignmask) || +@@ -250,7 +252,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, + } + } + +- bsize = min(walk->blocksize, n); + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + +@@ -458,6 +459,7 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) + } + crt->base = __crypto_ablkcipher_cast(tfm); + crt->ivsize = alg->ivsize; ++ crt->has_setkey = alg->max_keysize; + + return 0; + } +diff --git a/crypto/cryptd.c b/crypto/cryptd.c +index 75c415d37086..d85fab975514 100644 +--- a/crypto/cryptd.c ++++ b/crypto/cryptd.c +@@ -565,9 +565,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out) + + static int cryptd_hash_import(struct ahash_request *req, const void *in) + { +- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); ++ struct shash_desc 
*desc = cryptd_shash_desc(req); ++ ++ desc->tfm = ctx->child; ++ desc->flags = req->base.flags; + +- return crypto_shash_import(&rctx->desc, in); ++ return crypto_shash_import(desc, in); + } + + static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, +diff --git a/crypto/gcm.c b/crypto/gcm.c +index 451e420ce56c..a1ec756b8438 100644 +--- a/crypto/gcm.c ++++ b/crypto/gcm.c +@@ -109,7 +109,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, + struct crypto_ablkcipher *ctr = ctx->ctr; + struct { + be128 hash; +- u8 iv[8]; ++ u8 iv[16]; + + struct crypto_gcm_setkey_result result; + +diff --git a/crypto/shash.c b/crypto/shash.c +index 929058a68561..ac4d76350d1b 100644 +--- a/crypto/shash.c ++++ b/crypto/shash.c +@@ -353,9 +353,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) + crt->final = shash_async_final; + crt->finup = shash_async_finup; + crt->digest = shash_async_digest; ++ crt->setkey = shash_async_setkey; ++ ++ crt->has_setkey = alg->setkey != shash_no_setkey; + +- if (alg->setkey) +- crt->setkey = shash_async_setkey; + if (alg->export) + crt->export = shash_async_export; + if (alg->import) +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index fcd7d91cec34..070b843c37ee 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -647,7 +647,7 @@ static int ghes_proc(struct ghes *ghes) + ghes_do_proc(ghes, ghes->estatus); + out: + ghes_clear_estatus(ghes); +- return 0; ++ return rc; + } + + static void ghes_add_timer(struct ghes *ghes) +diff --git a/drivers/base/core.c b/drivers/base/core.c +index 2a19097a7cb1..986fc4eeaae6 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -827,11 +827,29 @@ static struct kobject *get_device_parent(struct device *dev, + return NULL; + } + ++static inline bool live_in_glue_dir(struct kobject *kobj, ++ struct device *dev) ++{ ++ if (!kobj || !dev->class || ++ kobj->kset != &dev->class->p->glue_dirs) ++ return false; ++ return true; ++} ++ ++static inline struct kobject *get_glue_dir(struct device *dev) ++{ ++ return dev->kobj.parent; ++} ++ ++/* ++ * make sure cleaning up dir as the last step, we need to make ++ * sure .release handler of kobject is run with holding the ++ * global lock ++ */ + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) + { + /* see if we live in a "glue" directory */ +- if (!glue_dir || !dev->class || +- glue_dir->kset != &dev->class->p->glue_dirs) ++ if (!live_in_glue_dir(glue_dir, dev)) + return; + + mutex_lock(&gdp_mutex); +@@ -839,11 +857,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) + mutex_unlock(&gdp_mutex); + } + +-static void cleanup_device_parent(struct device *dev) +-{ +- cleanup_glue_dir(dev, dev->kobj.parent); +-} +- + static int device_add_class_symlinks(struct device *dev) + { + int error; +@@ -1007,6 +1020,7 @@ int device_add(struct device *dev) + struct kobject *kobj; + struct class_interface *class_intf; + int error = -EINVAL; ++ struct kobject *glue_dir = NULL; + + dev = get_device(dev); + if (!dev) +@@ -1051,8 +1065,10 @@ int device_add(struct device *dev) + /* first, register with generic layer. 
*/ + /* we require the name to be set before, and pass NULL */ + error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); +- if (error) ++ if (error) { ++ glue_dir = get_glue_dir(dev); + goto Error; ++ } + + /* notify platform of device entry */ + if (platform_notify) +@@ -1135,11 +1151,11 @@ done: + device_remove_file(dev, &uevent_attr); + attrError: + kobject_uevent(&dev->kobj, KOBJ_REMOVE); ++ glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); + Error: +- cleanup_device_parent(dev); +- if (parent) +- put_device(parent); ++ cleanup_glue_dir(dev, glue_dir); ++ put_device(parent); + name_error: + kfree(dev->p); + dev->p = NULL; +@@ -1210,6 +1226,7 @@ void put_device(struct device *dev) + void device_del(struct device *dev) + { + struct device *parent = dev->parent; ++ struct kobject *glue_dir = NULL; + struct class_interface *class_intf; + + /* Notify clients of device removal. This call must come +@@ -1251,8 +1268,9 @@ void device_del(struct device *dev) + if (platform_notify_remove) + platform_notify_remove(dev); + kobject_uevent(&dev->kobj, KOBJ_REMOVE); +- cleanup_device_parent(dev); ++ glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); ++ cleanup_glue_dir(dev, glue_dir); + put_device(parent); + } + +diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c +index a5dca6affcbb..776fc08aff0b 100644 +--- a/drivers/block/drbd/drbd_main.c ++++ b/drivers/block/drbd/drbd_main.c +@@ -1771,7 +1771,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock, + * do we need to block DRBD_SIG if sock == &meta.socket ?? + * otherwise wake_asender() might interrupt some send_*Ack ! + */ +- rv = kernel_sendmsg(sock, &msg, &iov, 1, size); ++ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + if (rv == -EAGAIN) { + if (we_should_drop_the_connection(tconn, sock)) + break; +diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h +index 60103e2517ba..467cb48fcf38 100644 +--- a/drivers/block/xen-blkback/common.h ++++ b/drivers/block/xen-blkback/common.h +@@ -269,8 +269,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, + struct blkif_x86_32_request *src) + { + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; +- dst->operation = src->operation; +- switch (src->operation) { ++ dst->operation = ACCESS_ONCE(src->operation); ++ switch (dst->operation) { + case BLKIF_OP_READ: + case BLKIF_OP_WRITE: + case BLKIF_OP_WRITE_BARRIER: +@@ -305,8 +305,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, + struct blkif_x86_64_request *src) + { + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; +- dst->operation = src->operation; +- switch (src->operation) { ++ dst->operation = ACCESS_ONCE(src->operation); ++ switch (dst->operation) { + case BLKIF_OP_READ: + case BLKIF_OP_WRITE: + case BLKIF_OP_WRITE_BARRIER: +diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c +index 402ccfb625c5..b6ec73f320d6 100644 +--- a/drivers/char/hw_random/exynos-rng.c ++++ b/drivers/char/hw_random/exynos-rng.c +@@ -105,6 +105,7 @@ static int exynos_rng_probe(struct platform_device *pdev) + { + struct exynos_rng *exynos_rng; + struct resource *res; ++ int ret; + + exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng), + GFP_KERNEL); +@@ -132,7 +133,13 @@ static int exynos_rng_probe(struct platform_device *pdev) + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + +- return hwrng_register(&exynos_rng->rng); ++ ret = hwrng_register(&exynos_rng->rng); ++ if (ret) { ++ 
pm_runtime_dont_use_autosuspend(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); ++ } ++ ++ return ret; + } + + static int exynos_rng_remove(struct platform_device *pdev) +diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c +index d2903e772270..2798fb1f91e2 100644 +--- a/drivers/char/hw_random/omap-rng.c ++++ b/drivers/char/hw_random/omap-rng.c +@@ -127,7 +127,12 @@ static int omap_rng_probe(struct platform_device *pdev) + dev_set_drvdata(&pdev->dev, priv); + + pm_runtime_enable(&pdev->dev); +- pm_runtime_get_sync(&pdev->dev); ++ ret = pm_runtime_get_sync(&pdev->dev); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); ++ pm_runtime_put_noidle(&pdev->dev); ++ goto err_ioremap; ++ } + + ret = hwrng_register(&omap_rng_ops); + if (ret) +@@ -182,8 +187,15 @@ static int omap_rng_suspend(struct device *dev) + static int omap_rng_resume(struct device *dev) + { + struct omap_rng_private_data *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = pm_runtime_get_sync(dev); ++ if (ret < 0) { ++ dev_err(dev, "Failed to runtime_get device: %d\n", ret); ++ pm_runtime_put_noidle(dev); ++ return ret; ++ } + +- pm_runtime_get_sync(dev); + omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); + + return 0; +diff --git a/drivers/char/mem.c b/drivers/char/mem.c +index 1ccbe9482faa..598ece77ee9e 100644 +--- a/drivers/char/mem.c ++++ b/drivers/char/mem.c +@@ -68,12 +68,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) + u64 cursor = from; + + while (cursor < to) { +- if (!devmem_is_allowed(pfn)) { +- printk(KERN_INFO +- "Program %s tried to access /dev/mem between %Lx->%Lx.\n", +- current->comm, from, to); ++ if (!devmem_is_allowed(pfn)) + return 0; +- } + cursor += PAGE_SIZE; + pfn++; + } +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index 3b367973a802..d5cbb7c242f6 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -472,7 +472,7 @@ struct devfreq *devfreq_add_device(struct device *dev, + devfreq->profile->max_state * + devfreq->profile->max_state, + GFP_KERNEL); +- devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * ++ devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * + devfreq->profile->max_state, + GFP_KERNEL); + devfreq->last_stat_updated = jiffies; +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c +index a9d98cdd11f4..9e15fc8df060 100644 +--- a/drivers/edac/edac_mc.c ++++ b/drivers/edac/edac_mc.c +@@ -968,7 +968,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci, + mci->ue_mc += count; + + if (!enable_per_layer_report) { +- mci->ce_noinfo_count += count; ++ mci->ue_noinfo_count += count; + return; + } + +diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c +index 7bdb6fe63236..132131934c77 100644 +--- a/drivers/firewire/net.c ++++ b/drivers/firewire/net.c +@@ -73,13 +73,13 @@ struct rfc2734_header { + + #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) + #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) +-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) ++#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1) + #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) + #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) + +-#define fwnet_set_hdr_lf(lf) ((lf) << 30) ++#define fwnet_set_hdr_lf(lf) ((lf) << 30) + #define fwnet_set_hdr_ether_type(et) (et) +-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) ++#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16) + 
#define fwnet_set_hdr_fg_off(fgo) (fgo) + + #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) +@@ -591,6 +591,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + int retval; + u16 ether_type; + ++ if (len <= RFC2374_UNFRAG_HDR_SIZE) ++ return 0; ++ + hdr.w0 = be32_to_cpu(buf[0]); + lf = fwnet_get_hdr_lf(&hdr); + if (lf == RFC2374_HDR_UNFRAG) { +@@ -615,7 +618,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + return fwnet_finish_incoming_packet(net, skb, source_node_id, + is_broadcast, ether_type); + } ++ + /* A datagram fragment has been received, now the fun begins. */ ++ ++ if (len <= RFC2374_FRAG_HDR_SIZE) ++ return 0; ++ + hdr.w1 = ntohl(buf[1]); + buf += 2; + len -= RFC2374_FRAG_HDR_SIZE; +@@ -627,7 +635,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, + fg_off = fwnet_get_hdr_fg_off(&hdr); + } + datagram_label = fwnet_get_hdr_dgl(&hdr); +- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ ++ dg_size = fwnet_get_hdr_dg_size(&hdr); ++ ++ if (fg_off + len > dg_size) ++ return 0; + + spin_lock_irqsave(&dev->lock, flags); + +@@ -735,6 +746,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, + fw_send_response(card, r, rcode); + } + ++static int gasp_source_id(__be32 *p) ++{ ++ return be32_to_cpu(p[0]) >> 16; ++} ++ ++static u32 gasp_specifier_id(__be32 *p) ++{ ++ return (be32_to_cpu(p[0]) & 0xffff) << 8 | ++ (be32_to_cpu(p[1]) & 0xff000000) >> 24; ++} ++ ++static u32 gasp_version(__be32 *p) ++{ ++ return be32_to_cpu(p[1]) & 0xffffff; ++} ++ + static void fwnet_receive_broadcast(struct fw_iso_context *context, + u32 cycle, size_t header_length, void *header, void *data) + { +@@ -744,9 +771,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, + __be32 *buf_ptr; + int retval; + u32 length; +- u16 source_node_id; +- u32 specifier_id; +- u32 ver; + unsigned long offset; + unsigned long flags; + +@@ -763,22 +787,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, + + spin_unlock_irqrestore(&dev->lock, flags); + +- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 +- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; +- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; +- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; +- +- if (specifier_id == IANA_SPECIFIER_ID && +- (ver == RFC2734_SW_VERSION ++ if (length > IEEE1394_GASP_HDR_SIZE && ++ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID && ++ (gasp_version(buf_ptr) == RFC2734_SW_VERSION + #if IS_ENABLED(CONFIG_IPV6) +- || ver == RFC3146_SW_VERSION ++ || gasp_version(buf_ptr) == RFC3146_SW_VERSION + #endif +- )) { +- buf_ptr += 2; +- length -= IEEE1394_GASP_HDR_SIZE; +- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, ++ )) ++ fwnet_incoming_packet(dev, buf_ptr + 2, ++ length - IEEE1394_GASP_HDR_SIZE, ++ gasp_source_id(buf_ptr), + context->card->generation, true); +- } + + packet.payload_length = dev->rcv_buffer_size; + packet.interrupt = 1; +diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c +index 2aa3ca215bd6..d5376aa1c5e1 100644 +--- a/drivers/gpio/gpio-mpc8xxx.c ++++ b/drivers/gpio/gpio-mpc8xxx.c +@@ -295,7 +295,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq, + mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; + + irq_set_chip_data(virq, h->host_data); +- irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); ++ irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_edge_irq); + + 
return 0; + } +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index c24c35606836..121680fbebb9 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -3422,6 +3422,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, + int hdisplay, vdisplay; + int ret = -EINVAL; + ++ if (!drm_core_check_feature(dev, DRIVER_MODESET)) ++ return -EINVAL; ++ + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || + page_flip->reserved != 0) + return -EINVAL; +diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c +index 3c8c3dbf9378..ff320522f453 100644 +--- a/drivers/gpu/drm/qxl/qxl_draw.c ++++ b/drivers/gpu/drm/qxl/qxl_draw.c +@@ -114,6 +114,8 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, + palette_bo); + + ret = qxl_bo_kmap(*palette_bo, (void **)&pal); ++ if (ret) ++ return ret; + pal->num_ents = 2; + pal->unique = unique++; + if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index 8ac333094991..4d09582744e6 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -257,6 +257,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) + atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); + atombios_blank_crtc(crtc, ATOM_DISABLE); + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ /* Make sure vblank interrupt is still enabled if needed */ ++ radeon_irq_set(rdev); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +index bc73021d3596..ae0d7b1cb9aa 100644 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +@@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) + WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); + } + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ /* Make sure vblank interrupt is still enabled if needed */ ++ radeon_irq_set(rdev); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c +index f7015592544f..6c92c20426d6 100644 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c +@@ -228,8 +228,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, + + rdev = radeon_get_rdev(bo->bdev); + ridx = radeon_copy_ring_index(rdev); +- old_start = old_mem->start << PAGE_SHIFT; +- new_start = new_mem->start << PAGE_SHIFT; ++ old_start = (u64)old_mem->start << PAGE_SHIFT; ++ new_start = (u64)new_mem->start << PAGE_SHIFT; + + switch (old_mem->mem_type) { + case TTM_PL_VRAM: +diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c +index 64c778f7756f..5f69c839d727 100644 +--- a/drivers/hv/hv_util.c ++++ b/drivers/hv/hv_util.c +@@ -244,10 +244,14 @@ static void heartbeat_onchannelcallback(void *context) + struct heartbeat_msg_data *heartbeat_msg; + u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; + +- vmbus_recvpacket(channel, hbeat_txf_buf, +- PAGE_SIZE, &recvlen, &requestid); ++ while (1) { ++ ++ vmbus_recvpacket(channel, hbeat_txf_buf, ++ PAGE_SIZE, &recvlen, &requestid); ++ ++ if (!recvlen) ++ break; + +- if (recvlen > 0) { + icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[ + sizeof(struct vmbuspipe_hdr)]; + +diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c +index 
d9299dee37d1..dddaa161aadb 100644 +--- a/drivers/hwmon/adt7411.c ++++ b/drivers/hwmon/adt7411.c +@@ -30,6 +30,7 @@ + + #define ADT7411_REG_CFG1 0x18 + #define ADT7411_CFG1_START_MONITOR (1 << 0) ++#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3) + + #define ADT7411_REG_CFG2 0x19 + #define ADT7411_CFG2_DISABLE_AVG (1 << 5) +@@ -292,8 +293,10 @@ static int adt7411_probe(struct i2c_client *client, + mutex_init(&data->device_lock); + mutex_init(&data->update_lock); + ++ /* According to the datasheet, we must only write 1 to bit 3 */ + ret = adt7411_modify_bit(client, ADT7411_REG_CFG1, +- ADT7411_CFG1_START_MONITOR, 1); ++ ADT7411_CFG1_RESERVED_BIT3 ++ | ADT7411_CFG1_START_MONITOR, 1); + if (ret < 0) + return ret; + +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c +index ceabcfeb587c..c880d13f5405 100644 +--- a/drivers/i2c/busses/i2c-at91.c ++++ b/drivers/i2c/busses/i2c-at91.c +@@ -371,19 +371,57 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id) + + if (!irqstatus) + return IRQ_NONE; +- else if (irqstatus & AT91_TWI_RXRDY) +- at91_twi_read_next_byte(dev); +- else if (irqstatus & AT91_TWI_TXRDY) +- at91_twi_write_next_byte(dev); +- +- /* catch error flags */ +- dev->transfer_status |= status; + ++ /* ++ * When a NACK condition is detected, the I2C controller sets the NACK, ++ * TXCOMP and TXRDY bits all together in the Status Register (SR). ++ * ++ * 1 - Handling NACK errors with CPU write transfer. ++ * ++ * In such case, we should not write the next byte into the Transmit ++ * Holding Register (THR) otherwise the I2C controller would start a new ++ * transfer and the I2C slave is likely to reply by another NACK. ++ * ++ * 2 - Handling NACK errors with DMA write transfer. ++ * ++ * By setting the TXRDY bit in the SR, the I2C controller also triggers ++ * the DMA controller to write the next data into the THR. Then the ++ * result depends on the hardware version of the I2C controller. ++ * ++ * 2a - Without support of the Alternative Command mode. ++ * ++ * This is the worst case: the DMA controller is triggered to write the ++ * next data into the THR, hence starting a new transfer: the I2C slave ++ * is likely to reply by another NACK. ++ * Concurrently, this interrupt handler is likely to be called to manage ++ * the first NACK before the I2C controller detects the second NACK and ++ * sets once again the NACK bit into the SR. ++ * When handling the first NACK, this interrupt handler disables the I2C ++ * controller interruptions, especially the NACK interrupt. ++ * Hence, the NACK bit is pending into the SR. This is why we should ++ * read the SR to clear all pending interrupts at the beginning of ++ * at91_do_twi_transfer() before actually starting a new transfer. ++ * ++ * 2b - With support of the Alternative Command mode. ++ * ++ * When a NACK condition is detected, the I2C controller also locks the ++ * THR (and sets the LOCK bit in the SR): even though the DMA controller ++ * is triggered by the TXRDY bit to write the next data into the THR, ++ * this data actually won't go on the I2C bus hence a second NACK is not ++ * generated. 
++ */ + if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) { + at91_disable_twi_interrupts(dev); + complete(&dev->cmd_complete); ++ } else if (irqstatus & AT91_TWI_RXRDY) { ++ at91_twi_read_next_byte(dev); ++ } else if (irqstatus & AT91_TWI_TXRDY) { ++ at91_twi_write_next_byte(dev); + } + ++ /* catch error flags */ ++ dev->transfer_status |= status; ++ + return IRQ_HANDLED; + } + +@@ -391,6 +429,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + { + int ret; + bool has_unre_flag = dev->pdata->has_unre_flag; ++ unsigned sr; + + /* + * WARNING: the TXCOMP bit in the Status Register is NOT a clear on +@@ -426,13 +465,16 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + INIT_COMPLETION(dev->cmd_complete); + dev->transfer_status = 0; + ++ /* Clear pending interrupts, such as NACK. */ ++ sr = at91_twi_read(dev, AT91_TWI_SR); ++ + if (!dev->buf_len) { + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + } else if (dev->msg->flags & I2C_M_RD) { + unsigned start_flags = AT91_TWI_START; + +- if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { ++ if (sr & AT91_TWI_RXRDY) { + dev_err(dev->dev, "RXRDY still set!"); + at91_twi_read(dev, AT91_TWI_RHR); + } +diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c +index 0f3752967c4b..773a6f5a509f 100644 +--- a/drivers/i2c/busses/i2c-eg20t.c ++++ b/drivers/i2c/busses/i2c-eg20t.c +@@ -798,13 +798,6 @@ static int pch_i2c_probe(struct pci_dev *pdev, + /* Set the number of I2C channel instance */ + adap_info->ch_num = id->driver_data; + +- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, +- KBUILD_MODNAME, adap_info); +- if (ret) { +- pch_pci_err(pdev, "request_irq FAILED\n"); +- goto err_request_irq; +- } +- + for (i = 0; i < adap_info->ch_num; i++) { + pch_adap = &adap_info->pch_data[i].pch_adapter; + adap_info->pch_i2c_suspended = false; +@@ -821,6 +814,17 @@ static int pch_i2c_probe(struct pci_dev *pdev, + adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i; + + pch_adap->dev.parent = &pdev->dev; ++ } ++ ++ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, ++ KBUILD_MODNAME, adap_info); ++ if (ret) { ++ pch_pci_err(pdev, "request_irq FAILED\n"); ++ goto err_request_irq; ++ } ++ ++ for (i = 0; i < adap_info->ch_num; i++) { ++ pch_adap = &adap_info->pch_data[i].pch_adapter; + + pch_i2c_init(&adap_info->pch_data[i]); + +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index 9d539cbfc833..c0e4143bee90 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -1323,6 +1323,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) + /* add the driver to the list of i2c drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &i2c_bus_type; ++ INIT_LIST_HEAD(&driver->clients); + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound devices. 
+@@ -1341,7 +1342,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) + + pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); + +- INIT_LIST_HEAD(&driver->clients); + /* Walk the adapters that are already present */ + i2c_for_each_dev(driver, __process_new_driver); + +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c +index a22c427454db..4f9d178e5fd6 100644 +--- a/drivers/iio/accel/kxsd9.c ++++ b/drivers/iio/accel/kxsd9.c +@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, + if (ret < 0) + goto error_ret; + *val = ret; ++ ret = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SCALE: + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); + if (ret < 0) + goto error_ret; ++ *val = 0; + *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; + ret = IIO_VAL_INT_PLUS_MICRO; + break; +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index c410217fbe89..951a4f6a3b11 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -79,6 +79,8 @@ static struct ib_cm { + __be32 random_id_operand; + struct list_head timewait_list; + struct workqueue_struct *wq; ++ /* Sync on cm change port state */ ++ spinlock_t state_lock; + } cm; + + /* Counter indexes ordered by attribute ID */ +@@ -160,6 +162,8 @@ struct cm_port { + struct ib_mad_agent *mad_agent; + struct kobject port_obj; + u8 port_num; ++ struct list_head cm_priv_prim_list; ++ struct list_head cm_priv_altr_list; + struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; + }; + +@@ -237,6 +241,12 @@ struct cm_id_private { + u8 service_timeout; + u8 target_ack_delay; + ++ struct list_head prim_list; ++ struct list_head altr_list; ++ /* Indicates that the send port mad is registered and av is set */ ++ int prim_send_port_not_ready; ++ int altr_send_port_not_ready; ++ + struct list_head work_list; + atomic_t work_count; + }; +@@ -255,19 +265,46 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, + struct ib_mad_agent *mad_agent; + struct ib_mad_send_buf *m; + struct ib_ah *ah; ++ struct cm_av *av; ++ unsigned long flags, flags2; ++ int ret = 0; + ++ /* don't let the port to be released till the agent is down */ ++ spin_lock_irqsave(&cm.state_lock, flags2); ++ spin_lock_irqsave(&cm.lock, flags); ++ if (!cm_id_priv->prim_send_port_not_ready) ++ av = &cm_id_priv->av; ++ else if (!cm_id_priv->altr_send_port_not_ready && ++ (cm_id_priv->alt_av.port)) ++ av = &cm_id_priv->alt_av; ++ else { ++ pr_info("%s: not valid CM id\n", __func__); ++ ret = -ENODEV; ++ spin_unlock_irqrestore(&cm.lock, flags); ++ goto out; ++ } ++ spin_unlock_irqrestore(&cm.lock, flags); ++ /* Make sure the port haven't released the mad yet */ + mad_agent = cm_id_priv->av.port->mad_agent; +- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); +- if (IS_ERR(ah)) +- return PTR_ERR(ah); ++ if (!mad_agent) { ++ pr_info("%s: not a valid MAD agent\n", __func__); ++ ret = -ENODEV; ++ goto out; ++ } ++ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr); ++ if (IS_ERR(ah)) { ++ ret = PTR_ERR(ah); ++ goto out; ++ } + + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, +- cm_id_priv->av.pkey_index, ++ av->pkey_index, + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, + GFP_ATOMIC); + if (IS_ERR(m)) { + ib_destroy_ah(ah); +- return PTR_ERR(m); ++ ret = PTR_ERR(m); ++ goto out; + } + + /* Timeout set by caller if response is expected. 
*/ +@@ -277,7 +314,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, + atomic_inc(&cm_id_priv->refcount); + m->context[0] = cm_id_priv; + *msg = m; +- return 0; ++ ++out: ++ spin_unlock_irqrestore(&cm.state_lock, flags2); ++ return ret; + } + + static int cm_alloc_response_msg(struct cm_port *port, +@@ -346,7 +386,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, + grh, &av->ah_attr); + } + +-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) ++static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, ++ struct cm_id_private *cm_id_priv) + { + struct cm_device *cm_dev; + struct cm_port *port = NULL; +@@ -376,7 +417,18 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) + ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, + &av->ah_attr); + av->timeout = path->packet_life_time + 1; +- return 0; ++ ++ spin_lock_irqsave(&cm.lock, flags); ++ if (&cm_id_priv->av == av) ++ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); ++ else if (&cm_id_priv->alt_av == av) ++ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); ++ else ++ ret = -EINVAL; ++ ++ spin_unlock_irqrestore(&cm.lock, flags); ++ ++ return ret; + } + + static int cm_alloc_id(struct cm_id_private *cm_id_priv) +@@ -716,6 +768,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, + spin_lock_init(&cm_id_priv->lock); + init_completion(&cm_id_priv->comp); + INIT_LIST_HEAD(&cm_id_priv->work_list); ++ INIT_LIST_HEAD(&cm_id_priv->prim_list); ++ INIT_LIST_HEAD(&cm_id_priv->altr_list); + atomic_set(&cm_id_priv->work_count, -1); + atomic_set(&cm_id_priv->refcount, 1); + return &cm_id_priv->id; +@@ -914,6 +968,15 @@ retest: + break; + } + ++ spin_lock_irq(&cm.lock); ++ if (!list_empty(&cm_id_priv->altr_list) && ++ (!cm_id_priv->altr_send_port_not_ready)) ++ list_del(&cm_id_priv->altr_list); ++ if (!list_empty(&cm_id_priv->prim_list) && ++ (!cm_id_priv->prim_send_port_not_ready)) ++ list_del(&cm_id_priv->prim_list); ++ spin_unlock_irq(&cm.lock); ++ + cm_free_id(cm_id->local_id); + cm_deref_id(cm_id_priv); + wait_for_completion(&cm_id_priv->comp); +@@ -1137,12 +1200,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, + goto out; + } + +- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); ++ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, ++ cm_id_priv); + if (ret) + goto error1; + if (param->alternate_path) { + ret = cm_init_av_by_path(param->alternate_path, +- &cm_id_priv->alt_av); ++ &cm_id_priv->alt_av, cm_id_priv); + if (ret) + goto error1; + } +@@ -1562,7 +1626,8 @@ static int cm_req_handler(struct cm_work *work) + + cm_process_routed_req(req_msg, work->mad_recv_wc->wc); + cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); +- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); ++ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, ++ cm_id_priv); + if (ret) { + ib_get_cached_gid(work->port->cm_dev->ib_device, + work->port->port_num, 0, &work->path[0].sgid); +@@ -1572,7 +1637,8 @@ static int cm_req_handler(struct cm_work *work) + goto rejected; + } + if (req_msg->alt_local_lid) { +- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); ++ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, ++ cm_id_priv); + if (ret) { + ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, + &work->path[0].sgid, +@@ -2627,7 +2693,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, + goto out; + } + +- ret = 
cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); ++ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, ++ cm_id_priv); + if (ret) + goto out; + cm_id_priv->alt_av.timeout = +@@ -2739,7 +2806,8 @@ static int cm_lap_handler(struct cm_work *work) + cm_init_av_for_response(work->port, work->mad_recv_wc->wc, + work->mad_recv_wc->recv_buf.grh, + &cm_id_priv->av); +- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); ++ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, ++ cm_id_priv); + ret = atomic_inc_and_test(&cm_id_priv->work_count); + if (!ret) + list_add_tail(&work->list, &cm_id_priv->work_list); +@@ -2931,7 +2999,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, + return -EINVAL; + + cm_id_priv = container_of(cm_id, struct cm_id_private, id); +- ret = cm_init_av_by_path(param->path, &cm_id_priv->av); ++ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); + if (ret) + goto out; + +@@ -3352,7 +3420,9 @@ out: + static int cm_migrate(struct ib_cm_id *cm_id) + { + struct cm_id_private *cm_id_priv; ++ struct cm_av tmp_av; + unsigned long flags; ++ int tmp_send_port_not_ready; + int ret = 0; + + cm_id_priv = container_of(cm_id, struct cm_id_private, id); +@@ -3361,7 +3431,14 @@ static int cm_migrate(struct ib_cm_id *cm_id) + (cm_id->lap_state == IB_CM_LAP_UNINIT || + cm_id->lap_state == IB_CM_LAP_IDLE)) { + cm_id->lap_state = IB_CM_LAP_IDLE; ++ /* Swap address vector */ ++ tmp_av = cm_id_priv->av; + cm_id_priv->av = cm_id_priv->alt_av; ++ cm_id_priv->alt_av = tmp_av; ++ /* Swap port send ready state */ ++ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; ++ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; ++ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; + } else + ret = -EINVAL; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); +@@ -3767,6 +3844,9 @@ static void cm_add_one(struct ib_device *ib_device) + port->cm_dev = cm_dev; + port->port_num = i; + ++ INIT_LIST_HEAD(&port->cm_priv_prim_list); ++ INIT_LIST_HEAD(&port->cm_priv_altr_list); ++ + ret = cm_create_port_fs(port); + if (ret) + goto error1; +@@ -3813,6 +3893,8 @@ static void cm_remove_one(struct ib_device *ib_device) + { + struct cm_device *cm_dev; + struct cm_port *port; ++ struct cm_id_private *cm_id_priv; ++ struct ib_mad_agent *cur_mad_agent; + struct ib_port_modify port_modify = { + .clr_port_cap_mask = IB_PORT_CM_SUP + }; +@@ -3830,10 +3912,22 @@ static void cm_remove_one(struct ib_device *ib_device) + for (i = 1; i <= ib_device->phys_port_cnt; i++) { + port = cm_dev->port[i-1]; + ib_modify_port(ib_device, port->port_num, 0, &port_modify); +- ib_unregister_mad_agent(port->mad_agent); ++ /* Mark all the cm_id's as not valid */ ++ spin_lock_irq(&cm.lock); ++ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) ++ cm_id_priv->altr_send_port_not_ready = 1; ++ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) ++ cm_id_priv->prim_send_port_not_ready = 1; ++ spin_unlock_irq(&cm.lock); + flush_workqueue(cm.wq); ++ spin_lock_irq(&cm.state_lock); ++ cur_mad_agent = port->mad_agent; ++ port->mad_agent = NULL; ++ spin_unlock_irq(&cm.state_lock); ++ ib_unregister_mad_agent(cur_mad_agent); + cm_remove_port_fs(port); + } ++ + device_unregister(cm_dev->device); + kfree(cm_dev); + } +@@ -3846,6 +3940,7 @@ static int __init ib_cm_init(void) + INIT_LIST_HEAD(&cm.device_list); + rwlock_init(&cm.device_lock); + spin_lock_init(&cm.lock); ++ spin_lock_init(&cm.state_lock); + 
cm.listen_service_table = RB_ROOT; + cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); + cm.remote_id_table = RB_ROOT; +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c +index d2360a8ef0b2..180d7f436ed5 100644 +--- a/drivers/infiniband/core/multicast.c ++++ b/drivers/infiniband/core/multicast.c +@@ -106,7 +106,6 @@ struct mcast_group { + atomic_t refcount; + enum mcast_group_state state; + struct ib_sa_query *query; +- int query_id; + u16 pkey_index; + u8 leave_state; + int retries; +@@ -339,11 +338,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member) + member->multicast.comp_mask, + 3000, GFP_KERNEL, join_handler, group, + &group->query); +- if (ret >= 0) { +- group->query_id = ret; +- ret = 0; +- } +- return ret; ++ return (ret > 0) ? 0 : ret; + } + + static int send_leave(struct mcast_group *group, u8 leave_state) +@@ -363,11 +358,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state) + IB_SA_MCMEMBER_REC_JOIN_STATE, + 3000, GFP_KERNEL, leave_handler, + group, &group->query); +- if (ret >= 0) { +- group->query_id = ret; +- ret = 0; +- } +- return ret; ++ return (ret > 0) ? 0 : ret; + } + + static void join_group(struct mcast_group *group, struct mcast_member *member, +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index f50623d07a75..37b720794148 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -224,12 +224,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, + container_of(uobj, struct ib_uqp_object, uevent.uobject); + + idr_remove_uobj(&ib_uverbs_qp_idr, uobj); +- if (qp != qp->real_qp) { +- ib_close_qp(qp); +- } else { ++ if (qp == qp->real_qp) + ib_uverbs_detach_umcast(qp, uqp); +- ib_destroy_qp(qp); +- } ++ ib_destroy_qp(qp); + ib_uverbs_release_uevent(file, &uqp->uevent); + kfree(uqp); + } +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c +index d5e60f44ba5a..5b8a62c6bc8d 100644 +--- a/drivers/infiniband/hw/mlx4/cq.c ++++ b/drivers/infiniband/hw/mlx4/cq.c +@@ -239,11 +239,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector + if (context) + if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { + err = -EFAULT; +- goto err_dbmap; ++ goto err_cq_free; + } + + return &cq->ibcq; + ++err_cq_free: ++ mlx4_cq_free(dev->dev, &cq->mcq); ++ + err_dbmap: + if (context) + mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); +diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c +index 25b2cdff00f8..27bedc39b47c 100644 +--- a/drivers/infiniband/hw/mlx4/mcg.c ++++ b/drivers/infiniband/hw/mlx4/mcg.c +@@ -483,7 +483,7 @@ static u8 get_leave_state(struct mcast_group *group) + if (!group->members[i]) + leave_state |= (1 << i); + +- return leave_state & (group->rec.scope_join_state & 7); ++ return leave_state & (group->rec.scope_join_state & 0xf); + } + + static int join_group(struct mcast_group *group, int slave, u8 join_mask) +@@ -558,8 +558,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) + } else + mcg_warn_group(group, "DRIVER BUG\n"); + } else if (group->state == MCAST_LEAVE_SENT) { +- if (group->rec.scope_join_state & 7) +- group->rec.scope_join_state &= 0xf8; ++ if (group->rec.scope_join_state & 0xf) ++ group->rec.scope_join_state &= 0xf0; + group->state = MCAST_IDLE; + mutex_unlock(&group->lock); + if (release_group(group, 1)) +@@ -599,7 +599,7 @@ static int 
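
The mcg.c hunks around this point widen every join-state mask from 0x7 to 0xf. The low nibble of scope_join_state carries the member's join state, and masking with 0x7 silently dropped bit 3, apparently the send-only full-member state used on the SR-IOV multicast paths, so such joins and leaves were mis-accounted. A tiny demonstration of the difference (the 0x28 sample value is made up: scope 2 in the high nibble, join bit 3 in the low one):

#include <stdint.h>
#include <stdio.h>

#define JOIN_STATE_MASK 0x0f        /* all four join-state bits */

static uint8_t join_state(uint8_t scope_join_state)
{
    return scope_join_state & JOIN_STATE_MASK;
}

int main(void)
{
    uint8_t rec = 0x28;     /* scope 2, send-only full member (bit 3) */

    printf("old mask: 0x%x, new mask: 0x%x\n", rec & 0x7, join_state(rec));
    return 0;               /* 0x0 vs 0x8: the old mask loses the member */
}
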
handle_leave_req(struct mcast_group *group, u8 leave_mask, + static int handle_join_req(struct mcast_group *group, u8 join_mask, + struct mcast_req *req) + { +- u8 group_join_state = group->rec.scope_join_state & 7; ++ u8 group_join_state = group->rec.scope_join_state & 0xf; + int ref = 0; + u16 status; + struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; +@@ -684,8 +684,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work) + u8 cur_join_state; + + resp_join_state = ((struct ib_sa_mcmember_data *) +- group->response_sa_mad.data)->scope_join_state & 7; +- cur_join_state = group->rec.scope_join_state & 7; ++ group->response_sa_mad.data)->scope_join_state & 0xf; ++ cur_join_state = group->rec.scope_join_state & 0xf; + + if (method == IB_MGMT_METHOD_GET_RESP) { + /* successfull join */ +@@ -704,7 +704,7 @@ process_requests: + req = list_first_entry(&group->pending_list, struct mcast_req, + group_list); + sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; +- req_join_state = sa_data->scope_join_state & 0x7; ++ req_join_state = sa_data->scope_join_state & 0xf; + + /* For a leave request, we will immediately answer the VF, and + * update our internal counters. The actual leave will be sent +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h +index eb71aaa26a9a..fb9a7b340f1f 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h +@@ -460,6 +460,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, + struct ipoib_ah *address, u32 qpn); + void ipoib_reap_ah(struct work_struct *work); + ++struct ipoib_path *__path_find(struct net_device *dev, void *gid); + void ipoib_mark_paths_invalid(struct net_device *dev); + void ipoib_flush_paths(struct net_device *dev); + struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +index 3eceb61e3532..aa9ad2d70ddd 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +@@ -1290,6 +1290,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) + } + } + ++#define QPN_AND_OPTIONS_OFFSET 4 ++ + static void ipoib_cm_tx_start(struct work_struct *work) + { + struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, +@@ -1298,6 +1300,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) + struct ipoib_neigh *neigh; + struct ipoib_cm_tx *p; + unsigned long flags; ++ struct ipoib_path *path; + int ret; + + struct ib_sa_path_rec pathrec; +@@ -1310,7 +1313,19 @@ static void ipoib_cm_tx_start(struct work_struct *work) + p = list_entry(priv->cm.start_list.next, typeof(*p), list); + list_del_init(&p->list); + neigh = p->neigh; ++ + qpn = IPOIB_QPN(neigh->daddr); ++ /* ++ * As long as the search is with these 2 locks, ++ * path existence indicates its validity. 
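
The comment completing just above is the heart of the ipoib_cm fix: the neighbour's path is looked up again, under the same locks, immediately before its record is copied, so a concurrent path flush cannot leave the connected-mode worker copying from a freed ipoib_path. The same find-under-lock discipline in a standalone sketch, with a fixed-size table in place of the driver's rb-tree and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct path { char gid[16]; int record; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct path *table[8];       /* stands in for priv->path_tree */

/* caller must hold `lock`; the result is only guaranteed to stay
 * alive for as long as the lock is held */
static struct path *path_find(const char *gid)
{
    for (int i = 0; i < 8; i++)
        if (table[i] && !memcmp(table[i]->gid, gid, sizeof(table[i]->gid)))
            return table[i];
    return NULL;
}

static int start_tx(const char *dgid)
{
    int ret = -1;

    pthread_mutex_lock(&lock);
    struct path *p = path_find(dgid);
    if (p)
        ret = p->record;    /* safe: p cannot be flushed while locked */
    pthread_mutex_unlock(&lock);
    return ret;             /* negative: path vanished, skip neighbour */
}

int main(void)
{
    static struct path p = { "fe80::dead:beef", 42 };

    pthread_mutex_lock(&lock);
    table[0] = &p;
    pthread_mutex_unlock(&lock);
    printf("tx record: %d\n", start_tx(p.gid));
    return 0;
}
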
++ */ ++ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET); ++ if (!path) { ++ pr_info("%s ignore not valid path %pI6\n", ++ __func__, ++ neigh->daddr + QPN_AND_OPTIONS_OFFSET); ++ goto free_neigh; ++ } + memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); + + spin_unlock_irqrestore(&priv->lock, flags); +@@ -1322,6 +1337,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) + spin_lock_irqsave(&priv->lock, flags); + + if (ret) { ++free_neigh: + neigh = p->neigh; + if (neigh) { + neigh->cm = NULL; +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c +index 2cfa76f5d99e..39168d3cb7dc 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c +@@ -979,8 +979,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, + } + + if (level == IPOIB_FLUSH_LIGHT) { ++ int oper_up; + ipoib_mark_paths_invalid(dev); ++ /* Set IPoIB operation as down to prevent races between: ++ * the flush flow which leaves MCG and on the fly joins ++ * which can happen during that time. mcast restart task ++ * should deal with join requests we missed. ++ */ ++ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + ipoib_mcast_dev_flush(dev); ++ if (oper_up) ++ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); + } + + if (level >= IPOIB_FLUSH_NORMAL) +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index a481094af85f..375f9edd4027 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -251,7 +251,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + return -EINVAL; + } + +-static struct ipoib_path *__path_find(struct net_device *dev, void *gid) ++struct ipoib_path *__path_find(struct net_device *dev, void *gid) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); + struct rb_node *n = priv->path_tree.rb_node; +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 9870c540e6fb..2d8f9593fb18 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -1223,6 +1223,7 @@ static int __init i8042_create_kbd_port(void) + serio->start = i8042_start; + serio->stop = i8042_stop; + serio->close = i8042_port_close; ++ serio->ps2_cmd_mutex = &i8042_mutex; + serio->port_data = port; + serio->dev.parent = &i8042_platform_device->dev; + strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name)); +@@ -1248,6 +1249,7 @@ static int __init i8042_create_aux_port(int idx) + serio->write = i8042_aux_write; + serio->start = i8042_start; + serio->stop = i8042_stop; ++ serio->ps2_cmd_mutex = &i8042_mutex; + serio->port_data = port; + serio->dev.parent = &i8042_platform_device->dev; + if (idx < 0) { +@@ -1310,21 +1312,6 @@ static void i8042_unregister_ports(void) + } + } + +-/* +- * Checks whether port belongs to i8042 controller. 
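
The i8042 and libps2 hunks nearby replace the exported i8042_check_port_owner() plus chip-lock dance with a mutex pointer published on the serio port itself: both i8042 ports point at the one controller mutex, other PS/2 devices keep their private cmd_mutex, and libps2 simply locks whichever it finds. A compact userspace rendering of that selection, with an illustrative struct layout:

#include <pthread.h>

struct serio  { pthread_mutex_t *ps2_cmd_mutex; };  /* optional, bus-wide */
struct ps2dev { struct serio *serio; pthread_mutex_t cmd_mutex; };

/* one lock per command: the controller-wide mutex when the port
 * provides one, otherwise the device's own */
static pthread_mutex_t *cmd_lock_of(struct ps2dev *d)
{
    return d->serio->ps2_cmd_mutex ? d->serio->ps2_cmd_mutex
                                   : &d->cmd_mutex;
}

static void ps2_begin_command(struct ps2dev *d)
{
    pthread_mutex_lock(cmd_lock_of(d));
}

static void ps2_end_command(struct ps2dev *d)
{
    pthread_mutex_unlock(cmd_lock_of(d));
}

static pthread_mutex_t bus = PTHREAD_MUTEX_INITIALIZER;
static struct serio kbd_port = { &bus }, aux_port = { &bus };
static struct ps2dev kbd = { &kbd_port, PTHREAD_MUTEX_INITIALIZER };
static struct ps2dev aux = { &aux_port, PTHREAD_MUTEX_INITIALIZER };

int main(void)
{
    ps2_begin_command(&kbd);    /* takes the shared bus mutex */
    ps2_end_command(&kbd);
    ps2_begin_command(&aux);    /* same mutex: KBD and AUX serialize */
    ps2_end_command(&aux);
    return 0;
}

Keeping both i8042 ports on one mutex preserves what the old chip lock provided, while devices behind other serio drivers lose nothing.
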
+- */ +-bool i8042_check_port_owner(const struct serio *port) +-{ +- int i; +- +- for (i = 0; i < I8042_NUM_PORTS; i++) +- if (i8042_ports[i].serio == port) +- return true; +- +- return false; +-} +-EXPORT_SYMBOL(i8042_check_port_owner); +- + static void i8042_free_irqs(void) + { + if (i8042_aux_irq_registered) +diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c +index 07a8363f3c5c..b5ec313cb9c9 100644 +--- a/drivers/input/serio/libps2.c ++++ b/drivers/input/serio/libps2.c +@@ -57,19 +57,17 @@ EXPORT_SYMBOL(ps2_sendbyte); + + void ps2_begin_command(struct ps2dev *ps2dev) + { +- mutex_lock(&ps2dev->cmd_mutex); ++ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; + +- if (i8042_check_port_owner(ps2dev->serio)) +- i8042_lock_chip(); ++ mutex_lock(m); + } + EXPORT_SYMBOL(ps2_begin_command); + + void ps2_end_command(struct ps2dev *ps2dev) + { +- if (i8042_check_port_owner(ps2dev->serio)) +- i8042_unlock_chip(); ++ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; + +- mutex_unlock(&ps2dev->cmd_mutex); ++ mutex_unlock(m); + } + EXPORT_SYMBOL(ps2_end_command); + +diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c +index 1418bdda61bb..ceaa790b71a2 100644 +--- a/drivers/input/touchscreen/ili210x.c ++++ b/drivers/input/touchscreen/ili210x.c +@@ -169,7 +169,7 @@ static ssize_t ili210x_calibrate(struct device *dev, + + return count; + } +-static DEVICE_ATTR(calibrate, 0644, NULL, ili210x_calibrate); ++static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate); + + static struct attribute *ili210x_attributes[] = { + &dev_attr_calibrate.attr, +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 6bde2a124c72..1c62c248da6a 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1991,6 +1991,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) + kfree(dom->aperture[i]); + } + ++ if (dom->domain.id) ++ domain_id_free(dom->domain.id); ++ + kfree(dom); + } + +@@ -2551,8 +2554,16 @@ static void update_device_table(struct protection_domain *domain) + { + struct iommu_dev_data *dev_data; + +- list_for_each_entry(dev_data, &domain->dev_list, list) ++ list_for_each_entry(dev_data, &domain->dev_list, list) { + set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); ++ ++ if (dev_data->alias_data == NULL) ++ continue; ++ ++ /* There is an alias, update device table entry for it */ ++ set_dte_entry(dev_data->alias_data->devid, domain, ++ dev_data->alias_data->ats.enabled); ++ } + } + + static void update_domain(struct protection_domain *domain) +diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h +index 8121e046b739..31fb3b0fd0e4 100644 +--- a/drivers/isdn/hardware/mISDN/ipac.h ++++ b/drivers/isdn/hardware/mISDN/ipac.h +@@ -217,6 +217,7 @@ struct ipac_hw { + #define ISAC_IND_DR 0x0 + #define ISAC_IND_SD 0x2 + #define ISAC_IND_DIS 0x3 ++#define ISAC_IND_DR6 0x5 + #define ISAC_IND_EI 0x6 + #define ISAC_IND_RSY 0x4 + #define ISAC_IND_ARD 0x8 +diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c +index ccd7d851be26..bac920c6022f 100644 +--- a/drivers/isdn/hardware/mISDN/mISDNipac.c ++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c +@@ -80,6 +80,7 @@ isac_ph_state_bh(struct dchannel *dch) + l1_event(dch->l1, HW_DEACT_CNF); + break; + case ISAC_IND_DR: ++ case ISAC_IND_DR6: + dch->state = 3; + l1_event(dch->l1, HW_DEACT_IND); + break; +@@ -660,6 +661,7 @@ isac_l1cmd(struct dchannel *dch, 
u32 cmd) + spin_lock_irqsave(isac->hwlock, flags); + if ((isac->state == ISAC_IND_EI) || + (isac->state == ISAC_IND_DR) || ++ (isac->state == ISAC_IND_DR6) || + (isac->state == ISAC_IND_RS)) + ph_command(isac, ISAC_CMD_TIM); + else +diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c +index 5cefb479c707..00bd80a63895 100644 +--- a/drivers/isdn/mISDN/socket.c ++++ b/drivers/isdn/mISDN/socket.c +@@ -717,6 +717,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) + if (!maddr || maddr->family != AF_ISDN) + return -EINVAL; + ++ if (addr_len < sizeof(struct sockaddr_mISDN)) ++ return -EINVAL; ++ + lock_sock(sk); + + if (_pms(sk)->dev) { +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c +index a9a47cd029d5..ace01a30f310 100644 +--- a/drivers/md/dm-flakey.c ++++ b/drivers/md/dm-flakey.c +@@ -286,15 +286,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) + pb->bio_submitted = true; + + /* +- * Map reads as normal only if corrupt_bio_byte set. ++ * Error reads if neither corrupt_bio_byte or drop_writes are set. ++ * Otherwise, flakey_end_io() will decide if the reads should be modified. + */ + if (bio_data_dir(bio) == READ) { +- /* If flags were specified, only corrupt those that match. */ +- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && +- all_corrupt_bio_flags_match(bio, fc)) +- goto map_bio; +- else ++ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags)) + return -EIO; ++ goto map_bio; + } + + /* +@@ -331,14 +329,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) + struct flakey_c *fc = ti->private; + struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); + +- /* +- * Corrupt successful READs while in down state. +- */ + if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { +- if (fc->corrupt_bio_byte) ++ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && ++ all_corrupt_bio_flags_match(bio, fc)) { ++ /* ++ * Corrupt successful matching READs while in down state. ++ */ + corrupt_bio_data(bio, fc); +- else ++ ++ } else if (!test_bit(DROP_WRITES, &fc->flags)) { ++ /* ++ * Error read during the down_interval if drop_writes ++ * wasn't configured. ++ */ + return -EIO; ++ } + } + + return error; +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index f69fed826a56..a77ef6cac62d 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -2323,6 +2323,7 @@ EXPORT_SYMBOL_GPL(dm_device_name); + + static void __dm_destroy(struct mapped_device *md, bool wait) + { ++ struct request_queue *q = md->queue; + struct dm_table *map; + + might_sleep(); +@@ -2333,6 +2334,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait) + set_bit(DMF_FREEING, &md->flags); + spin_unlock(&_minor_lock); + ++ spin_lock_irq(q->queue_lock); ++ queue_flag_set(QUEUE_FLAG_DYING, q); ++ spin_unlock_irq(q->queue_lock); ++ + /* + * Take suspend_lock so that presuspend and postsuspend methods + * do not race with internal suspend. 
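
Two device-mapper fixes sit back to back just above. dm-flakey used to decide a READ's fate in .map(), before knowing whether the I/O would even succeed; now .map() fails reads only when neither corrupt_bio_byte nor drop_writes is configured, and .end_io() corrupts or errors the rest once the result is known. Separately, __dm_destroy() now marks the request queue dying before teardown so late submitters are rejected instead of touching a dead device. The flakey read policy reduces to a pair of pure functions; the sketch below uses illustrative types rather than the dm target API, and flags_match folds the corrupt_bio_rw and flag checks into one parameter:

#include <stdio.h>

struct flakey_cfg { int corrupt_bio_byte; int drop_writes; };

enum verdict { PASS, FAIL, CORRUPT };

/* .map() while the device is "down": fail fast only if end_io
 * would have nothing left to decide */
static enum verdict map_read(const struct flakey_cfg *fc)
{
    if (!fc->corrupt_bio_byte && !fc->drop_writes)
        return FAIL;
    return PASS;        /* defer the real decision to end_io */
}

/* .end_io() on a READ that completed successfully while "down" */
static enum verdict end_io_read(const struct flakey_cfg *fc, int flags_match)
{
    if (fc->corrupt_bio_byte && flags_match)
        return CORRUPT; /* corrupt the data that just arrived */
    if (!fc->drop_writes)
        return FAIL;    /* nothing configured for reads: error them */
    return PASS;        /* drop_writes mode: reads pass through */
}

int main(void)
{
    struct flakey_cfg plain = { 0, 0 }, dropper = { 0, 1 };

    printf("map: %d %d\n", map_read(&plain), map_read(&dropper)); /* 1 0 */
    printf("end_io: %d\n", end_io_read(&dropper, 0));             /* 0 */
    return 0;
}
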
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c +index 2c7217fb1415..4a1346fb383e 100644 +--- a/drivers/media/dvb-frontends/mb86a20s.c ++++ b/drivers/media/dvb-frontends/mb86a20s.c +@@ -75,25 +75,27 @@ static struct regdata mb86a20s_init1[] = { + }; + + static struct regdata mb86a20s_init2[] = { +- { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 }, ++ { 0x50, 0xd1 }, { 0x51, 0x22 }, ++ { 0x39, 0x01 }, ++ { 0x71, 0x00 }, + { 0x3b, 0x21 }, +- { 0x3c, 0x38 }, ++ { 0x3c, 0x3a }, + { 0x01, 0x0d }, +- { 0x04, 0x08 }, { 0x05, 0x03 }, ++ { 0x04, 0x08 }, { 0x05, 0x05 }, + { 0x04, 0x0e }, { 0x05, 0x00 }, +- { 0x04, 0x0f }, { 0x05, 0x37 }, +- { 0x04, 0x0b }, { 0x05, 0x78 }, ++ { 0x04, 0x0f }, { 0x05, 0x14 }, ++ { 0x04, 0x0b }, { 0x05, 0x8c }, + { 0x04, 0x00 }, { 0x05, 0x00 }, +- { 0x04, 0x01 }, { 0x05, 0x1e }, +- { 0x04, 0x02 }, { 0x05, 0x07 }, +- { 0x04, 0x03 }, { 0x05, 0xd0 }, ++ { 0x04, 0x01 }, { 0x05, 0x07 }, ++ { 0x04, 0x02 }, { 0x05, 0x0f }, ++ { 0x04, 0x03 }, { 0x05, 0xa0 }, + { 0x04, 0x09 }, { 0x05, 0x00 }, + { 0x04, 0x0a }, { 0x05, 0xff }, +- { 0x04, 0x27 }, { 0x05, 0x00 }, ++ { 0x04, 0x27 }, { 0x05, 0x64 }, + { 0x04, 0x28 }, { 0x05, 0x00 }, +- { 0x04, 0x1e }, { 0x05, 0x00 }, +- { 0x04, 0x29 }, { 0x05, 0x64 }, +- { 0x04, 0x32 }, { 0x05, 0x02 }, ++ { 0x04, 0x1e }, { 0x05, 0xff }, ++ { 0x04, 0x29 }, { 0x05, 0x0a }, ++ { 0x04, 0x32 }, { 0x05, 0x0a }, + { 0x04, 0x14 }, { 0x05, 0x02 }, + { 0x04, 0x04 }, { 0x05, 0x00 }, + { 0x04, 0x05 }, { 0x05, 0x22 }, +@@ -101,8 +103,6 @@ static struct regdata mb86a20s_init2[] = { + { 0x04, 0x07 }, { 0x05, 0xd8 }, + { 0x04, 0x12 }, { 0x05, 0x00 }, + { 0x04, 0x13 }, { 0x05, 0xff }, +- { 0x04, 0x15 }, { 0x05, 0x4e }, +- { 0x04, 0x16 }, { 0x05, 0x20 }, + + /* + * On this demod, when the bit count reaches the count below, +@@ -156,42 +156,36 @@ static struct regdata mb86a20s_init2[] = { + { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */ + { 0x45, 0x04 }, /* CN symbol 4 */ + { 0x48, 0x04 }, /* CN manual mode */ +- ++ { 0x50, 0xd5 }, { 0x51, 0x01 }, + { 0x50, 0xd6 }, { 0x51, 0x1f }, + { 0x50, 0xd2 }, { 0x51, 0x03 }, +- { 0x50, 0xd7 }, { 0x51, 0xbf }, +- { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff }, +- { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c }, +- +- { 0x04, 0x40 }, { 0x05, 0x00 }, +- { 0x28, 0x00 }, { 0x2b, 0x08 }, +- { 0x28, 0x05 }, { 0x2b, 0x00 }, ++ { 0x50, 0xd7 }, { 0x51, 0x3f }, + { 0x1c, 0x01 }, +- { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f }, +- { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 }, +- { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 }, +- { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 }, +- { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 }, +- { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, +- { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 }, +- { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 }, +- { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b }, +- { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 }, +- { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d }, +- { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 }, +- { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b }, +- { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, +- { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 }, +- { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 }, +- { 0x28, 0x16 
}, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 }, +- { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, +- { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, +- { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef }, +- { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 }, +- { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 }, +- { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d }, +- { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 }, +- { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba }, ++ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 }, ++ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d }, ++ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, ++ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 }, ++ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 }, ++ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 }, ++ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, ++ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 }, ++ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e }, ++ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e }, ++ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 }, ++ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, ++ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 }, ++ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 }, ++ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe }, ++ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 }, ++ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee }, ++ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 }, ++ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f }, ++ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 }, ++ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 }, ++ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a }, ++ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc }, ++ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba }, ++ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 }, + { 0x50, 0x1e }, { 0x51, 0x5d }, + { 0x50, 0x22 }, { 0x51, 0x00 }, + { 0x50, 0x23 }, { 0x51, 0xc8 }, +@@ -200,9 +194,7 @@ static struct regdata mb86a20s_init2[] = { + { 0x50, 0x26 }, { 0x51, 0x00 }, + { 0x50, 0x27 }, { 0x51, 0xc3 }, + { 0x50, 0x39 }, { 0x51, 0x02 }, +- { 0xec, 0x0f }, +- { 0xeb, 0x1f }, +- { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, ++ { 0x50, 0xd5 }, { 0x51, 0x01 }, + { 0xd0, 0x00 }, + }; + +@@ -321,7 +313,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status) + if (val >= 7) + *status |= FE_HAS_SYNC; + +- if (val >= 8) /* Maybe 9? */ ++ /* ++ * Actually, on state S8, it starts receiving TS, but the TS ++ * output is only on normal state after the transition to S9. 
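
The comment being added here is the entire justification for the threshold change that follows: the demod starts emitting TS at state S8, but the output is only stable once it reaches S9, so FE_HAS_LOCK is now reported one state later. In sketch form, with the thresholds from the hunk and the standard fe_status_t bit values (bits for the lower states omitted):

#include <stdio.h>

#define FE_HAS_SYNC 0x08
#define FE_HAS_LOCK 0x10

static unsigned int status_from_state(unsigned int val)
{
    unsigned int status = 0;    /* states below 7 not modelled here */

    if (val >= 7)
        status |= FE_HAS_SYNC;
    if (val >= 9)               /* was >= 8: lock claimed one state early */
        status |= FE_HAS_LOCK;
    return status;
}

int main(void)
{
    for (unsigned int s = 7; s <= 9; s++)
        printf("state %u -> status 0x%02x\n", s, status_from_state(s));
    return 0;
}
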
++ */ ++ if (val >= 9) + *status |= FE_HAS_LOCK; + + dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n", +@@ -2080,6 +2076,11 @@ static void mb86a20s_release(struct dvb_frontend *fe) + kfree(state); + } + ++static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe) ++{ ++ return DVBFE_ALGO_HW; ++} ++ + static struct dvb_frontend_ops mb86a20s_ops; + + struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config, +@@ -2153,6 +2154,7 @@ static struct dvb_frontend_ops mb86a20s_ops = { + .read_status = mb86a20s_read_status_and_stats, + .read_signal_strength = mb86a20s_read_signal_strength_from_cache, + .tune = mb86a20s_tune, ++ .get_frontend_algo = mb86a20s_get_frontend_algo, + }; + + MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware"); +diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c +index 235ba657d52e..79a24efc03d6 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c ++++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c +@@ -1261,7 +1261,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, + dev->board.agc_analog_digital_select_gpio, + analog_or_digital); + +- return status; ++ if (status < 0) ++ return status; ++ ++ return 0; + } + + int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) +diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c +index 13249e5a7891..c13c32347ad2 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-cards.c ++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c +@@ -452,7 +452,7 @@ struct cx231xx_board cx231xx_boards[] = { + .output_mode = OUT_MODE_VIP11, + .demod_xfer_mode = 0, + .ctl_pin_status_mask = 0xFFFFFFC4, +- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */ ++ .agc_analog_digital_select_gpio = 0x1c, + .tuner_sif_gpio = -1, + .tuner_scl_gpio = -1, + .tuner_sda_gpio = -1, +diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c +index 4ba3ce09b713..6f5ffcc19356 100644 +--- a/drivers/media/usb/cx231xx/cx231xx-core.c ++++ b/drivers/media/usb/cx231xx/cx231xx-core.c +@@ -723,6 +723,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) + break; + case CX231XX_BOARD_CNXT_RDE_253S: + case CX231XX_BOARD_CNXT_RDU_253S: ++ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: + errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); + break; + case CX231XX_BOARD_HAUPPAUGE_EXETER: +@@ -747,7 +748,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) + case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: + case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: + case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: +- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); ++ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); + break; + default: + break; +diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c +index c4ff9739a7ae..d28d9068396f 100644 +--- a/drivers/media/usb/em28xx/em28xx-i2c.c ++++ b/drivers/media/usb/em28xx/em28xx-i2c.c +@@ -469,9 +469,8 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap, + int addr, rc, i; + u8 reg; + +- rc = rt_mutex_trylock(&dev->i2c_bus_lock); +- if (rc < 0) +- return rc; ++ if (!rt_mutex_trylock(&dev->i2c_bus_lock)) ++ return -EAGAIN; + + /* Switch I2C bus if needed */ + if (bus != dev->cur_i2c_bus && +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c +index 7604f4e5df40..af6a245dc505 100644 +--- a/drivers/mfd/mfd-core.c 
++++ b/drivers/mfd/mfd-core.c +@@ -263,6 +263,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) + clones[i]); + } + ++ put_device(dev); ++ + return 0; + } + EXPORT_SYMBOL(mfd_clone_cell); +diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c +index 4b7ea3fb143c..1f8f856946cd 100644 +--- a/drivers/misc/mei/nfc.c ++++ b/drivers/misc/mei/nfc.c +@@ -292,7 +292,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev) + return -ENOMEM; + + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); +- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { ++ if (bytes_recv < if_version_length) { + dev_err(&dev->pdev->dev, "Could not read IF version\n"); + ret = -EIO; + goto err; +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index a2863b7b9e21..ce34c492a887 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -2093,7 +2093,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, + set_capacity(md->disk, size); + + if (mmc_host_cmd23(card->host)) { +- if (mmc_card_mmc(card) || ++ if ((mmc_card_mmc(card) && ++ card->csd.mmca_vsn >= CSD_SPEC_VER_3) || + (mmc_card_sd(card) && + card->scr.cmds & SD_SCR_CMD23_SUPPORT)) + md->flags |= MMC_BLK_CMD23; +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c +index 4278a1787d08..f3a423213108 100644 +--- a/drivers/mmc/host/mxs-mmc.c ++++ b/drivers/mmc/host/mxs-mmc.c +@@ -674,13 +674,13 @@ static int mxs_mmc_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, mmc); + ++ spin_lock_init(&host->lock); ++ + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, + DRIVER_NAME, host); + if (ret) + goto out_free_dma; + +- spin_lock_init(&host->lock); +- + ret = mmc_add_host(mmc); + if (ret) + goto out_free_dma; +diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c +index 744ca5cacc9b..f9fa3fad728e 100644 +--- a/drivers/mtd/maps/pmcmsp-flash.c ++++ b/drivers/mtd/maps/pmcmsp-flash.c +@@ -75,15 +75,15 @@ static int __init init_msp_flash(void) + + printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); + +- msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); ++ msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL); + if (!msp_flash) + return -ENOMEM; + +- msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); ++ msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL); + if (!msp_parts) + goto free_msp_flash; + +- msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); ++ msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL); + if (!msp_maps) + goto free_msp_parts; + +diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c +index 32d5e40c6863..48b63e849067 100644 +--- a/drivers/mtd/mtd_blkdevs.c ++++ b/drivers/mtd/mtd_blkdevs.c +@@ -198,8 +198,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) + if (!dev) + return -ERESTARTSYS; /* FIXME: busy loop! 
-arnd*/ + +- mutex_lock(&dev->lock); + mutex_lock(&mtd_table_mutex); ++ mutex_lock(&dev->lock); + + if (dev->open) + goto unlock; +@@ -223,8 +223,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) + + unlock: + dev->open++; +- mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); ++ mutex_unlock(&mtd_table_mutex); + blktrans_dev_put(dev); + return ret; + +@@ -234,8 +234,8 @@ error_release: + error_put: + module_put(dev->tr->owner); + kref_put(&dev->ref, blktrans_dev_release); +- mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); ++ mutex_unlock(&mtd_table_mutex); + blktrans_dev_put(dev); + return ret; + } +@@ -247,8 +247,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode) + if (!dev) + return; + +- mutex_lock(&dev->lock); + mutex_lock(&mtd_table_mutex); ++ mutex_lock(&dev->lock); + + if (--dev->open) + goto unlock; +@@ -262,8 +262,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode) + __put_mtd_device(dev->mtd); + } + unlock: +- mutex_unlock(&mtd_table_mutex); + mutex_unlock(&dev->lock); ++ mutex_unlock(&mtd_table_mutex); + blktrans_dev_put(dev); + } + +diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c +index c3e15a558173..e4f16cf413a5 100644 +--- a/drivers/mtd/nand/davinci_nand.c ++++ b/drivers/mtd/nand/davinci_nand.c +@@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode) + unsigned long flags; + u32 val; + ++ /* Reset ECC hardware */ ++ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET); ++ + spin_lock_irqsave(&davinci_nand_lock, flags); + + /* Start 4-bit ECC calculation for read/write */ +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c +index bf8108d65b73..f6f1604deb8e 100644 +--- a/drivers/mtd/ubi/fastmap.c ++++ b/drivers/mtd/ubi/fastmap.c +@@ -438,10 +438,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, + unsigned long long ec = be64_to_cpu(ech->ec); + unmap_peb(ai, pnum); + dbg_bld("Adding PEB to free: %i", pnum); ++ + if (err == UBI_IO_FF_BITFLIPS) +- add_aeb(ai, free, pnum, ec, 1); +- else +- add_aeb(ai, free, pnum, ec, 0); ++ scrub = 1; ++ ++ add_aeb(ai, free, pnum, ec, scrub); + continue; + } else if (err == 0 || err == UBI_IO_BITFLIPS) { + dbg_bld("Found non empty PEB:%i in pool", pnum); +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index c0ed7c802819..ce41616d9d1a 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1565,9 +1565,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) + bond_dev->name, slave_dev->name); + } + +- /* already enslaved */ +- if (slave_dev->flags & IFF_SLAVE) { +- pr_debug("Error, Device was already enslaved\n"); ++ /* already in-use? 
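
A few hunks back, the mtd_blkdevs changes are purely about lock order: blktrans_open() and blktrans_release() each take mtd_table_mutex and dev->lock, and they previously nested them in the opposite order from the rest of the subsystem, the classic ABBA deadlock shape that lockdep complains about. The restored rule as a userspace skeleton:

#include <pthread.h>

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dev_lock    = PTHREAD_MUTEX_INITIALIZER;

static void blktrans_open(void)
{
    pthread_mutex_lock(&table_mutex);   /* outer lock always first */
    pthread_mutex_lock(&dev_lock);      /* inner lock second */
    /* ... open the device ... */
    pthread_mutex_unlock(&dev_lock);    /* release in reverse order */
    pthread_mutex_unlock(&table_mutex);
}

static void blktrans_release(void)
{
    pthread_mutex_lock(&table_mutex);   /* same order on every path */
    pthread_mutex_lock(&dev_lock);
    /* ... drop the last reference ... */
    pthread_mutex_unlock(&dev_lock);
    pthread_mutex_unlock(&table_mutex);
}

int main(void)
{
    blktrans_open();
    blktrans_release();
    return 0;
}

With one global order there is no interleaving in which two tasks each hold one lock and wait on the other, which is all the fix needs to guarantee.
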
*/ ++ if (netdev_is_rx_handler_busy(slave_dev)) { ++ netdev_err(bond_dev, ++ "Error: Device is in use and cannot be enslaved\n"); + return -EBUSY; + } + +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 464e5f66b66d..284d751ea97f 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -394,9 +395,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb); + /* + * CAN device restart for bus-off recovery + */ +-static void can_restart(unsigned long data) ++static void can_restart(struct net_device *dev) + { +- struct net_device *dev = (struct net_device *)data; + struct can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; +@@ -436,6 +436,14 @@ restart: + netdev_err(dev, "Error %d during restart", err); + } + ++static void can_restart_work(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); ++ ++ can_restart(priv->dev); ++} ++ + int can_restart_now(struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); +@@ -449,8 +457,8 @@ int can_restart_now(struct net_device *dev) + if (priv->state != CAN_STATE_BUS_OFF) + return -EBUSY; + +- /* Runs as soon as possible in the timer context */ +- mod_timer(&priv->restart_timer, jiffies); ++ cancel_delayed_work_sync(&priv->restart_work); ++ can_restart(dev); + + return 0; + } +@@ -472,8 +480,8 @@ void can_bus_off(struct net_device *dev) + priv->can_stats.bus_off++; + + if (priv->restart_ms) +- mod_timer(&priv->restart_timer, +- jiffies + (priv->restart_ms * HZ) / 1000); ++ schedule_delayed_work(&priv->restart_work, ++ msecs_to_jiffies(priv->restart_ms)); + } + EXPORT_SYMBOL_GPL(can_bus_off); + +@@ -556,6 +564,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) + return NULL; + + priv = netdev_priv(dev); ++ priv->dev = dev; + + if (echo_skb_max) { + priv->echo_skb_max = echo_skb_max; +@@ -565,7 +574,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) + + priv->state = CAN_STATE_STOPPED; + +- init_timer(&priv->restart_timer); ++ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); + + return dev; + } +@@ -599,8 +608,6 @@ int open_candev(struct net_device *dev) + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + +- setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); +- + return 0; + } + EXPORT_SYMBOL_GPL(open_candev); +@@ -615,7 +622,7 @@ void close_candev(struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + +- del_timer_sync(&priv->restart_timer); ++ cancel_delayed_work_sync(&priv->restart_work); + can_flush_echo_skb(dev); + } + EXPORT_SYMBOL_GPL(close_candev); +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c +index d175bbd3ffd3..4ac9dfd3f127 100644 +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -5197,6 +5197,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); + + static void sky2_shutdown(struct pci_dev *pdev) + { ++ struct sky2_hw *hw = pci_get_drvdata(pdev); ++ int port; ++ ++ for (port = 0; port < hw->ports; port++) { ++ struct net_device *ndev = hw->dev[port]; ++ ++ rtnl_lock(); ++ if (netif_running(ndev)) { ++ dev_close(ndev); ++ netif_device_detach(ndev); ++ } ++ rtnl_unlock(); ++ } + sky2_suspend(&pdev->dev); + pci_wake_from_d3(pdev, 
device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index 063f3f4d4867..a206ce615e97 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -2027,7 +2027,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) + struct mlx4_en_dev *mdev = en_priv->mdev; + u64 mac_u64 = mlx4_en_mac_to_u64(mac); + +- if (!is_valid_ether_addr(mac)) ++ if (is_multicast_ether_addr(mac)) + return -EINVAL; + + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 14a8d2958698..ab79c0f13d0a 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -2317,8 +2317,6 @@ ppp_unregister_channel(struct ppp_channel *chan) + spin_lock_bh(&pn->all_channels_lock); + list_del(&pch->list); + spin_unlock_bh(&pn->all_channels_lock); +- put_net(pch->chan_net); +- pch->chan_net = NULL; + + pch->file.dead = 1; + wake_up_interruptible(&pch->file.rwait); +@@ -2925,6 +2923,9 @@ ppp_disconnect_channel(struct channel *pch) + */ + static void ppp_destroy_channel(struct channel *pch) + { ++ put_net(pch->chan_net); ++ pch->chan_net = NULL; ++ + atomic_dec(&channel_count); + + if (!pch->file.dead) { +diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c +index afb117c16d2d..8ba774de3474 100644 +--- a/drivers/net/usb/kaweth.c ++++ b/drivers/net/usb/kaweth.c +@@ -1031,6 +1031,7 @@ static int kaweth_probe( + kaweth = netdev_priv(netdev); + kaweth->dev = udev; + kaweth->net = netdev; ++ kaweth->intf = intf; + + spin_lock_init(&kaweth->device_lock); + init_waitqueue_head(&kaweth->term_wait); +@@ -1141,8 +1142,6 @@ err_fw: + + dev_dbg(dev, "Initializing net device.\n"); + +- kaweth->intf = intf; +- + kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kaweth->tx_urb) + goto err_free_netdev; +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +index 301e572e8923..2c524305589f 100644 +--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c ++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +@@ -3726,7 +3726,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, + (u8 *)&settings->beacon.head[ie_offset], + settings->beacon.head_len - ie_offset, + WLAN_EID_SSID); +- if (!ssid_ie) ++ if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN) + return -EINVAL; + + memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c +index 4fb9635d3919..7660b523dcf1 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c ++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c +@@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub) + + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, + DMA_FROM_DEVICE); +- if (dma_mapping_error(di->dmadev, pa)) ++ if (dma_mapping_error(di->dmadev, pa)) { ++ brcmu_pkt_buf_free_skb(p); + return false; ++ } + + /* save the free packet pointer */ + di->rxp[rxout] = p; +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c +index dd9162722495..0ab865de1491 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c ++++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c +@@ -87,7 +87,7 @@ void + 
brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel, + u16 chanspec) + { +- struct tx_power power; ++ struct tx_power power = { }; + u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id; + + /* Clear previous settings */ +diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c +index f05962c32497..2e3a0d73f090 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c +@@ -1311,9 +1311,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + + /* start the TFD with the scratchbuf */ + scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); +- memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); ++ memcpy(&txq->scratchbufs[idx], &out_cmd->hdr, scratch_size); + iwl_pcie_txq_build_tfd(trans, txq, +- iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), ++ iwl_pcie_get_scratchbuf_dma(txq, idx), + scratch_size, 1); + + /* map first command fragment, if any remains */ +diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c +index e7f7cdfafd51..fa0e45b82ce0 100644 +--- a/drivers/net/wireless/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/mwifiex/cfg80211.c +@@ -1633,8 +1633,9 @@ done: + is_scanning_required = 1; + } else { + dev_dbg(priv->adapter->dev, +- "info: trying to associate to '%s' bssid %pM\n", +- (char *) req_ssid.ssid, bss->bssid); ++ "info: trying to associate to '%.*s' bssid %pM\n", ++ req_ssid.ssid_len, (char *)req_ssid.ssid, ++ bss->bssid); + memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); + break; + } +@@ -1675,8 +1676,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + return -EINVAL; + } + +- wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", +- (char *) sme->ssid, sme->bssid); ++ wiphy_dbg(wiphy, "info: Trying to associate to %.*s and bssid %pM\n", ++ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); + + ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, + priv->bss_mode, sme->channel, sme, 0); +@@ -1799,8 +1800,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + goto done; + } + +- wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", +- (char *) params->ssid, params->bssid); ++ wiphy_dbg(wiphy, "info: trying to join to %.*s and bssid %pM\n", ++ params->ssid_len, (char *)params->ssid, params->bssid); + + mwifiex_set_ibss_params(priv, params); + +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index 1595f818b8c0..ec88898ce42b 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -454,17 +454,17 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, + struct netrx_pending_operations *npo) + { + struct netbk_rx_meta *meta; +- struct xen_netif_rx_request *req; ++ struct xen_netif_rx_request req; + +- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); ++ RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + + meta = npo->meta + npo->meta_prod++; + meta->gso_size = 0; + meta->size = 0; +- meta->id = req->id; ++ meta->id = req.id; + + npo->copy_off = 0; +- npo->copy_gref = req->gref; ++ npo->copy_gref = req.gref; + + return meta; + } +@@ -582,7 +582,7 @@ static int netbk_gop_skb(struct sk_buff *skb, + struct xenvif *vif = netdev_priv(skb->dev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; +- struct xen_netif_rx_request *req; ++ struct xen_netif_rx_request req; + struct netbk_rx_meta *meta; + unsigned char *data; + int head = 
1; +@@ -592,14 +592,14 @@ static int netbk_gop_skb(struct sk_buff *skb, + + /* Set up a GSO prefix descriptor, if necessary */ + if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { +- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); ++ RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + meta = npo->meta + npo->meta_prod++; + meta->gso_size = skb_shinfo(skb)->gso_size; + meta->size = 0; +- meta->id = req->id; ++ meta->id = req.id; + } + +- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); ++ RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + meta = npo->meta + npo->meta_prod++; + + if (!vif->gso_prefix) +@@ -608,9 +608,9 @@ static int netbk_gop_skb(struct sk_buff *skb, + meta->gso_size = 0; + + meta->size = 0; +- meta->id = req->id; ++ meta->id = req.id; + npo->copy_off = 0; +- npo->copy_gref = req->gref; ++ npo->copy_gref = req.gref; + + data = skb->data; + while (data < skb_tail_pointer(skb)) { +@@ -928,9 +928,7 @@ static void tx_add_credit(struct xenvif *vif) + * Allow a burst big enough to transmit a jumbo packet of up to 128kB. + * Otherwise the interface can seize up due to insufficient credit. + */ +- max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; +- max_burst = min(max_burst, 131072UL); +- max_burst = max(max_burst, vif->credit_bytes); ++ max_burst = max(131072UL, vif->credit_bytes); + + /* Take care that adding a new chunk of credit doesn't wrap to zero. */ + max_credit = vif->remaining_credit + vif->credit_bytes; +@@ -956,7 +954,7 @@ static void netbk_tx_err(struct xenvif *vif, + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + if (cons == end) + break; +- txp = RING_GET_REQUEST(&vif->tx, cons++); ++ RING_COPY_REQUEST(&vif->tx, cons++, txp); + } while (1); + vif->tx.req_cons = cons; + xen_netbk_check_rx_xenvif(vif); +@@ -1023,8 +1021,7 @@ static int netbk_count_requests(struct xenvif *vif, + if (drop_err) + txp = &dropped_tx; + +- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), +- sizeof(*txp)); ++ RING_COPY_REQUEST(&vif->tx, cons + slots, txp); + + /* If the guest submitted a frame >= 64 KiB then + * first->size overflowed and following slots will +@@ -1312,8 +1309,7 @@ static int xen_netbk_get_extras(struct xenvif *vif, + return -EBADR; + } + +- memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), +- sizeof(extra)); ++ RING_COPY_REQUEST(&vif->tx, cons, &extra); + if (unlikely(!extra.type || + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + vif->tx.req_cons = ++cons; +@@ -1503,7 +1499,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) + + idx = vif->tx.req_cons; + rmb(); /* Ensure that we see the request before we copy it. */ +- memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); ++ RING_COPY_REQUEST(&vif->tx, idx, &txreq); + + /* Credit-based scheduling. 
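
Every xen-netback hunk in this stretch makes the same move. RING_GET_REQUEST() handed back a pointer into the ring page shared with the (untrusted) frontend, so each re-read of a field could observe a different value than the one just validated; RING_COPY_REQUEST() snapshots the slot into backend-private memory first, and tx_add_credit() stops reading the request size out of the ring altogether. Copy-before-validate in a standalone sketch, where the struct layout and the bound are illustrative rather than the real netif ring ABI:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 256

struct tx_request { uint32_t id; uint32_t size; };

/* in the real backend this array lives in a page the guest can write */
static struct tx_request shared_ring[RING_SIZE];

static int fetch_request(unsigned int idx, struct tx_request *out)
{
    /* snapshot first: every later check and use sees this one copy,
     * not whatever the other side wrote in the meantime */
    memcpy(out, &shared_ring[idx % RING_SIZE], sizeof(*out));

    if (out->size > 65535)      /* validate the private copy only */
        return -1;
    return 0;
}

int main(void)
{
    struct tx_request req;

    shared_ring[0] = (struct tx_request){ .id = 1, .size = 1500 };
    if (fetch_request(0, &req) == 0)
        printf("req %u, %u bytes\n", (unsigned)req.id, (unsigned)req.size);
    return 0;
}

Validating a value the peer can still change is a time-of-check/time-of-use bug; the single copy removes the window entirely.
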
*/ + if (txreq.size > vif->remaining_credit && +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index a6637158d078..b6625e58bc57 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -339,19 +339,52 @@ static void quirk_s3_64M(struct pci_dev *dev) + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); + ++static void quirk_io(struct pci_dev *dev, int pos, unsigned size, ++ const char *name) ++{ ++ u32 region; ++ struct pci_bus_region bus_region; ++ struct resource *res = dev->resource + pos; ++ ++ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion); ++ ++ if (!region) ++ return; ++ ++ res->name = pci_name(dev); ++ res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; ++ res->flags |= ++ (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); ++ region &= ~(size - 1); ++ ++ /* Convert from PCI bus to resource space */ ++ bus_region.start = region; ++ bus_region.end = region + size - 1; ++ pcibios_bus_to_resource(dev, res, &bus_region); ++ ++ dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", ++ name, PCI_BASE_ADDRESS_0 + (pos << 2), res); ++} ++ + /* + * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS + * ver. 1.33 20070103) don't set the correct ISA PCI region header info. + * BAR0 should be 8 bytes; instead, it may be set to something like 8k + * (which conflicts w/ BAR1's memory range). ++ * ++ * CS553x's ISA PCI BARs may also be read-only (ref: ++ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). + */ + static void quirk_cs5536_vsa(struct pci_dev *dev) + { ++ static char *name = "CS5536 ISA bridge"; ++ + if (pci_resource_len(dev, 0) != 8) { +- struct resource *res = &dev->resource[0]; +- res->end = res->start + 8 - 1; +- dev_info(&dev->dev, "CS5536 ISA bridge bug detected " +- "(incorrect header); workaround applied.\n"); ++ quirk_io(dev, 0, 8, name); /* SMB */ ++ quirk_io(dev, 1, 256, name); /* GPIO */ ++ quirk_io(dev, 2, 64, name); /* MFGPT */ ++ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", ++ name); + } + } + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); +diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c +index 45c16447744b..1ed4145164d6 100644 +--- a/drivers/regulator/tps65910-regulator.c ++++ b/drivers/regulator/tps65910-regulator.c +@@ -1080,6 +1080,12 @@ static int tps65910_probe(struct platform_device *pdev) + pmic->num_regulators = ARRAY_SIZE(tps65910_regs); + pmic->ext_sleep_control = tps65910_ext_sleep_control; + info = tps65910_regs; ++ /* Work around silicon erratum SWCZ010: output programmed ++ * voltage level can go higher than expected or crash ++ * Workaround: use no synchronization of DCDC clocks ++ */ ++ tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL, ++ DCDCCTRL_DCDCCKSYNC_MASK); + break; + case TPS65911: + pmic->get_ctrl_reg = &tps65911_get_ctrl_register; +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index e91ec8cd9b09..aa9d384205c8 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -1615,9 +1615,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + unsigned long long now; + int expires; + ++ cqr = (struct dasd_ccw_req *) intparm; + if (IS_ERR(irb)) { + switch (PTR_ERR(irb)) { + case -EIO: ++ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { ++ device = (struct 
dasd_device *) cqr->startdev; ++ cqr->status = DASD_CQR_CLEARED; ++ dasd_device_clear_timer(device); ++ wake_up(&dasd_flush_wq); ++ dasd_schedule_device_bh(device); ++ return; ++ } + break; + case -ETIMEDOUT: + DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " +@@ -1633,7 +1642,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + } + + now = get_tod_clock(); +- cqr = (struct dasd_ccw_req *) intparm; + /* check for conditions that should be handled immediately */ + if (!cqr || + !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c +index e1a8cc2526e7..c846a63ea672 100644 +--- a/drivers/s390/scsi/zfcp_dbf.c ++++ b/drivers/s390/scsi/zfcp_dbf.c +@@ -3,7 +3,7 @@ + * + * Debug traces for zfcp. + * +- * Copyright IBM Corp. 2002, 2010 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -58,7 +58,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, + * @tag: tag indicating which kind of unsolicited status has been received + * @req: request for which a response was received + */ +-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) ++void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) + { + struct zfcp_dbf *dbf = req->adapter->dbf; + struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; +@@ -78,6 +78,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) + rec->u.res.req_issued = req->issued; + rec->u.res.prot_status = q_pref->prot_status; + rec->u.res.fsf_status = q_head->fsf_status; ++ rec->u.res.port_handle = q_head->port_handle; ++ rec->u.res.lun_handle = q_head->lun_handle; + + memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, + FSF_PROT_STATUS_QUAL_SIZE); +@@ -90,7 +92,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) + rec->pl_len, "fsf_res", req->req_id); + } + +- debug_event(dbf->hba, 1, rec, sizeof(*rec)); ++ debug_event(dbf->hba, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); + } + +@@ -234,7 +236,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, + if (sdev) { + rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); + rec->lun = zfcp_scsi_dev_lun(sdev); +- } ++ } else ++ rec->lun = ZFCP_DBF_INVALID_LUN; + } + + /** +@@ -313,13 +316,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) + spin_unlock_irqrestore(&dbf->rec_lock, flags); + } + ++/** ++ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery ++ * @tag: identifier for event ++ * @wka_port: well known address port ++ * @req_id: request ID to correlate with potential HBA trace record ++ */ ++void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port, ++ u64 req_id) ++{ ++ struct zfcp_dbf *dbf = wka_port->adapter->dbf; ++ struct zfcp_dbf_rec *rec = &dbf->rec_buf; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dbf->rec_lock, flags); ++ memset(rec, 0, sizeof(*rec)); ++ ++ rec->id = ZFCP_DBF_REC_RUN; ++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); ++ rec->port_status = wka_port->status; ++ rec->d_id = wka_port->d_id; ++ rec->lun = ZFCP_DBF_INVALID_LUN; ++ ++ rec->u.run.fsf_req_id = req_id; ++ rec->u.run.rec_status = ~0; ++ rec->u.run.rec_step = ~0; ++ rec->u.run.rec_action = ~0; ++ rec->u.run.rec_count = ~0; ++ ++ debug_event(dbf->rec, 1, rec, sizeof(*rec)); ++ spin_unlock_irqrestore(&dbf->rec_lock, flags); ++} ++ + static inline +-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 
len, +- u64 req_id, u32 d_id) ++void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, ++ char *paytag, struct scatterlist *sg, u8 id, u16 len, ++ u64 req_id, u32 d_id, u16 cap_len) + { + struct zfcp_dbf_san *rec = &dbf->san_buf; + u16 rec_len; + unsigned long flags; ++ struct zfcp_dbf_pay *payload = &dbf->pay_buf; ++ u16 pay_sum = 0; + + spin_lock_irqsave(&dbf->san_lock, flags); + memset(rec, 0, sizeof(*rec)); +@@ -327,10 +365,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, + rec->id = id; + rec->fsf_req_id = req_id; + rec->d_id = d_id; +- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); +- memcpy(rec->payload, data, rec_len); + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); ++ rec->pl_len = len; /* full length even if we cap pay below */ ++ if (!sg) ++ goto out; ++ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD); ++ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */ ++ if (len <= rec_len) ++ goto out; /* skip pay record if full content in rec->payload */ ++ ++ /* if (len > rec_len): ++ * dump data up to cap_len ignoring small duplicate in rec->payload ++ */ ++ spin_lock(&dbf->pay_lock); ++ memset(payload, 0, sizeof(*payload)); ++ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); ++ payload->fsf_req_id = req_id; ++ payload->counter = 0; ++ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) { ++ u16 pay_len, offset = 0; ++ ++ while (offset < sg->length && pay_sum < cap_len) { ++ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC, ++ (u16)(sg->length - offset)); ++ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */ ++ memcpy(payload->data, sg_virt(sg) + offset, pay_len); ++ debug_event(dbf->pay, 1, payload, ++ zfcp_dbf_plen(pay_len)); ++ payload->counter++; ++ offset += pay_len; ++ pay_sum += pay_len; ++ } ++ } ++ spin_unlock(&dbf->pay_lock); + ++out: + debug_event(dbf->san, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->san_lock, flags); + } +@@ -347,9 +416,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) + struct zfcp_fsf_ct_els *ct_els = fsf->data; + u16 length; + +- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); +- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, +- fsf->req_id, d_id); ++ length = (u16)zfcp_qdio_real_bytes(ct_els->req); ++ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ, ++ length, fsf->req_id, d_id, length); ++} ++ ++static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, ++ struct zfcp_fsf_req *fsf, ++ u16 len) ++{ ++ struct zfcp_fsf_ct_els *ct_els = fsf->data; ++ struct fc_ct_hdr *reqh = sg_virt(ct_els->req); ++ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1); ++ struct scatterlist *resp_entry = ct_els->resp; ++ struct fc_gpn_ft_resp *acc; ++ int max_entries, x, last = 0; ++ ++ if (!(memcmp(tag, "fsscth2", 7) == 0 ++ && ct_els->d_id == FC_FID_DIR_SERV ++ && reqh->ct_rev == FC_CT_REV ++ && reqh->ct_in_id[0] == 0 ++ && reqh->ct_in_id[1] == 0 ++ && reqh->ct_in_id[2] == 0 ++ && reqh->ct_fs_type == FC_FST_DIR ++ && reqh->ct_fs_subtype == FC_NS_SUBTYPE ++ && reqh->ct_options == 0 ++ && reqh->_ct_resvd1 == 0 ++ && reqh->ct_cmd == FC_NS_GPN_FT ++ /* reqh->ct_mr_size can vary so do not match but read below */ ++ && reqh->_ct_resvd2 == 0 ++ && reqh->ct_reason == 0 ++ && reqh->ct_explan == 0 ++ && reqh->ct_vendor == 0 ++ && reqn->fn_resvd == 0 ++ && reqn->fn_domain_id_scope == 0 ++ && reqn->fn_area_id_scope == 0 ++ && reqn->fn_fc4_type == FC_TYPE_FCP)) ++ return len; /* not GPN_FT response so do not cap */ ++ ++ acc = 
sg_virt(resp_entry); ++ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp)) ++ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one ++ * to account for header as 1st pseudo "entry" */; ++ ++ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT ++ * response, allowing us to skip special handling for it - just skip it ++ */ ++ for (x = 1; x < max_entries && !last; x++) { ++ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) ++ acc++; ++ else ++ acc = sg_virt(++resp_entry); ++ ++ last = acc->fp_flags & FC_NS_FID_LAST; ++ } ++ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp))); ++ return len; /* cap after last entry */ + } + + /** +@@ -363,9 +485,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) + struct zfcp_fsf_ct_els *ct_els = fsf->data; + u16 length; + +- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); +- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, +- fsf->req_id, 0); ++ length = (u16)zfcp_qdio_real_bytes(ct_els->resp); ++ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES, ++ length, fsf->req_id, ct_els->d_id, ++ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length)); + } + + /** +@@ -379,11 +502,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) + struct fsf_status_read_buffer *srb = + (struct fsf_status_read_buffer *) fsf->data; + u16 length; ++ struct scatterlist sg; + + length = (u16)(srb->length - + offsetof(struct fsf_status_read_buffer, payload)); +- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, +- fsf->req_id, ntoh24(srb->d_id)); ++ sg_init_one(&sg, srb->payload.data, length); ++ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length, ++ fsf->req_id, ntoh24(srb->d_id), length); + } + + /** +@@ -392,7 +517,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) + * @sc: pointer to struct scsi_cmnd + * @fsf: pointer to struct zfcp_fsf_req + */ +-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) ++void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, ++ struct zfcp_fsf_req *fsf) + { + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) sc->device->host->hostdata[0]; +@@ -434,7 +560,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) + } + } + +- debug_event(dbf->scsi, 1, rec, sizeof(*rec)); ++ debug_event(dbf->scsi, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); + } + +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h +index 3ac7a4b30dd9..440aa619da1d 100644 +--- a/drivers/s390/scsi/zfcp_dbf.h ++++ b/drivers/s390/scsi/zfcp_dbf.h +@@ -2,7 +2,7 @@ + * zfcp device driver + * debug feature declarations + * +- * Copyright IBM Corp. 2008, 2010 ++ * Copyright IBM Corp. 
2008, 2015 + */ + + #ifndef ZFCP_DBF_H +@@ -17,6 +17,11 @@ + + #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull + ++enum zfcp_dbf_pseudo_erp_act_type { ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff, ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe, ++}; ++ + /** + * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action + * @ready: number of ready recovery actions +@@ -110,6 +115,7 @@ struct zfcp_dbf_san { + u32 d_id; + #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32) + char payload[ZFCP_DBF_SAN_MAX_PAYLOAD]; ++ u16 pl_len; + } __packed; + + /** +@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res { + u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; + u32 fsf_status; + u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; ++ u32 port_handle; ++ u32 lun_handle; + } __packed; + + /** +@@ -279,7 +287,7 @@ static inline + void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) + { + if (level <= req->adapter->dbf->hba->level) +- zfcp_dbf_hba_fsf_res(tag, req); ++ zfcp_dbf_hba_fsf_res(tag, level, req); + } + + /** +@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd, + scmd->device->host->hostdata[0]; + + if (level <= adapter->dbf->scsi->level) +- zfcp_dbf_scsi(tag, scmd, req); ++ zfcp_dbf_scsi(tag, level, scmd, req); + } + + /** +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c +index 8e8f3533d2a1..b4cd26d24152 100644 +--- a/drivers/s390/scsi/zfcp_erp.c ++++ b/drivers/s390/scsi/zfcp_erp.c +@@ -3,7 +3,7 @@ + * + * Error Recovery Procedures (ERP). + * +- * Copyright IBM Corp. 2002, 2010 ++ * Copyright IBM Corp. 2002, 2015 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -1225,8 +1225,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + break; + + case ZFCP_ERP_ACTION_REOPEN_PORT: +- if (result == ZFCP_ERP_SUCCEEDED) +- zfcp_scsi_schedule_rport_register(port); ++ /* This switch case might also happen after a forced reopen ++ * was successfully done and thus overwritten with a new ++ * non-forced reopen at `ersfs_2'. In this case, we must not ++ * do the clean-up of the non-forced version. ++ */ ++ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) ++ if (result == ZFCP_ERP_SUCCEEDED) ++ zfcp_scsi_schedule_rport_register(port); + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + put_device(&port->dev); +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h +index 1d3dd3f7d699..01527c31d1da 100644 +--- a/drivers/s390/scsi/zfcp_ext.h ++++ b/drivers/s390/scsi/zfcp_ext.h +@@ -3,7 +3,7 @@ + * + * External function declarations. + * +- * Copyright IBM Corp. 2002, 2010 ++ * Copyright IBM Corp. 
2002, 2015 + */ + + #ifndef ZFCP_EXT_H +@@ -49,8 +49,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); + extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, + struct zfcp_port *, struct scsi_device *, u8, u8); + extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); ++extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); + extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); +-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); ++extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); + extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); + extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); + extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); +@@ -58,7 +59,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); + extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); + extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); + extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); +-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *); ++extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, ++ struct zfcp_fsf_req *); + + /* zfcp_erp.c */ + extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c +index 9152999a0707..f246097b7c6d 100644 +--- a/drivers/s390/scsi/zfcp_fsf.c ++++ b/drivers/s390/scsi/zfcp_fsf.c +@@ -3,7 +3,7 @@ + * + * Implementation of FSF commands. + * +- * Copyright IBM Corp. 2002, 2013 ++ * Copyright IBM Corp. 2002, 2015 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -513,7 +513,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) + fc_host_port_type(shost) = FC_PORTTYPE_PTP; + break; + case FSF_TOPO_FABRIC: +- fc_host_port_type(shost) = FC_PORTTYPE_NPORT; ++ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE) ++ fc_host_port_type(shost) = FC_PORTTYPE_NPIV; ++ else ++ fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + break; + case FSF_TOPO_AL: + fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; +@@ -618,7 +621,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) + + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { + fc_host_permanent_port_name(shost) = bottom->wwpn; +- fc_host_port_type(shost) = FC_PORTTYPE_NPIV; + } else + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); + fc_host_maxframe_size(shost) = bottom->maximum_frame_size; +@@ -988,8 +990,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, + if (zfcp_adapter_multi_buffer_active(adapter)) { + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) + return -EIO; ++ qtcb->bottom.support.req_buf_length = ++ zfcp_qdio_real_bytes(sg_req); + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) + return -EIO; ++ qtcb->bottom.support.resp_buf_length = ++ zfcp_qdio_real_bytes(sg_resp); + + zfcp_qdio_set_data_div(qdio, &req->qdio_req, + zfcp_qdio_sbale_count(sg_req)); +@@ -1079,6 +1085,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, + + req->handler = zfcp_fsf_send_ct_handler; + req->qtcb->header.port_handle = wka_port->handle; ++ ct->d_id = wka_port->d_id; + req->data = ct; + + zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); +@@ -1182,6 +1189,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, + + hton24(req->qtcb->bottom.support.d_id, d_id); + req->handler = zfcp_fsf_send_els_handler; ++ els->d_id = 
d_id; + req->data = els; + + zfcp_dbf_san_req("fssels1", req, d_id); +@@ -1599,7 +1607,7 @@ out: + int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; +- struct zfcp_fsf_req *req; ++ struct zfcp_fsf_req *req = NULL; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1628,6 +1636,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + zfcp_fsf_req_free(req); + out: + spin_unlock_irq(&qdio->req_q_lock); ++ if (req && !IS_ERR(req)) ++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); + return retval; + } + +@@ -1652,7 +1662,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) + int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; +- struct zfcp_fsf_req *req; ++ struct zfcp_fsf_req *req = NULL; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1681,6 +1691,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + zfcp_fsf_req_free(req); + out: + spin_unlock_irq(&qdio->req_q_lock); ++ if (req && !IS_ERR(req)) ++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); + return retval; + } + +diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h +index 5e795b86931b..8cad41ffb6b8 100644 +--- a/drivers/s390/scsi/zfcp_fsf.h ++++ b/drivers/s390/scsi/zfcp_fsf.h +@@ -3,7 +3,7 @@ + * + * Interface to the FSF support functions. + * +- * Copyright IBM Corp. 2002, 2010 ++ * Copyright IBM Corp. 2002, 2015 + */ + + #ifndef FSF_H +@@ -462,6 +462,7 @@ struct zfcp_blk_drv_data { + * @handler_data: data passed to handler function + * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC) + * @status: used to pass error status to calling function ++ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS + */ + struct zfcp_fsf_ct_els { + struct scatterlist *req; +@@ -470,6 +471,7 @@ struct zfcp_fsf_ct_els { + void *handler_data; + struct zfcp_port *port; + int status; ++ u32 d_id; + }; + + #endif /* FSF_H */ +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c +index 7b353647cb90..38ee0df633a3 100644 +--- a/drivers/s390/scsi/zfcp_scsi.c ++++ b/drivers/s390/scsi/zfcp_scsi.c +@@ -3,7 +3,7 @@ + * + * Interface to Linux SCSI midlayer. + * +- * Copyright IBM Corp. 2002, 2013 ++ * Copyright IBM Corp. 
2002, 2015 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -577,6 +577,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) + ids.port_id = port->d_id; + ids.roles = FC_RPORT_ROLE_FCP_TARGET; + ++ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); + rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); + if (!rport) { + dev_err(&port->adapter->ccw_device->dev, +@@ -598,6 +601,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) + struct fc_rport *rport = port->rport; + + if (rport) { ++ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, ++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); + fc_remote_port_delete(rport); + port->rport = NULL; + } +diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c +index 1822cb9ec623..8d9477cc3227 100644 +--- a/drivers/scsi/arcmsr/arcmsr_hba.c ++++ b/drivers/scsi/arcmsr/arcmsr_hba.c +@@ -1803,7 +1803,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, + + case ARCMSR_MESSAGE_WRITE_WQBUFFER: { + unsigned char *ver_addr; +- int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; ++ uint32_t user_len; ++ int32_t my_empty_len, wqbuf_firstindex, wqbuf_lastindex; + uint8_t *pQbuffer, *ptmpuserbuffer; + + ver_addr = kmalloc(1032, GFP_ATOMIC); +@@ -1820,6 +1821,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, + } + ptmpuserbuffer = ver_addr; + user_len = pcmdmessagefld->cmdmessage.Length; ++ if (user_len > 1032) { ++ retvalue = ARCMSR_MESSAGE_FAIL; ++ kfree(ver_addr); ++ goto message_out; ++ } + memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); + wqbuf_lastindex = acb->wqbuf_lastindex; + wqbuf_firstindex = acb->wqbuf_firstindex; +@@ -2063,18 +2069,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + struct CommandControlBlock *ccb; + int target = cmd->device->id; +- int lun = cmd->device->lun; +- uint8_t scsicmd = cmd->cmnd[0]; + cmd->scsi_done = done; + cmd->host_scribble = NULL; + cmd->result = 0; +- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){ +- if(acb->devstate[target][lun] == ARECA_RAID_GONE) { +- cmd->result = (DID_NO_CONNECT << 16); +- } +- cmd->scsi_done(cmd); +- return 0; +- } + if (target == 16) { + /* virtual device for iop message transfer */ + arcmsr_handle_virtual_command(acb, cmd); +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c +index 4e31caa21ddf..920686155310 100644 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -717,7 +717,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) + spin_lock_irqsave(vhost->host->host_lock, flags); + vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; +- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + + /* Clean out the queue */ + memset(crq->msgs, 0, PAGE_SIZE); +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h +index 280e769a1686..a0e0a61dc882 100644 +--- a/drivers/scsi/megaraid/megaraid_sas.h ++++ b/drivers/scsi/megaraid/megaraid_sas.h +@@ -1402,7 +1402,7 @@ struct megasas_instance_template { + }; + + #define MEGASAS_IS_LOGICAL(scp) \ +- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 ++ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 
0 : 1) + + #define MEGASAS_DEV_INDEX(inst, scp) \ + ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 6ced6a398d60..0626a168c55b 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -1487,16 +1487,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd + goto out_done; + } + +- switch (scmd->cmnd[0]) { +- case SYNCHRONIZE_CACHE: +- /* +- * FW takes care of flush cache on its own +- * No need to send it down +- */ ++ /* ++ * FW takes care of flush cache on its own for Virtual Disk. ++ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW. ++ */ ++ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) { + scmd->result = DID_OK << 16; + goto out_done; +- default: +- break; + } + + if (instance->instancet->build_and_issue_cmd(instance, scmd)) { +diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c +index fe76185cd79a..64caa5ce3237 100644 +--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c ++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c +@@ -3926,6 +3926,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + } + } + ++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) ++{ ++ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); ++} ++ + /** + * _scsih_qcmd - main scsi request entry point + * @scmd: pointer to scsi command object +@@ -3948,6 +3953,13 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) + u32 mpi_control; + u16 smid; + ++ /** ++ * Lock the device for any subsequent command until ++ * command is done. ++ */ ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_block(scmd->device); ++ + scmd->scsi_done = done; + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { +@@ -4454,6 +4466,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + if (scmd == NULL) + return 1; + ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); ++ + mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h +index 994656cbfac9..997e13f6d1ac 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.h ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +@@ -219,6 +219,7 @@ struct MPT3SAS_TARGET { + * @eedp_enable: eedp support enable bit + * @eedp_type: 0(type_1), 1(type_2), 2(type_3) + * @eedp_block_length: block size ++ * @ata_command_pending: SATL passthrough outstanding for device + */ + struct MPT3SAS_DEVICE { + struct MPT3SAS_TARGET *sas_target; +@@ -227,6 +228,17 @@ struct MPT3SAS_DEVICE { + u8 configured_lun; + u8 block; + u8 tlr_snoop_check; ++ /* ++ * Bug workaround for SATL handling: the mpt2/3sas firmware ++ * doesn't return BUSY or TASK_SET_FULL for subsequent ++ * commands while a SATL pass through is in operation as the ++ * spec requires, it simply does nothing with them until the ++ * pass through completes, causing them possibly to timeout if ++ * the passthrough is a long executing command (like format or ++ * secure erase). This variable allows us to do the right ++ * thing while a SATL command is pending. 
++ */ ++ unsigned long ata_command_pending; + }; + + #define MPT3_CMD_NOT_USED 0x8000 /* free */ +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index f8c4b8564251..1d6e115571c9 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -3390,6 +3390,20 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, + le16_to_cpu(event_data->VolDevHandle)); + } + ++static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) ++{ ++ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; ++ ++ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) ++ return 0; ++ ++ if (pending) ++ return test_and_set_bit(0, &priv->ata_command_pending); ++ ++ clear_bit(0, &priv->ata_command_pending); ++ return 0; ++} ++ + /** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object +@@ -3411,6 +3425,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) + if (!scmd) + continue; + count++; ++ _scsih_set_satl_pending(scmd, false); + mpt3sas_base_free_smid(ioc, smid); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery) +@@ -3515,7 +3530,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) + SAM_STAT_CHECK_CONDITION; + } + +- + /** + * _scsih_qcmd_lck - main scsi request entry point + * @scmd: pointer to scsi command object +@@ -3557,6 +3571,19 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) + return 0; + } + ++ /* ++ * Bug work around for firmware SATL handling. The loop ++ * is based on atomic operations and ensures consistency ++ * since we're lockless at this point ++ */ ++ do { ++ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { ++ scmd->result = SAM_STAT_BUSY; ++ scmd->scsi_done(scmd); ++ return 0; ++ } ++ } while (_scsih_set_satl_pending(scmd, true)); ++ + sas_target_priv_data = sas_device_priv_data->sas_target; + + /* invalid device handle */ +@@ -4046,6 +4073,8 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + if (scmd == NULL) + return 1; + ++ _scsih_set_satl_pending(scmd, false); ++ + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c +index 0a537a0515ca..be86e7a02bbc 100644 +--- a/drivers/scsi/scsi_debug.c ++++ b/drivers/scsi/scsi_debug.c +@@ -3504,6 +3504,7 @@ static void __exit scsi_debug_exit(void) + bus_unregister(&pseudo_lld_bus); + root_device_unregister(pseudo_primary); + ++ vfree(map_storep); + if (dif_storep) + vfree(dif_storep); + +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c +index 859240408f9e..92d4f65cbc2e 100644 +--- a/drivers/scsi/scsi_scan.c ++++ b/drivers/scsi/scsi_scan.c +@@ -1517,12 +1517,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, + out_err: + kfree(lun_data); + out: +- scsi_device_put(sdev); + if (scsi_device_created(sdev)) + /* + * the sdev we used didn't appear in the report luns scan + */ + __scsi_remove_device(sdev); ++ scsi_device_put(sdev); + return ret; + } + +diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c +index bc23d66a7a1e..1ff17352abde 100644 +--- a/drivers/staging/iio/impedance-analyzer/ad5933.c ++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c +@@ -646,6 +646,7 @@ static void ad5933_work(struct work_struct *work) + struct iio_dev *indio_dev = i2c_get_clientdata(st->client); + signed short buf[2]; + unsigned char 
status; ++ int ret; + + mutex_lock(&indio_dev->mlock); + if (st->state == AD5933_CTRL_INIT_START_FREQ) { +@@ -653,19 +654,22 @@ static void ad5933_work(struct work_struct *work) + ad5933_cmd(st, AD5933_CTRL_START_SWEEP); + st->state = AD5933_CTRL_START_SWEEP; + schedule_delayed_work(&st->work, st->poll_time_jiffies); +- mutex_unlock(&indio_dev->mlock); +- return; ++ goto out; + } + +- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); ++ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status); ++ if (ret) ++ goto out; + + if (status & AD5933_STAT_DATA_VALID) { + int scan_count = bitmap_weight(indio_dev->active_scan_mask, + indio_dev->masklength); +- ad5933_i2c_read(st->client, ++ ret = ad5933_i2c_read(st->client, + test_bit(1, indio_dev->active_scan_mask) ? + AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA, + scan_count * 2, (u8 *)buf); ++ if (ret) ++ goto out; + + if (scan_count == 2) { + buf[0] = be16_to_cpu(buf[0]); +@@ -677,8 +681,7 @@ static void ad5933_work(struct work_struct *work) + } else { + /* no data available - try again later */ + schedule_delayed_work(&st->work, st->poll_time_jiffies); +- mutex_unlock(&indio_dev->mlock); +- return; ++ goto out; + } + + if (status & AD5933_STAT_SWEEP_DONE) { +@@ -690,7 +693,7 @@ static void ad5933_work(struct work_struct *work) + ad5933_cmd(st, AD5933_CTRL_INC_FREQ); + schedule_delayed_work(&st->work, st->poll_time_jiffies); + } +- ++out: + mutex_unlock(&indio_dev->mlock); + } + +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c +index d755440791b7..f9bf597e836c 100644 +--- a/drivers/thermal/thermal_core.c ++++ b/drivers/thermal/thermal_core.c +@@ -924,7 +924,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, + long temperature; + int ret; + +- ret = tz->ops->get_trip_temp(tz, 0, &temperature); ++ ret = tz->ops->get_crit_temp(tz, &temperature); + if (ret) + return ret; + +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c +index 1afe192bef6a..b5cbe12e2815 100644 +--- a/drivers/tty/tty_ldisc.c ++++ b/drivers/tty/tty_ldisc.c +@@ -400,6 +400,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); + * they are not on hot paths so a little discipline won't do + * any harm. + * ++ * The line discipline-related tty_struct fields are reset to ++ * prevent the ldisc driver from re-using stale information for ++ * the new ldisc instance. 
++ * + * Locking: takes termios_mutex + */ + +@@ -408,6 +412,9 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num) + mutex_lock(&tty->termios_mutex); + tty->termios.c_line = num; + mutex_unlock(&tty->termios_mutex); ++ ++ tty->disc_data = NULL; ++ tty->receive_room = 0; + } + + /** +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 6dff194751f1..010ec70d59fb 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -863,10 +863,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) + return 0; + ++ if (new_screen_size > (4 << 20)) ++ return -EINVAL; + newscreen = kmalloc(new_screen_size, GFP_USER); + if (!newscreen) + return -ENOMEM; + ++ if (vc == sel_cons) ++ clear_selection(); ++ + old_rows = vc->vc_rows; + old_row_size = vc->vc_size_row; + +@@ -1164,7 +1169,7 @@ static void csi_J(struct vc_data *vc, int vpar) + break; + case 3: /* erase scroll-back buffer (and whole display) */ + scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, +- vc->vc_screenbuf_size >> 1); ++ vc->vc_screenbuf_size); + set_origin(vc); + if (CON_IS_VISIBLE(vc)) + update_screen(vc); +diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c +index 252434c9ea9d..2290b1f4b41f 100644 +--- a/drivers/uio/uio_dmem_genirq.c ++++ b/drivers/uio/uio_dmem_genirq.c +@@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) + ++uiomem; + } + +- priv->dmem_region_start = i; ++ priv->dmem_region_start = uiomem - &uioinfo->mem[0]; + priv->num_dmem_regions = pdata->num_dynamic_regions; + + for (i = 0; i < pdata->num_dynamic_regions; ++i) { +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c +index 475c9c114689..b77badb68890 100644 +--- a/drivers/usb/chipidea/core.c ++++ b/drivers/usb/chipidea/core.c +@@ -381,6 +381,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) + return -ENOMEM; + } + ++ spin_lock_init(&ci->lock); + ci->dev = dev; + ci->platdata = dev->platform_data; + if (ci->platdata->phy) +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c +index f1cab425163f..45c8ffa798b8 100644 +--- a/drivers/usb/chipidea/udc.c ++++ b/drivers/usb/chipidea/udc.c +@@ -1647,8 +1647,6 @@ static int udc_start(struct ci13xxx *ci) + struct device *dev = ci->dev; + int retval = 0; + +- spin_lock_init(&ci->lock); +- + ci->gadget.ops = &usb_gadget_ops; + ci->gadget.speed = USB_SPEED_UNKNOWN; + ci->gadget.max_speed = USB_SPEED_HIGH; +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index e7436ebbf04c..b364845de5ad 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1213,7 +1213,6 @@ made_compressed_probe: + spin_lock_init(&acm->write_lock); + spin_lock_init(&acm->read_lock); + mutex_init(&acm->mutex); +- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); + acm->is_int_ep = usb_endpoint_xfer_int(epread); + if (acm->is_int_ep) + acm->bInterval = epread->bInterval; +@@ -1262,14 +1261,14 @@ made_compressed_probe: + urb->transfer_dma = rb->dma; + if (acm->is_int_ep) { + usb_fill_int_urb(urb, acm->dev, +- acm->rx_endpoint, ++ usb_rcvintpipe(usb_dev, epread->bEndpointAddress), + rb->base, + acm->readsize, + acm_read_bulk_callback, rb, + acm->bInterval); + } else { + usb_fill_bulk_urb(urb, acm->dev, +- acm->rx_endpoint, ++ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), + rb->base, + acm->readsize, + acm_read_bulk_callback, rb); +diff --git a/drivers/usb/class/cdc-acm.h 
b/drivers/usb/class/cdc-acm.h +index 1683ac161cf6..bf4e1bb4fb27 100644 +--- a/drivers/usb/class/cdc-acm.h ++++ b/drivers/usb/class/cdc-acm.h +@@ -95,7 +95,6 @@ struct acm { + struct urb *read_urbs[ACM_NR]; + struct acm_rb read_buffers[ACM_NR]; + int rx_buflimit; +- int rx_endpoint; + spinlock_t read_lock; + int write_used; /* number of non-empty write buffers */ + int transmitting; +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 9b05e88d6220..3252bb2dcb80 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -144,6 +144,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, + } + } + ++static const unsigned short low_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 8, ++ [USB_ENDPOINT_XFER_ISOC] = 0, ++ [USB_ENDPOINT_XFER_BULK] = 0, ++ [USB_ENDPOINT_XFER_INT] = 8, ++}; ++static const unsigned short full_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 64, ++ [USB_ENDPOINT_XFER_ISOC] = 1023, ++ [USB_ENDPOINT_XFER_BULK] = 64, ++ [USB_ENDPOINT_XFER_INT] = 64, ++}; ++static const unsigned short high_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 64, ++ [USB_ENDPOINT_XFER_ISOC] = 1024, ++ [USB_ENDPOINT_XFER_BULK] = 512, ++ [USB_ENDPOINT_XFER_INT] = 1024, ++}; ++static const unsigned short super_speed_maxpacket_maxes[4] = { ++ [USB_ENDPOINT_XFER_CONTROL] = 512, ++ [USB_ENDPOINT_XFER_ISOC] = 1024, ++ [USB_ENDPOINT_XFER_BULK] = 1024, ++ [USB_ENDPOINT_XFER_INT] = 1024, ++}; ++ + static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + int asnum, struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) +@@ -152,6 +177,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + struct usb_endpoint_descriptor *d; + struct usb_host_endpoint *endpoint; + int n, i, j, retval; ++ unsigned int maxp; ++ const unsigned short *maxpacket_maxes; + + d = (struct usb_endpoint_descriptor *) buffer; + buffer += d->bLength; +@@ -186,8 +213,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + memcpy(&endpoint->desc, d, n); + INIT_LIST_HEAD(&endpoint->urb_list); + +- /* Fix up bInterval values outside the legal range. Use 32 ms if no +- * proper value can be guessed. */ ++ /* ++ * Fix up bInterval values outside the legal range. ++ * Use 10 or 8 ms if no proper value can be guessed. ++ */ + i = 0; /* i = min, j = max, n = default */ + j = 255; + if (usb_endpoint_xfer_int(d)) { +@@ -195,20 +224,24 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + switch (to_usb_device(ddev)->speed) { + case USB_SPEED_SUPER: + case USB_SPEED_HIGH: +- /* Many device manufacturers are using full-speed ++ /* ++ * Many device manufacturers are using full-speed + * bInterval values in high-speed interrupt endpoint +- * descriptors. Try to fix those and fall back to a +- * 32 ms default value otherwise. */ ++ * descriptors. Try to fix those and fall back to an ++ * 8-ms default value otherwise. ++ */ + n = fls(d->bInterval*8); + if (n == 0) +- n = 9; /* 32 ms = 2^(9-1) uframes */ ++ n = 7; /* 8 ms = 2^(7-1) uframes */ + j = 16; + break; + default: /* USB_SPEED_FULL or _LOW */ +- /* For low-speed, 10 ms is the official minimum. ++ /* ++ * For low-speed, 10 ms is the official minimum. + * But some "overclocked" devices might want faster +- * polling so we'll allow it. */ +- n = 32; ++ * polling so we'll allow it. 
++ */ ++ n = 10; + break; + } + } else if (usb_endpoint_xfer_isoc(d)) { +@@ -216,10 +249,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + j = 16; + switch (to_usb_device(ddev)->speed) { + case USB_SPEED_HIGH: +- n = 9; /* 32 ms = 2^(9-1) uframes */ ++ n = 7; /* 8 ms = 2^(7-1) uframes */ + break; + default: /* USB_SPEED_FULL */ +- n = 6; /* 32 ms = 2^(6-1) frames */ ++ n = 4; /* 8 ms = 2^(4-1) frames */ + break; + } + } +@@ -247,6 +280,41 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + endpoint->desc.wMaxPacketSize = cpu_to_le16(8); + } + ++ /* Validate the wMaxPacketSize field */ ++ maxp = usb_endpoint_maxp(&endpoint->desc); ++ ++ /* Find the highest legal maxpacket size for this endpoint */ ++ i = 0; /* additional transactions per microframe */ ++ switch (to_usb_device(ddev)->speed) { ++ case USB_SPEED_LOW: ++ maxpacket_maxes = low_speed_maxpacket_maxes; ++ break; ++ case USB_SPEED_FULL: ++ maxpacket_maxes = full_speed_maxpacket_maxes; ++ break; ++ case USB_SPEED_HIGH: ++ /* Bits 12..11 are allowed only for HS periodic endpoints */ ++ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { ++ i = maxp & (BIT(12) | BIT(11)); ++ maxp &= ~i; ++ } ++ /* fallthrough */ ++ default: ++ maxpacket_maxes = high_speed_maxpacket_maxes; ++ break; ++ case USB_SPEED_SUPER: ++ maxpacket_maxes = super_speed_maxpacket_maxes; ++ break; ++ } ++ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; ++ ++ if (maxp > j) { ++ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", ++ cfgno, inum, asnum, d->bEndpointAddress, maxp, j); ++ maxp = j; ++ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); ++ } ++ + /* + * Some buggy high speed devices have bulk endpoints using + * maxpacket sizes other than 512. High speed HCDs may not +@@ -254,9 +322,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + */ + if (to_usb_device(ddev)->speed == USB_SPEED_HIGH + && usb_endpoint_xfer_bulk(d)) { +- unsigned maxp; +- +- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; + if (maxp != 512) + dev_warn(ddev, "config %d interface %d altsetting %d " + "bulk endpoint 0x%X has invalid maxpacket %d\n", +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 6e70c88b25fb..0dfee61f7878 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1802,14 +1802,6 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, + s_pkt = 1; + } + +- /* +- * We assume here we will always receive the entire data block +- * which we should receive. Meaning, if we program RX to +- * receive 4K but we receive only 2K, we assume that's all we +- * should receive and we simply bounce the request back to the +- * gadget driver for further processing. 
+- */ +- req->request.actual += req->request.length - count; + if (s_pkt) + return 1; + if ((event->status & DEPEVT_STATUS_LST) && +@@ -1829,6 +1821,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + struct dwc3_trb *trb; + unsigned int slot; + unsigned int i; ++ int count = 0; + int ret; + + do { +@@ -1845,6 +1838,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + slot++; + slot %= DWC3_TRB_NUM; + trb = &dep->trb_pool[slot]; ++ count += trb->size & DWC3_TRB_SIZE_MASK; ++ + + ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, + event, status); +@@ -1852,6 +1847,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, + break; + }while (++i < req->request.num_mapped_sgs); + ++ /* ++ * We assume here we will always receive the entire data block ++ * which we should receive. Meaning, if we program RX to ++ * receive 4K but we receive only 2K, we assume that's all we ++ * should receive and we simply bounce the request back to the ++ * gadget driver for further processing. ++ */ ++ req->request.actual += req->request.length - count; + dwc3_gadget_giveback(dep, req, status); + + if (ret) +diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c +index 9a7ee3347e4d..9fd233003769 100644 +--- a/drivers/usb/gadget/fsl_qe_udc.c ++++ b/drivers/usb/gadget/fsl_qe_udc.c +@@ -1881,11 +1881,8 @@ static int qe_get_frame(struct usb_gadget *gadget) + + tmp = in_be16(&udc->usb_param->frame_n); + if (tmp & 0x8000) +- tmp = tmp & 0x07ff; +- else +- tmp = -EINVAL; +- +- return (int)tmp; ++ return tmp & 0x07ff; ++ return -EINVAL; + } + + static int fsl_qe_start(struct usb_gadget *gadget, +diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c +index 4b76124ce96b..ef5c623cf0dd 100644 +--- a/drivers/usb/gadget/u_ether.c ++++ b/drivers/usb/gadget/u_ether.c +@@ -584,13 +584,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, + + req->length = length; + +- /* throttle high/super speed IRQ rate back slightly */ +- if (gadget_is_dualspeed(dev->gadget)) +- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || +- dev->gadget->speed == USB_SPEED_SUPER) +- ? ((atomic_read(&dev->tx_qlen) % qmult) != 0) +- : 0; +- + retval = usb_ep_queue(in, req, GFP_ATOMIC); + switch (retval) { + default: +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 0f71c3a22507..0f6edce536cb 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -275,6 +275,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) + + ret = 0; + virt_dev = xhci->devs[slot_id]; ++ if (!virt_dev) ++ return -ENODEV; ++ + cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); + if (!cmd) { + xhci_dbg(xhci, "Couldn't allocate command structure.\n"); +diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c +index 80894791c020..c3e9cfc7c276 100644 +--- a/drivers/usb/misc/legousbtower.c ++++ b/drivers/usb/misc/legousbtower.c +@@ -953,24 +953,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; + dev->interrupt_out_interval = interrupt_out_interval ? 
interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; + +- /* we can register the device now, as it is ready */ +- usb_set_intfdata (interface, dev); +- +- retval = usb_register_dev (interface, &tower_class); +- +- if (retval) { +- /* something prevented us from registering this driver */ +- dev_err(idev, "Not able to get a minor for this device.\n"); +- usb_set_intfdata (interface, NULL); +- goto error; +- } +- dev->minor = interface->minor; +- +- /* let the user know what node this device is now attached to */ +- dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major " +- "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), +- USB_MAJOR, dev->minor); +- + /* get the firmware version and log it */ + result = usb_control_msg (udev, + usb_rcvctrlpipe(udev, 0), +@@ -991,6 +973,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + get_version_reply.minor, + le16_to_cpu(get_version_reply.build_no)); + ++ /* we can register the device now, as it is ready */ ++ usb_set_intfdata (interface, dev); ++ ++ retval = usb_register_dev (interface, &tower_class); ++ ++ if (retval) { ++ /* something prevented us from registering this driver */ ++ dev_err(idev, "Not able to get a minor for this device.\n"); ++ usb_set_intfdata (interface, NULL); ++ goto error; ++ } ++ dev->minor = interface->minor; ++ ++ /* let the user know what node this device is now attached to */ ++ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major " ++ "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), ++ USB_MAJOR, dev->minor); + + exit: + dbg(2, "%s: leave, return value 0x%.8lx (dev)", __func__, (long) dev); +diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c +index 6a030b931a3b..254194d61915 100644 +--- a/drivers/usb/renesas_usbhs/mod.c ++++ b/drivers/usb/renesas_usbhs/mod.c +@@ -272,9 +272,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data) + usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC); + usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); + +- usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); ++ /* ++ * The driver should not clear the xxxSTS after the line of ++ * "call irq callback functions" because each "if" statement is ++ * possible to call the callback function for avoiding any side effects. ++ */ ++ if (irq_state.intsts0 & BRDY) ++ usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); + usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts); +- usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); ++ if (irq_state.intsts0 & BEMP) ++ usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); + + /* + * call irq callback functions +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 0093261ccc57..003f8ddbfc3a 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -793,7 +793,7 @@ static void cp210x_set_termios(struct tty_struct *tty, + } else { + modem_ctl[0] &= ~0x7B; + modem_ctl[0] |= 0x01; +- modem_ctl[1] |= 0x40; ++ modem_ctl[1] = 0x40; + dev_dbg(dev, "%s - flow control = NONE\n", __func__); + } + +@@ -853,7 +853,9 @@ static int cp210x_tiocmget(struct tty_struct *tty) + unsigned int control; + int result; + +- cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1); ++ result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1); ++ if (result) ++ return result; + + result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) + |((control & CONTROL_RTS) ? 
TIOCM_RTS : 0) +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c +index 78b48c31abf5..efa75b4e51f2 100644 +--- a/drivers/usb/serial/kobil_sct.c ++++ b/drivers/usb/serial/kobil_sct.c +@@ -336,7 +336,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, + port->interrupt_out_urb->transfer_buffer_length = length; + + priv->cur_pos = priv->cur_pos + length; +- result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO); ++ result = usb_submit_urb(port->interrupt_out_urb, ++ GFP_ATOMIC); + dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result); + todo = priv->filled - priv->cur_pos; + +@@ -351,7 +352,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, + if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID || + priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) { + result = usb_submit_urb(port->interrupt_in_urb, +- GFP_NOIO); ++ GFP_ATOMIC); + dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result); + } + } +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index 0f16bf6ea71c..ddc71d706ac6 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -1250,7 +1250,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, + + if (urb->transfer_buffer == NULL) { + urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, +- GFP_KERNEL); ++ GFP_ATOMIC); + if (urb->transfer_buffer == NULL) { + dev_err_console(port, "%s no more kernel memory...\n", + __func__); +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index d06013033def..7df7df62e177 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -1438,8 +1438,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, + } + + if (urb->transfer_buffer == NULL) { +- urb->transfer_buffer = +- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); ++ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, ++ GFP_ATOMIC); + + if (urb->transfer_buffer == NULL) { + dev_err_console(port, "%s no more kernel memory...\n", +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index 80d689f0fda9..faeb36d6958d 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -1444,7 +1444,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] + + rc = usb_register(udriver); + if (rc) +- return rc; ++ goto failed_usb_register; + + for (sd = serial_drivers; *sd; ++sd) { + (*sd)->usb_driver = udriver; +@@ -1462,6 +1462,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] + while (sd-- > serial_drivers) + usb_serial_deregister(*sd); + usb_deregister(udriver); ++failed_usb_register: ++ kfree(udriver); + return rc; + } + EXPORT_SYMBOL_GPL(usb_serial_register_drivers); +diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c +index b1d815eb6d0b..8988b268a69a 100644 +--- a/drivers/usb/storage/transport.c ++++ b/drivers/usb/storage/transport.c +@@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) + + /* COMMAND STAGE */ + /* let's send the command via the control pipe */ ++ /* ++ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. ++ * Stack may be vmallocated. So no DMA for us. Make a copy. 
++ */ ++ memcpy(us->iobuf, srb->cmnd, srb->cmd_len); + result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, + US_CBI_ADSC, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, +- us->ifnum, srb->cmnd, srb->cmd_len); ++ us->ifnum, us->iobuf, srb->cmd_len); + + /* check the return code for the command */ + usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", +diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c +index 50fe668c6172..08dbe8ae0212 100644 +--- a/drivers/video/efifb.c ++++ b/drivers/video/efifb.c +@@ -270,9 +270,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, + return 1; + + if (regno < 16) { +- red >>= 8; +- green >>= 8; +- blue >>= 8; ++ red >>= 16 - info->var.red.length; ++ green >>= 16 - info->var.green.length; ++ blue >>= 16 - info->var.blue.length; + ((u32 *)(info->pseudo_palette))[regno] = + (red << info->var.red.offset) | + (green << info->var.green.offset) | +diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c +index ba3fac8318bb..47a4177b16d2 100644 +--- a/drivers/xen/xen-pciback/conf_space.c ++++ b/drivers/xen/xen-pciback/conf_space.c +@@ -16,8 +16,8 @@ + #include "conf_space.h" + #include "conf_space_quirks.h" + +-bool permissive; +-module_param(permissive, bool, 0644); ++bool xen_pcibk_permissive; ++module_param_named(permissive, xen_pcibk_permissive, bool, 0644); + + /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, + * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ +@@ -260,7 +260,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) + * This means that some fields may still be read-only because + * they have entries in the config_field list that intercept + * the write and do nothing. */ +- if (dev_data->permissive || permissive) { ++ if (dev_data->permissive || xen_pcibk_permissive) { + switch (size) { + case 1: + err = pci_write_config_byte(dev, offset, +diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h +index 2e1d73d1d5d0..62461a8ba1d6 100644 +--- a/drivers/xen/xen-pciback/conf_space.h ++++ b/drivers/xen/xen-pciback/conf_space.h +@@ -64,7 +64,7 @@ struct config_field_entry { + void *data; + }; + +-extern bool permissive; ++extern bool xen_pcibk_permissive; + + #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) + +diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c +index a5bb81a600f7..1667a9089a4a 100644 +--- a/drivers/xen/xen-pciback/conf_space_header.c ++++ b/drivers/xen/xen-pciback/conf_space_header.c +@@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) + + cmd->val = value; + +- if (!permissive && (!dev_data || !dev_data->permissive)) ++ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive)) + return 0; + + /* Only allow the guest to control certain bits. 
*/ +diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h +index f72af87640e0..560b3ecbcba8 100644 +--- a/drivers/xen/xen-pciback/pciback.h ++++ b/drivers/xen/xen-pciback/pciback.h +@@ -37,6 +37,7 @@ struct xen_pcibk_device { + struct xen_pci_sharedinfo *sh_info; + unsigned long flags; + struct work_struct op_work; ++ struct xen_pci_op op; + }; + + struct xen_pcibk_dev_data { +diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c +index b98cf0c35725..6c17f92341f5 100644 +--- a/drivers/xen/xen-pciback/pciback_ops.c ++++ b/drivers/xen/xen-pciback/pciback_ops.c +@@ -67,6 +67,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset) + enable ? "enable" : "disable"); + + if (enable) { ++ /* ++ * The MSI or MSI-X should not have an IRQ handler. Otherwise ++ * if the guest terminates we BUG_ON in free_msi_irqs. ++ */ ++ if (dev->msi_enabled || dev->msix_enabled) ++ goto out; ++ + rc = request_irq(dev_data->irq, + xen_pcibk_guest_interrupt, IRQF_SHARED, + dev_data->irq_name, dev); +@@ -141,7 +148,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, + if (unlikely(verbose_request)) + printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); + +- status = pci_enable_msi(dev); ++ if (dev->msi_enabled) ++ status = -EALREADY; ++ else if (dev->msix_enabled) ++ status = -ENXIO; ++ else ++ status = pci_enable_msi(dev); + + if (status) { + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", +@@ -170,20 +182,23 @@ static + int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, + struct pci_dev *dev, struct xen_pci_op *op) + { +- struct xen_pcibk_dev_data *dev_data; +- + if (unlikely(verbose_request)) + printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", + pci_name(dev)); +- pci_disable_msi(dev); + ++ if (dev->msi_enabled) { ++ struct xen_pcibk_dev_data *dev_data; ++ ++ pci_disable_msi(dev); ++ ++ dev_data = pci_get_drvdata(dev); ++ if (dev_data) ++ dev_data->ack_intr = 1; ++ } + op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; + if (unlikely(verbose_request)) + printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), + op->value); +- dev_data = pci_get_drvdata(dev); +- if (dev_data) +- dev_data->ack_intr = 1; + return 0; + } + +@@ -194,13 +209,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, + struct xen_pcibk_dev_data *dev_data; + int i, result; + struct msix_entry *entries; ++ u16 cmd; + + if (unlikely(verbose_request)) + printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", + pci_name(dev)); ++ + if (op->value > SH_INFO_MAX_VEC) + return -EINVAL; + ++ if (dev->msix_enabled) ++ return -EALREADY; ++ ++ /* ++ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able ++ * to access the BARs where the MSI-X entries reside. 
++ */ ++ pci_read_config_word(dev, PCI_COMMAND, &cmd); ++ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) ++ return -ENXIO; ++ + entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); + if (entries == NULL) + return -ENOMEM; +@@ -242,23 +270,27 @@ static + int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, + struct pci_dev *dev, struct xen_pci_op *op) + { +- struct xen_pcibk_dev_data *dev_data; + if (unlikely(verbose_request)) + printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", + pci_name(dev)); +- pci_disable_msix(dev); + ++ if (dev->msix_enabled) { ++ struct xen_pcibk_dev_data *dev_data; ++ ++ pci_disable_msix(dev); ++ ++ dev_data = pci_get_drvdata(dev); ++ if (dev_data) ++ dev_data->ack_intr = 1; ++ } + /* + * SR-IOV devices (which don't have any legacy IRQ) have + * an undefined IRQ value of zero. + */ + op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; + if (unlikely(verbose_request)) +- printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), +- op->value); +- dev_data = pci_get_drvdata(dev); +- if (dev_data) +- dev_data->ack_intr = 1; ++ printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", ++ pci_name(dev), op->value); + return 0; + } + #endif +@@ -295,9 +327,14 @@ void xen_pcibk_do_op(struct work_struct *data) + container_of(data, struct xen_pcibk_device, op_work); + struct pci_dev *dev; + struct xen_pcibk_dev_data *dev_data = NULL; +- struct xen_pci_op *op = &pdev->sh_info->op; ++ struct xen_pci_op *op = &pdev->op; + int test_intx = 0; ++#ifdef CONFIG_PCI_MSI ++ unsigned int nr = 0; ++#endif + ++ *op = pdev->sh_info->op; ++ barrier(); + dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); + + if (dev == NULL) +@@ -323,6 +360,7 @@ void xen_pcibk_do_op(struct work_struct *data) + op->err = xen_pcibk_disable_msi(pdev, dev, op); + break; + case XEN_PCI_OP_enable_msix: ++ nr = op->value; + op->err = xen_pcibk_enable_msix(pdev, dev, op); + break; + case XEN_PCI_OP_disable_msix: +@@ -339,6 +377,17 @@ void xen_pcibk_do_op(struct work_struct *data) + if ((dev_data->enable_intx != test_intx)) + xen_pcibk_control_isr(dev, 0 /* no reset */); + } ++ pdev->sh_info->op.err = op->err; ++ pdev->sh_info->op.value = op->value; ++#ifdef CONFIG_PCI_MSI ++ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { ++ unsigned int i; ++ ++ for (i = 0; i < nr; i++) ++ pdev->sh_info->op.msix_entries[i].vector = ++ op->msix_entries[i].vector; ++ } ++#endif + /* Tell the driver domain that we're done. 
*/ + wmb(); + clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index dbefa6c609f4..296cc1b49446 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1496,6 +1496,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, + int namelen; + int ret = 0; + ++ if (!S_ISDIR(file_inode(file)->i_mode)) ++ return -ENOTDIR; ++ + ret = mnt_want_write_file(file); + if (ret) + goto out; +@@ -1553,6 +1556,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, + struct btrfs_ioctl_vol_args *vol_args; + int ret; + ++ if (!S_ISDIR(file_inode(file)->i_mode)) ++ return -ENOTDIR; ++ + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); +@@ -1576,6 +1582,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, + bool readonly = false; + struct btrfs_qgroup_inherit *inherit = NULL; + ++ if (!S_ISDIR(file_inode(file)->i_mode)) ++ return -ENOTDIR; ++ + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); +@@ -2081,6 +2090,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, + int ret; + int err = 0; + ++ if (!S_ISDIR(dir->i_mode)) ++ return -ENOTDIR; ++ + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); +diff --git a/fs/coredump.c b/fs/coredump.c +index 4f03b2b50375..a94f94d4f1a1 100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -1,6 +1,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -375,7 +376,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state) + if (core_waiters > 0) { + struct core_thread *ptr; + ++ freezer_do_not_count(); + wait_for_completion(&core_state->startup); ++ freezer_count(); + /* + * Wait for all the threads to become inactive, so that + * all the thread context (extended register state, like +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 046e3e93783e..f9c938e21e65 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -246,6 +246,7 @@ struct ext4_io_submit { + #define EXT4_MAX_BLOCK_SIZE 65536 + #define EXT4_MIN_BLOCK_LOG_SIZE 10 + #define EXT4_MAX_BLOCK_LOG_SIZE 16 ++#define EXT4_MAX_CLUSTER_LOG_SIZE 30 + #ifdef __KERNEL__ + # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) + #else +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 221b58298847..31179ba2072c 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -53,25 +53,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, + struct ext4_inode_info *ei) + { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); +- __u16 csum_lo; +- __u16 csum_hi = 0; + __u32 csum; ++ __u16 dummy_csum = 0; ++ int offset = offsetof(struct ext4_inode, i_checksum_lo); ++ unsigned int csum_size = sizeof(dummy_csum); + +- csum_lo = le16_to_cpu(raw->i_checksum_lo); +- raw->i_checksum_lo = 0; +- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && +- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { +- csum_hi = le16_to_cpu(raw->i_checksum_hi); +- raw->i_checksum_hi = 0; +- } ++ csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset); ++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size); ++ offset += csum_size; ++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, ++ EXT4_GOOD_OLD_INODE_SIZE - offset); + +- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, +- EXT4_INODE_SIZE(inode->i_sb)); +- +- raw->i_checksum_lo = cpu_to_le16(csum_lo); +- if 
(EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && +- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) +- raw->i_checksum_hi = cpu_to_le16(csum_hi); ++ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { ++ offset = offsetof(struct ext4_inode, i_checksum_hi); ++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + ++ EXT4_GOOD_OLD_INODE_SIZE, ++ offset - EXT4_GOOD_OLD_INODE_SIZE); ++ if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { ++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, ++ csum_size); ++ offset += csum_size; ++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, ++ EXT4_INODE_SIZE(inode->i_sb) - ++ offset); ++ } ++ } + + return csum; + } +@@ -3604,7 +3610,7 @@ int ext4_can_truncate(struct inode *inode) + } + + /* +- * ext4_punch_hole: punches a hole in a file by releaseing the blocks ++ * ext4_punch_hole: punches a hole in a file by releasing the blocks + * associated with the given offset and length + * + * @inode: File inode +@@ -3640,7 +3646,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) + * Write out all dirty pages to avoid race conditions + * Then release them. + */ +- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ++ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { + ret = filemap_write_and_wait_range(mapping, offset, + offset + length - 1); + if (ret) +@@ -4474,14 +4480,14 @@ static int ext4_do_update_inode(handle_t *handle, + * Fix up interoperability with old kernels. Otherwise, old inodes get + * re-used with the upper 16 bits of the uid/gid intact + */ +- if (!ei->i_dtime) { ++ if (ei->i_dtime && list_empty(&ei->i_orphan)) { ++ raw_inode->i_uid_high = 0; ++ raw_inode->i_gid_high = 0; ++ } else { + raw_inode->i_uid_high = + cpu_to_le16(high_16_bits(i_uid)); + raw_inode->i_gid_high = + cpu_to_le16(high_16_bits(i_gid)); +- } else { +- raw_inode->i_uid_high = 0; +- raw_inode->i_gid_high = 0; + } + } else { + raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 08b4495c1b12..cb9eec025ba8 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -808,7 +808,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b) + * for this page; do not hold this lock when calling this routine! + */ + +-static int ext4_mb_init_cache(struct page *page, char *incore) ++static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) + { + ext4_group_t ngroups; + int blocksize; +@@ -841,7 +841,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) + /* allocate buffer_heads to read bitmaps */ + if (groups_per_page > 1) { + i = sizeof(struct buffer_head *) * groups_per_page; +- bh = kzalloc(i, GFP_NOFS); ++ bh = kzalloc(i, gfp); + if (bh == NULL) { + err = -ENOMEM; + goto out; +@@ -966,7 +966,7 @@ out: + * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 
+ */ + static int ext4_mb_get_buddy_page_lock(struct super_block *sb, +- ext4_group_t group, struct ext4_buddy *e4b) ++ ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) + { + struct inode *inode = EXT4_SB(sb)->s_buddy_cache; + int block, pnum, poff; +@@ -985,7 +985,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, + block = group * 2; + pnum = block / blocks_per_page; + poff = block % blocks_per_page; +- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); ++ page = find_or_create_page(inode->i_mapping, pnum, gfp); + if (!page) + return -EIO; + BUG_ON(page->mapping != inode->i_mapping); +@@ -999,7 +999,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, + + block++; + pnum = block / blocks_per_page; +- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); ++ page = find_or_create_page(inode->i_mapping, pnum, gfp); + if (!page) + return -EIO; + BUG_ON(page->mapping != inode->i_mapping); +@@ -1025,7 +1025,7 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) + * calling this routine! + */ + static noinline_for_stack +-int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) ++int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) + { + + struct ext4_group_info *this_grp; +@@ -1043,7 +1043,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) + * have taken a reference using ext4_mb_load_buddy and that + * would have pinned buddy page to page cache. + */ +- ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); ++ ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); + if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { + /* + * somebody initialized the group +@@ -1053,7 +1053,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) + } + + page = e4b.bd_bitmap_page; +- ret = ext4_mb_init_cache(page, NULL); ++ ret = ext4_mb_init_cache(page, NULL, gfp); + if (ret) + goto err; + if (!PageUptodate(page)) { +@@ -1073,7 +1073,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) + } + /* init buddy cache */ + page = e4b.bd_buddy_page; +- ret = ext4_mb_init_cache(page, e4b.bd_bitmap); ++ ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); + if (ret) + goto err; + if (!PageUptodate(page)) { +@@ -1092,8 +1092,8 @@ err: + * calling this routine! + */ + static noinline_for_stack int +-ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, +- struct ext4_buddy *e4b) ++ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, ++ struct ext4_buddy *e4b, gfp_t gfp) + { + int blocks_per_page; + int block; +@@ -1123,7 +1123,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + * we need full data about the group + * to make a good selection + */ +- ret = ext4_mb_init_group(sb, group); ++ ret = ext4_mb_init_group(sb, group, gfp); + if (ret) + return ret; + } +@@ -1151,11 +1151,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + * wait for it to initialize. 
+ */ + page_cache_release(page); +- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); ++ page = find_or_create_page(inode->i_mapping, pnum, gfp); + if (page) { + BUG_ON(page->mapping != inode->i_mapping); + if (!PageUptodate(page)) { +- ret = ext4_mb_init_cache(page, NULL); ++ ret = ext4_mb_init_cache(page, NULL, gfp); + if (ret) { + unlock_page(page); + goto err; +@@ -1182,11 +1182,12 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, + if (page == NULL || !PageUptodate(page)) { + if (page) + page_cache_release(page); +- page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); ++ page = find_or_create_page(inode->i_mapping, pnum, gfp); + if (page) { + BUG_ON(page->mapping != inode->i_mapping); + if (!PageUptodate(page)) { +- ret = ext4_mb_init_cache(page, e4b->bd_bitmap); ++ ret = ext4_mb_init_cache(page, e4b->bd_bitmap, ++ gfp); + if (ret) { + unlock_page(page); + goto err; +@@ -1220,6 +1221,12 @@ err: + return ret; + } + ++static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, ++ struct ext4_buddy *e4b) ++{ ++ return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); ++} ++ + static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) + { + if (e4b->bd_bitmap_page) +@@ -1993,7 +2000,7 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, + + /* We only do this if the grp has never been initialized */ + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { +- int ret = ext4_mb_init_group(ac->ac_sb, group); ++ int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); + if (ret) + return 0; + } +@@ -4748,7 +4755,9 @@ do_more: + #endif + trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); + +- err = ext4_mb_load_buddy(sb, block_group, &e4b); ++ /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. 
*/ ++ err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, ++ GFP_NOFS|__GFP_NOFAIL); + if (err) + goto error_return; + +@@ -5159,7 +5168,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) + grp = ext4_get_group_info(sb, group); + /* We only do this if the grp has never been initialized */ + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { +- ret = ext4_mb_init_group(sb, group); ++ ret = ext4_mb_init_group(sb, group, GFP_NOFS); + if (ret) + break; + } +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index facf8590b714..407bcf79aa31 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -417,15 +417,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + __u32 csum; +- __le32 save_csum; + int size; ++ __u32 dummy_csum = 0; ++ int offset = offsetof(struct dx_tail, dt_checksum); + + size = count_offset + (count * sizeof(struct dx_entry)); +- save_csum = t->dt_checksum; +- t->dt_checksum = 0; + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); +- csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); +- t->dt_checksum = save_csum; ++ csum = ext4_chksum(sbi, csum, (__u8 *)t, offset); ++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); + + return cpu_to_le32(csum); + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 15a81897df4e..faa192087033 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1936,23 +1936,25 @@ failed: + static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + struct ext4_group_desc *gdp) + { +- int offset; ++ int offset = offsetof(struct ext4_group_desc, bg_checksum); + __u16 crc = 0; + __le32 le_group = cpu_to_le32(block_group); + + if ((sbi->s_es->s_feature_ro_compat & + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) { + /* Use new metadata_csum algorithm */ +- __le16 save_csum; + __u32 csum32; ++ __u16 dummy_csum = 0; + +- save_csum = gdp->bg_checksum; +- gdp->bg_checksum = 0; + csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, + sizeof(le_group)); +- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, +- sbi->s_desc_size); +- gdp->bg_checksum = save_csum; ++ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset); ++ csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum, ++ sizeof(dummy_csum)); ++ offset += sizeof(dummy_csum); ++ if (offset < sbi->s_desc_size) ++ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset, ++ sbi->s_desc_size - offset); + + crc = csum32 & 0xFFFF; + goto out; +@@ -1963,8 +1965,6 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM))) + return 0; + +- offset = offsetof(struct ext4_group_desc, bg_checksum); +- + crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); + crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); + crc = crc16(crc, (__u8 *)gdp, offset); +@@ -2002,6 +2002,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, + + /* Called at mount-time, super-block is locked */ + static int ext4_check_descriptors(struct super_block *sb, ++ ext4_fsblk_t sb_block, + ext4_group_t *first_not_zeroed) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +@@ -2032,6 +2033,11 @@ static int ext4_check_descriptors(struct super_block *sb, + grp = i; + + block_bitmap = ext4_block_bitmap(sb, gdp); ++ if (block_bitmap == sb_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Block bitmap for group 
%u overlaps " ++ "superblock", i); ++ } + if (block_bitmap < first_block || block_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u not in group " +@@ -2039,6 +2045,11 @@ static int ext4_check_descriptors(struct super_block *sb, + return 0; + } + inode_bitmap = ext4_inode_bitmap(sb, gdp); ++ if (inode_bitmap == sb_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Inode bitmap for group %u overlaps " ++ "superblock", i); ++ } + if (inode_bitmap < first_block || inode_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u not in group " +@@ -2046,6 +2057,11 @@ static int ext4_check_descriptors(struct super_block *sb, + return 0; + } + inode_table = ext4_inode_table(sb, gdp); ++ if (inode_table == sb_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Inode table for group %u overlaps " ++ "superblock", i); ++ } + if (inode_table < first_block || + inode_table + sbi->s_itb_per_group - 1 > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " +@@ -3521,7 +3537,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + if (blocksize < EXT4_MIN_BLOCK_SIZE || + blocksize > EXT4_MAX_BLOCK_SIZE) { + ext4_msg(sb, KERN_ERR, +- "Unsupported filesystem blocksize %d", blocksize); ++ "Unsupported filesystem blocksize %d (%d log_block_size)", ++ blocksize, le32_to_cpu(es->s_log_block_size)); ++ goto failed_mount; ++ } ++ if (le32_to_cpu(es->s_log_block_size) > ++ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ++ ext4_msg(sb, KERN_ERR, ++ "Invalid log block size: %u", ++ le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } + +@@ -3636,6 +3660,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + "block size (%d)", clustersize, blocksize); + goto failed_mount; + } ++ if (le32_to_cpu(es->s_log_cluster_size) > ++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ++ ext4_msg(sb, KERN_ERR, ++ "Invalid log cluster size: %u", ++ le32_to_cpu(es->s_log_cluster_size)); ++ goto failed_mount; ++ } + sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - + le32_to_cpu(es->s_log_block_size); + sbi->s_clusters_per_group = +@@ -3766,7 +3797,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + goto failed_mount2; + } + } +- if (!ext4_check_descriptors(sb, &first_not_zeroed)) { ++ if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { + ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); + goto failed_mount2; + } +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index a20816e7eb3a..92850bab4513 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -123,17 +123,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode, + { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __u32 csum; +- __le32 save_csum; + __le64 dsk_block_nr = cpu_to_le64(block_nr); ++ __u32 dummy_csum = 0; ++ int offset = offsetof(struct ext4_xattr_header, h_checksum); + +- save_csum = hdr->h_checksum; +- hdr->h_checksum = 0; + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr, + sizeof(dsk_block_nr)); +- csum = ext4_chksum(sbi, csum, (__u8 *)hdr, +- EXT4_BLOCK_SIZE(inode->i_sb)); ++ csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset); ++ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum)); ++ offset += sizeof(dummy_csum); ++ csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset, ++ EXT4_BLOCK_SIZE(inode->i_sb) - offset); + +- hdr->h_checksum = 
save_csum; + return cpu_to_le32(csum); + } + +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c +index b58a9cbb9695..f0faa87e23d3 100644 +--- a/fs/hostfs/hostfs_kern.c ++++ b/fs/hostfs/hostfs_kern.c +@@ -942,10 +942,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) + + if (S_ISLNK(root_inode->i_mode)) { + char *name = follow_link(host_root_path); +- if (IS_ERR(name)) ++ if (IS_ERR(name)) { + err = PTR_ERR(name); +- else +- err = read_name(root_inode, name); ++ goto out_put; ++ } ++ err = read_name(root_inode, name); + kfree(name); + if (err) + goto out_put; +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c +index 10489bbd40fc..955fabf46a72 100644 +--- a/fs/isofs/inode.c ++++ b/fs/isofs/inode.c +@@ -726,6 +726,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) + pri_bh = NULL; + + root_found: ++ /* We don't support read-write mounts */ ++ if (!(s->s_flags & MS_RDONLY)) { ++ error = -EACCES; ++ goto out_freebh; ++ } + + if (joliet_level && (pri == NULL || !opt.rock)) { + /* This is the case of Joliet with the norock mount flag. +@@ -1538,9 +1543,6 @@ struct inode *__isofs_iget(struct super_block *sb, + static struct dentry *isofs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) + { +- /* We don't support read-write mounts */ +- if (!(flags & MS_RDONLY)) +- return ERR_PTR(-EACCES); + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); + } + +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index e05c96ebb27d..57d3b5ef22a0 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -302,6 +302,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct n + err_socks: + svc_rpcb_cleanup(serv, net); + err_bind: ++ nn->cb_users[minorversion]--; + dprintk("NFS: Couldn't create callback socket: err = %d; " + "net = %p\n", ret, net); + return ret; +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c +index e98ecf8d2588..7f7a89a67c6d 100644 +--- a/fs/nfs/callback_xdr.c ++++ b/fs/nfs/callback_xdr.c +@@ -884,7 +884,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r + if (hdr_arg.minorversion == 0) { + cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident); + if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) +- return rpc_drop_reply; ++ goto out_invalidcred; + } + + hdr_res.taglen = hdr_arg.taglen; +@@ -911,6 +911,10 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r + nfs_put_client(cps.clp); + dprintk("%s: done, status = %u\n", __func__, ntohl(status)); + return rpc_success; ++ ++out_invalidcred: ++ pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n"); ++ return rpc_autherr_badcred; + } + + /* +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 2bdaf57c82d0..7d45b38aeb08 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1464,6 +1464,9 @@ restart: + "Zeroing state\n", __func__, status); + case -ENOENT: + case -ENOMEM: ++ case -EACCES: ++ case -EROFS: ++ case -EIO: + case -ESTALE: + /* + * Open state on this file cannot be recovered +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 4a58afa99654..b0878e1921be 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -2193,7 +2193,8 @@ out: + if (!list_empty(&clp->cl_revoked)) + seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; + out_no_session: +- kfree(conn); ++ if (conn) ++ free_conn(conn); + spin_unlock(&nn->client_lock); + return 
status; + out_put_session: +diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c +index f65bdcf61526..6d97883e2652 100644 +--- a/fs/ocfs2/dlm/dlmconvert.c ++++ b/fs/ocfs2/dlm/dlmconvert.c +@@ -265,7 +265,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + struct dlm_lock *lock, int flags, int type) + { + enum dlm_status status; +- u8 old_owner = res->owner; + + mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, + lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); +@@ -332,7 +331,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; +- lock->convert_pending = 0; + /* if it failed, move it back to granted queue. + * if master returns DLM_NORMAL and then down before sending ast, + * it may have already been moved to granted queue, reset to +@@ -341,12 +339,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + if (status != DLM_NOTQUEUED) + dlm_error(status); + dlm_revert_pending_convert(res, lock); +- } else if ((res->state & DLM_LOCK_RES_RECOVERING) || +- (old_owner != res->owner)) { +- mlog(0, "res %.*s is in recovering or has been recovered.\n", +- res->lockname.len, res->lockname.name); ++ } else if (!lock->convert_pending) { ++ mlog(0, "%s: res %.*s, owner died and lock has been moved back " ++ "to granted list, retry convert.\n", ++ dlm->name, res->lockname.len, res->lockname.name); + status = DLM_RECOVERING; + } ++ ++ lock->convert_pending = 0; + bail: + spin_unlock(&res->spinlock); + +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index d0e8c0b1767f..496af7fd87d5 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1499,7 +1499,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, + u64 start, u64 len) + { + int ret = 0; +- u64 tmpend, end = start + len; ++ u64 tmpend = 0; ++ u64 end = start + len; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int csize = osb->s_clustersize; + handle_t *handle; +@@ -1531,18 +1532,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, + } + + /* +- * We want to get the byte offset of the end of the 1st cluster. ++ * If start is on a cluster boundary and end is somewhere in another ++ * cluster, we have not COWed the cluster starting at start, unless ++ * end is also within the same cluster. So, in this case, we skip this ++ * first call to ocfs2_zero_range_for_truncate() truncate and move on ++ * to the next one. + */ +- tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1)); +- if (tmpend > end) +- tmpend = end; ++ if ((start & (csize - 1)) != 0) { ++ /* ++ * We want to get the byte offset of the end of the 1st ++ * cluster. 
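/*
 * Illustrative sketch, not part of the patch: the ocfs2 hunk here
 * zeroes the head fragment only when @start is not cluster-aligned,
 * and finds the end of that first cluster by rounding @start down to
 * a cluster boundary and adding one cluster size, clamped to @end.
 * The arithmetic in isolation, assuming a power-of-two cluster size:
 */
static unsigned long long first_cluster_end(unsigned long long start,
					    unsigned long long end,
					    unsigned int csize)
{
	unsigned long long tmpend;

	if ((start & (csize - 1)) == 0)
		return start;		/* aligned: no head fragment */
	tmpend = (unsigned long long)csize +
		 (start & ~(unsigned long long)(csize - 1));
	return tmpend > end ? end : tmpend;
}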
++ */ ++ tmpend = (u64)osb->s_clustersize + ++ (start & ~(osb->s_clustersize - 1)); ++ if (tmpend > end) ++ tmpend = end; + +- trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start, +- (unsigned long long)tmpend); ++ trace_ocfs2_zero_partial_clusters_range1( ++ (unsigned long long)start, ++ (unsigned long long)tmpend); + +- ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); +- if (ret) +- mlog_errno(ret); ++ ret = ocfs2_zero_range_for_truncate(inode, handle, start, ++ tmpend); ++ if (ret) ++ mlog_errno(ret); ++ } + + if (tmpend < end) { + /* +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c +index bda61a759b68..7df456db7c33 100644 +--- a/fs/pstore/ram_core.c ++++ b/fs/pstore/ram_core.c +@@ -45,43 +45,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz) + return atomic_read(&prz->buffer->start); + } + +-/* increase and wrap the start pointer, returning the old value */ +-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a) +-{ +- int old; +- int new; +- +- do { +- old = atomic_read(&prz->buffer->start); +- new = old + a; +- while (unlikely(new > prz->buffer_size)) +- new -= prz->buffer_size; +- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old); +- +- return old; +-} +- +-/* increase the size counter until it hits the max size */ +-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a) +-{ +- size_t old; +- size_t new; +- +- if (atomic_read(&prz->buffer->size) == prz->buffer_size) +- return; +- +- do { +- old = atomic_read(&prz->buffer->size); +- new = old + a; +- if (new > prz->buffer_size) +- new = prz->buffer_size; +- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); +-} +- + static DEFINE_RAW_SPINLOCK(buffer_lock); + + /* increase and wrap the start pointer, returning the old value */ +-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) ++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) + { + int old; + int new; +@@ -91,7 +58,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) + + old = atomic_read(&prz->buffer->start); + new = old + a; +- while (unlikely(new > prz->buffer_size)) ++ while (unlikely(new >= prz->buffer_size)) + new -= prz->buffer_size; + atomic_set(&prz->buffer->start, new); + +@@ -101,7 +68,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) + } + + /* increase the size counter until it hits the max size */ +-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) ++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) + { + size_t old; + size_t new; +@@ -122,9 +89,6 @@ exit: + raw_spin_unlock_irqrestore(&buffer_lock, flags); + } + +-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic; +-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic; +- + static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, + uint8_t *data, size_t len, uint8_t *ecc) + { +@@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz, + const void *s, unsigned int start, unsigned int count) + { + struct persistent_ram_buffer *buffer = prz->buffer; +- memcpy(buffer->data + start, s, count); ++ memcpy_toio(buffer->data + start, s, count); + persistent_ram_update_ecc(prz, start, count); + } + +@@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone 
*prz) + } + + prz->old_log_size = size; +- memcpy(prz->old_log, &buffer->data[start], size - start); +- memcpy(prz->old_log + size - start, &buffer->data[0], start); ++ memcpy_fromio(prz->old_log, &buffer->data[start], size - start); ++ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start); + } + + int notrace persistent_ram_write(struct persistent_ram_zone *prz, +@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, + return NULL; + } + +- buffer_start_add = buffer_start_add_locked; +- buffer_size_add = buffer_size_add_locked; +- + if (memtype) + va = ioremap(start, size); + else +diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c +index e1978fd895f5..58cce0c606f1 100644 +--- a/fs/reiserfs/ibalance.c ++++ b/fs/reiserfs/ibalance.c +@@ -1082,8 +1082,9 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure + insert_ptr); + } + +- memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); + insert_ptr[0] = new_insert_ptr; ++ if (new_insert_ptr) ++ memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE); + + return order; + } +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index e2e202a07b31..7ff27fa3a453 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -184,7 +184,15 @@ static int remove_save_link_only(struct super_block *s, + static int reiserfs_quota_on_mount(struct super_block *, int); + #endif + +-/* look for uncompleted unlinks and truncates and complete them */ ++/* ++ * Look for uncompleted unlinks and truncates and complete them ++ * ++ * Called with superblock write locked. If quotas are enabled, we have to ++ * release/retake lest we call dquot_quota_on_mount(), proceed to ++ * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per ++ * cpu worklets to complete flush_async_commits() that in turn wait for the ++ * superblock write lock. ++ */ + static int finish_unfinished(struct super_block *s) + { + INITIALIZE_PATH(path); +@@ -231,7 +239,9 @@ static int finish_unfinished(struct super_block *s) + quota_enabled[i] = 0; + continue; + } ++ reiserfs_write_unlock(s); + ret = reiserfs_quota_on_mount(s, i); ++ reiserfs_write_lock(s); + if (ret < 0) + reiserfs_warning(s, "reiserfs-2500", + "cannot turn on journaled " +diff --git a/fs/seq_file.c b/fs/seq_file.c +index 3dd44db1465e..c009e605c7c9 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -206,8 +206,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) + size -= n; + buf += n; + copied += n; +- if (!m->count) ++ if (!m->count) { ++ m->from = 0; + m->index++; ++ } + if (!size) + goto Done; + } +diff --git a/fs/super.c b/fs/super.c +index 97280e76179c..fd3281d1ec45 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -1327,8 +1327,8 @@ int freeze_super(struct super_block *sb) + } + } + /* +- * This is just for debugging purposes so that fs can warn if it +- * sees write activity when frozen is set to SB_FREEZE_COMPLETE. ++ * For debugging purposes so that fs can warn if it sees write activity ++ * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super(). 
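/*
 * Illustrative sketch, not part of the patch: the fs/super.c hunks
 * here make thaw_super() demand SB_FREEZE_COMPLETE instead of merely
 * rejecting SB_UNFROZEN, so a superblock caught mid-freeze can no
 * longer be thawed out from under freeze_super().  The guard reduced
 * to its core (state names mirror the kernel's, thaw() is invented):
 */
#include <errno.h>

enum { UNFROZEN, FREEZE_WRITE, FREEZE_PAGEFAULT, FREEZE_FS, FREEZE_COMPLETE };

static int thaw(int *frozen)
{
	if (*frozen != FREEZE_COMPLETE)
		return -EINVAL;		/* not fully frozen: refuse */
	*frozen = UNFROZEN;
	return 0;
}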
+ */ + sb->s_writers.frozen = SB_FREEZE_COMPLETE; + up_write(&sb->s_umount); +@@ -1347,7 +1347,7 @@ int thaw_super(struct super_block *sb) + int error; + + down_write(&sb->s_umount); +- if (sb->s_writers.frozen == SB_UNFROZEN) { ++ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) { + up_write(&sb->s_umount); + return -EINVAL; + } +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c +index 605af512aec2..db364d4d0d18 100644 +--- a/fs/ubifs/dir.c ++++ b/fs/ubifs/dir.c +@@ -348,7 +348,8 @@ static unsigned int vfs_dent_type(uint8_t type) + */ + static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) + { +- int err, over = 0; ++ int err = 0; ++ int over = 0; + loff_t pos = file->f_pos; + struct qstr nm; + union ubifs_key key; +@@ -467,16 +468,23 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) + } + + out: +- if (err != -ENOENT) { +- ubifs_err("cannot find next direntry, error %d", err); +- return err; +- } +- + kfree(file->private_data); + file->private_data = NULL; ++ ++ if (err != -ENOENT) ++ ubifs_err("cannot find next direntry, error %d", err); ++ else ++ /* ++ * -ENOENT is a non-fatal error in this context, the TNC uses ++ * it to indicate that the cursor moved past the current directory ++ * and readdir() has to stop. ++ */ ++ err = 0; ++ ++ + /* 2 is a special value indicating that there are no more direntries */ + file->f_pos = 2; +- return 0; ++ return err; + } + + static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence) +diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c +index 52a6559275c4..3f620c0ba0a6 100644 +--- a/fs/ubifs/tnc_commit.c ++++ b/fs/ubifs/tnc_commit.c +@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) + + p = c->gap_lebs; + do { +- ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs); ++ ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs); + written = layout_leb_in_gaps(c, p); + if (written < 0) { + err = written; +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c +index 0f7139bdb2c2..69a42f36b421 100644 +--- a/fs/ubifs/xattr.c ++++ b/fs/ubifs/xattr.c +@@ -167,6 +167,7 @@ out_cancel: + host_ui->xattr_cnt -= 1; + host_ui->xattr_size -= CALC_DENT_SIZE(nm->len); + host_ui->xattr_size -= CALC_XATTR_BYTES(size); ++ host_ui->xattr_names -= nm->len; + mutex_unlock(&host_ui->ui_mutex); + out_free: + make_bad_inode(inode); +@@ -514,6 +515,7 @@ out_cancel: + host_ui->xattr_cnt += 1; + host_ui->xattr_size += CALC_DENT_SIZE(nm->len); + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); ++ host_ui->xattr_names += nm->len; + mutex_unlock(&host_ui->ui_mutex); + ubifs_release_budget(c, &req); + make_bad_inode(inode); +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c +index bac3e1635b7d..e59f309efbee 100644 +--- a/fs/xfs/xfs_dquot.c ++++ b/fs/xfs/xfs_dquot.c +@@ -309,8 +309,7 @@ xfs_dquot_buf_verify_crc( + if (mp->m_quotainfo) + ndquots = mp->m_quotainfo->qi_dqperchunk; + else +- ndquots = xfs_qm_calc_dquots_per_chunk(mp, +- XFS_BB_TO_FSB(mp, bp->b_length)); ++ ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length); + + for (i = 0; i < ndquots; i++, d++) { + if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk), +diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c +index e8e310c05097..363c4cc9bfd5 100644 +--- a/fs/xfs/xfs_mount.c ++++ b/fs/xfs/xfs_mount.c +@@ -689,7 +689,8 @@ xfs_sb_verify( + * Only check the in progress field for the primary superblock as + * mkfs.xfs doesn't clear it from secondary superblocks. 
+ */ +- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, ++ return xfs_mount_validate_sb(mp, &sb, ++ bp->b_maps[0].bm_bn == XFS_SB_DADDR, + check_version); + } + +diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h +index c184aa8ec8cd..a8203040f27a 100644 +--- a/include/asm-generic/uaccess.h ++++ b/include/asm-generic/uaccess.h +@@ -228,14 +228,18 @@ extern int __put_user_bad(void) __attribute__((noreturn)); + might_sleep(); \ + access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \ + __get_user(x, ptr) : \ +- -EFAULT; \ ++ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ + }) + + #ifndef __get_user_fn + static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) + { +- size = __copy_from_user(x, ptr, size); +- return size ? -EFAULT : size; ++ size_t n = __copy_from_user(x, ptr, size); ++ if (unlikely(n)) { ++ memset(x + (size - n), 0, n); ++ return -EFAULT; ++ } ++ return 0; + } + + #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) +@@ -255,11 +259,13 @@ extern int __get_user_bad(void) __attribute__((noreturn)); + static inline long copy_from_user(void *to, + const void __user * from, unsigned long n) + { ++ unsigned long res = n; + might_sleep(); +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_from_user(to, from, n); +- else +- return n; ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ res = __copy_from_user(to, from, n); ++ if (unlikely(res)) ++ memset(to + (n - res), 0, res); ++ return res; + } + + static inline long copy_to_user(void __user *to, +diff --git a/include/crypto/hash.h b/include/crypto/hash.h +index 26cb1eb16f4c..c8c79878c082 100644 +--- a/include/crypto/hash.h ++++ b/include/crypto/hash.h +@@ -94,6 +94,7 @@ struct crypto_ahash { + unsigned int keylen); + + unsigned int reqsize; ++ bool has_setkey; + struct crypto_tfm base; + }; + +@@ -181,6 +182,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req) + + int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); ++static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) ++{ ++ return tfm->has_setkey; ++} ++ + int crypto_ahash_finup(struct ahash_request *req); + int crypto_ahash_final(struct ahash_request *req); + int crypto_ahash_digest(struct ahash_request *req); +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index d61c11170213..bfefd8139e18 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,6 +30,9 @@ struct alg_sock { + + struct sock *parent; + ++ unsigned int refcnt; ++ unsigned int nokey_refcnt; ++ + const struct af_alg_type *type; + void *private; + }; +@@ -49,8 +52,10 @@ struct af_alg_type { + void (*release)(void *private); + int (*setkey)(void *private, const u8 *key, unsigned int keylen); + int (*accept)(void *private, struct sock *sk); ++ int (*accept_nokey)(void *private, struct sock *sk); + + struct proto_ops *ops; ++ struct proto_ops *ops_nokey; + struct module *owner; + char name[14]; + }; +@@ -64,6 +69,7 @@ int af_alg_register_type(const struct af_alg_type *type); + int af_alg_unregister_type(const struct af_alg_type *type); + + int af_alg_release(struct socket *sock); ++void af_alg_release_parent(struct sock *sk); + int af_alg_accept(struct sock *sk, struct socket *newsock); + + int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len, +@@ -80,11 +86,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk) + return (struct alg_sock *)sk; + } + +-static inline void af_alg_release_parent(struct sock *sk) +-{ +- 
sock_put(alg_sk(sk)->parent); +-} +- + static inline void af_alg_init_completion(struct af_alg_completion *completion) + { + init_completion(&completion->completion); +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h +index fb0ab651a041..fb9fbe2f63e7 100644 +--- a/include/linux/can/dev.h ++++ b/include/linux/can/dev.h +@@ -31,6 +31,7 @@ enum can_mode { + * CAN common private data + */ + struct can_priv { ++ struct net_device *dev; + struct can_device_stats can_stats; + + struct can_bittiming bittiming; +@@ -42,7 +43,7 @@ struct can_priv { + u32 ctrlmode_supported; + + int restart_ms; +- struct timer_list restart_timer; ++ struct delayed_work restart_work; + + int (*do_set_bittiming)(struct net_device *dev); + int (*do_set_mode)(struct net_device *dev, enum can_mode mode); +diff --git a/include/linux/crypto.h b/include/linux/crypto.h +index 2b00d92a6e6f..61dd0b15d21c 100644 +--- a/include/linux/crypto.h ++++ b/include/linux/crypto.h +@@ -354,6 +354,7 @@ struct ablkcipher_tfm { + + unsigned int ivsize; + unsigned int reqsize; ++ bool has_setkey; + }; + + struct aead_tfm { +@@ -664,6 +665,13 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, + return crt->setkey(crt->base, key, keylen); + } + ++static inline bool crypto_ablkcipher_has_setkey(struct crypto_ablkcipher *tfm) ++{ ++ struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); ++ ++ return crt->has_setkey; ++} ++ + static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( + struct ablkcipher_request *req) + { +diff --git a/include/linux/filter.h b/include/linux/filter.h +index f65f5a69db8f..c2bea01d0466 100644 +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -36,7 +36,11 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp) + return fp->len * sizeof(struct sock_filter) + sizeof(*fp); + } + +-extern int sk_filter(struct sock *sk, struct sk_buff *skb); ++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); ++static inline int sk_filter(struct sock *sk, struct sk_buff *skb) ++{ ++ return sk_filter_trim_cap(sk, skb, 1); ++} + extern unsigned int sk_run_filter(const struct sk_buff *skb, + const struct sock_filter *filter); + extern int sk_unattached_filter_create(struct sk_filter **pfp, +diff --git a/include/linux/i8042.h b/include/linux/i8042.h +index a986ff588944..801c307f6fcc 100644 +--- a/include/linux/i8042.h ++++ b/include/linux/i8042.h +@@ -38,7 +38,6 @@ struct serio; + void i8042_lock_chip(void); + void i8042_unlock_chip(void); + int i8042_command(unsigned char *param, int command); +-bool i8042_check_port_owner(const struct serio *); + int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)); + int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, +@@ -59,11 +58,6 @@ static inline int i8042_command(unsigned char *param, int command) + return -ENODEV; + } + +-static inline bool i8042_check_port_owner(const struct serio *serio) +-{ +- return false; +-} +- + static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, + struct serio *serio)) + { +diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h +index e94537befabd..b55c95dd8748 100644 +--- a/include/linux/mfd/88pm80x.h ++++ b/include/linux/mfd/88pm80x.h +@@ -345,7 +345,7 @@ static inline int pm80x_dev_suspend(struct device *dev) + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) +- set_bit((1 << irq), &chip->wu_flag); ++ set_bit(irq, 
&chip->wu_flag); + + return 0; + } +@@ -357,7 +357,7 @@ static inline int pm80x_dev_resume(struct device *dev) + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) +- clear_bit((1 << irq), &chip->wu_flag); ++ clear_bit(irq, &chip->wu_flag); + + return 0; + } +diff --git a/include/linux/mroute.h b/include/linux/mroute.h +index 79aaa9fc1a15..d5277fc3ce2e 100644 +--- a/include/linux/mroute.h ++++ b/include/linux/mroute.h +@@ -103,5 +103,5 @@ struct mfc_cache { + struct rtmsg; + extern int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, +- struct rtmsg *rtm, int nowait); ++ struct rtmsg *rtm, int nowait, u32 portid); + #endif +diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h +index 66982e764051..f831155dc7d1 100644 +--- a/include/linux/mroute6.h ++++ b/include/linux/mroute6.h +@@ -115,7 +115,7 @@ struct mfc6_cache { + + struct rtmsg; + extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, +- struct rtmsg *rtm, int nowait); ++ struct rtmsg *rtm, int nowait, u32 portid); + + #ifdef CONFIG_IPV6_MROUTE + extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 4d2e0418ab5a..45a618b58864 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -2223,6 +2223,7 @@ static inline void napi_free_frags(struct napi_struct *napi) + napi->skb = NULL; + } + ++bool netdev_is_rx_handler_busy(struct net_device *dev); + extern int netdev_rx_handler_register(struct net_device *dev, + rx_handler_func_t *rx_handler, + void *rx_handler_data); +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index e3dea75a078b..9497527daba3 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -482,56 +482,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) + */ + static inline int fault_in_multipages_writeable(char __user *uaddr, int size) + { +- int ret = 0; + char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) +- return ret; ++ return 0; + ++ if (unlikely(uaddr > end)) ++ return -EFAULT; + /* + * Writing zeroes into userspace here is OK, because we know that if + * the zero gets there, we'll be overwriting it. + */ +- while (uaddr <= end) { +- ret = __put_user(0, uaddr); +- if (ret != 0) +- return ret; ++ do { ++ if (unlikely(__put_user(0, uaddr) != 0)) ++ return -EFAULT; + uaddr += PAGE_SIZE; +- } ++ } while (uaddr <= end); + + /* Check whether the range spilled into the next page. */ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) +- ret = __put_user(0, end); ++ return __put_user(0, end); + +- return ret; ++ return 0; + } + + static inline int fault_in_multipages_readable(const char __user *uaddr, + int size) + { + volatile char c; +- int ret = 0; + const char __user *end = uaddr + size - 1; + + if (unlikely(size == 0)) +- return ret; ++ return 0; + +- while (uaddr <= end) { +- ret = __get_user(c, uaddr); +- if (ret != 0) +- return ret; ++ if (unlikely(uaddr > end)) ++ return -EFAULT; ++ ++ do { ++ if (unlikely(__get_user(c, uaddr) != 0)) ++ return -EFAULT; + uaddr += PAGE_SIZE; +- } ++ } while (uaddr <= end); + + /* Check whether the range spilled into the next page. 
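/*
 * Illustrative sketch, not part of the patch: the pagemap hunks here
 * rewrite the fault-in loops to fail with -EFAULT directly and to
 * reject a range whose end wrapped around (uaddr > end); with the old
 * while-loop form a wrapped range skipped the body entirely and was
 * reported as success.  The loop shape, with an invented probe() in
 * place of __get_user():
 */
#include <errno.h>

#define PAGE_SZ 4096UL

static int probe(const char *p)
{
	volatile char c = *p;	/* stand-in for __get_user() */
	(void)c;
	return 0;
}

static int fault_in_readable(const char *uaddr, int size)
{
	const char *end = uaddr + size - 1;

	if (size == 0)
		return 0;
	if (uaddr > end)
		return -EFAULT;	/* end wrapped: invalid range */
	do {
		if (probe(uaddr) != 0)
			return -EFAULT;
		uaddr += PAGE_SZ;
	} while (uaddr <= end);
	return 0;
}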
*/ + if (((unsigned long)uaddr & PAGE_MASK) == + ((unsigned long)end & PAGE_MASK)) { +- ret = __get_user(c, end); +- (void)c; ++ return __get_user(c, end); + } + +- return ret; ++ return 0; + } + + int add_to_page_cache_locked(struct page *page, struct address_space *mapping, +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index 229a757e1c13..3204422317e0 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -430,11 +430,6 @@ struct perf_event { + #endif /* CONFIG_PERF_EVENTS */ + }; + +-enum perf_event_context_type { +- task_context, +- cpu_context, +-}; +- + /** + * struct perf_event_context - event context structure + * +@@ -442,7 +437,6 @@ enum perf_event_context_type { + */ + struct perf_event_context { + struct pmu *pmu; +- enum perf_event_context_type type; + /* + * Protect the states of the events in the list, + * nr_active, and the list: +diff --git a/include/linux/serio.h b/include/linux/serio.h +index 36aac733840a..deffa4746e16 100644 +--- a/include/linux/serio.h ++++ b/include/linux/serio.h +@@ -28,7 +28,8 @@ struct serio { + + struct serio_device_id id; + +- spinlock_t lock; /* protects critical sections from port's interrupt handler */ ++ /* Protects critical sections from port's interrupt handler */ ++ spinlock_t lock; + + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); +@@ -37,16 +38,29 @@ struct serio { + void (*stop)(struct serio *); + + struct serio *parent; +- struct list_head child_node; /* Entry in parent->children list */ ++ /* Entry in parent->children list */ ++ struct list_head child_node; + struct list_head children; +- unsigned int depth; /* level of nesting in serio hierarchy */ ++ /* Level of nesting in serio hierarchy */ ++ unsigned int depth; + +- struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ +- struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ ++ /* ++ * serio->drv is accessed from interrupt handlers; when modifying ++ * caller should acquire serio->drv_mutex and serio->lock. ++ */ ++ struct serio_driver *drv; ++ /* Protects serio->drv so attributes can pin current driver */ ++ struct mutex drv_mutex; + + struct device dev; + + struct list_head node; ++ ++ /* ++ * For use by PS/2 layer when several ports share hardware and ++ * may get indigestion when exposed to concurrent access (i8042). 
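/*
 * Illustrative sketch, not part of the patch: the serio hunk
 * continuing below adds a ps2_cmd_mutex pointer so sibling ports
 * behind one controller can serialize their command sequences on a
 * shared mutex instead of clashing on the hardware.  Minimal shape
 * (all names invented):
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(ctrl_cmd_mutex);	/* one per shared controller */

struct port {
	struct mutex *cmd_mutex;	/* NULL when the port is exclusive */
};

static void port_command(struct port *p)
{
	if (p->cmd_mutex)
		mutex_lock(p->cmd_mutex);
	/* ... issue the command sequence to the hardware ... */
	if (p->cmd_mutex)
		mutex_unlock(p->cmd_mutex);
}

/* sibling setup: a.cmd_mutex = b.cmd_mutex = &ctrl_cmd_mutex; */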
++ */ ++ struct mutex *ps2_cmd_mutex; + }; + #define to_serio_port(d) container_of(d, struct serio, dev) + +diff --git a/include/linux/stddef.h b/include/linux/stddef.h +index f4aec0e75c3a..9c61c7cda936 100644 +--- a/include/linux/stddef.h ++++ b/include/linux/stddef.h +@@ -3,7 +3,6 @@ + + #include + +- + #undef NULL + #define NULL ((void *)0) + +@@ -14,8 +13,18 @@ enum { + + #undef offsetof + #ifdef __compiler_offsetof +-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) ++#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) + #else +-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) ++#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) + #endif ++ ++/** ++ * offsetofend(TYPE, MEMBER) ++ * ++ * @TYPE: The type of the structure ++ * @MEMBER: The member within the structure to get the end offset of ++ */ ++#define offsetofend(TYPE, MEMBER) \ ++ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) ++ + #endif +diff --git a/include/linux/vfio.h b/include/linux/vfio.h +index ac8d488e4372..ef4f73739a76 100644 +--- a/include/linux/vfio.h ++++ b/include/linux/vfio.h +@@ -76,18 +76,4 @@ extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); + extern void vfio_unregister_iommu_driver( + const struct vfio_iommu_driver_ops *ops); + +-/** +- * offsetofend(TYPE, MEMBER) +- * +- * @TYPE: The type of the structure +- * @MEMBER: The member within the structure to get the end offset of +- * +- * Simple helper macro for dealing with variable sized structures passed +- * from user space. This allows us to easily determine if the provided +- * structure is sized to include various fields. +- */ +-#define offsetofend(TYPE, MEMBER) ({ \ +- TYPE tmp; \ +- offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \ +- + #endif /* VFIO_H */ +diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h +index 100fb8cec17c..a49b65029164 100644 +--- a/include/net/if_inet6.h ++++ b/include/net/if_inet6.h +@@ -31,8 +31,10 @@ + #define IF_PREFIX_AUTOCONF 0x02 + + enum { ++ INET6_IFADDR_STATE_PREDAD, + INET6_IFADDR_STATE_DAD, + INET6_IFADDR_STATE_POSTDAD, ++ INET6_IFADDR_STATE_ERRDAD, + INET6_IFADDR_STATE_UP, + INET6_IFADDR_STATE_DEAD, + }; +@@ -50,7 +52,7 @@ struct inet6_ifaddr { + + int state; + +- __u8 probes; ++ __u8 dad_probes; + __u8 flags; + + __u16 scope; +@@ -58,7 +60,7 @@ struct inet6_ifaddr { + unsigned long cstamp; /* created timestamp */ + unsigned long tstamp; /* updated timestamp */ + +- struct timer_list timer; ++ struct delayed_work dad_work; + + struct inet6_dev *idev; + struct rt6_info *rt; +@@ -195,6 +197,10 @@ struct inet6_dev { + struct inet6_dev *next; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; ++ ++ struct timer_list rs_timer; ++ __u8 rs_probes; ++ + unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ + struct rcu_head rcu; + }; +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h +index 4da5de10d1d4..b140c6079e34 100644 +--- a/include/net/ip6_tunnel.h ++++ b/include/net/ip6_tunnel.h +@@ -75,6 +75,7 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) + int pkt_len, err; + + nf_reset(skb); ++ memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + pkt_len = skb->len; + err = ip6_local_out(skb); + +diff --git a/include/net/ndisc.h b/include/net/ndisc.h +index 5043f8b08053..4b12d99a13cf 100644 +--- a/include/net/ndisc.h ++++ b/include/net/ndisc.h +@@ -190,7 +190,9 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons + } + + extern int 
ndisc_init(void); ++extern int ndisc_late_init(void); + ++extern void ndisc_late_cleanup(void); + extern void ndisc_cleanup(void); + + extern int ndisc_rcv(struct sk_buff *skb); +diff --git a/include/net/sock.h b/include/net/sock.h +index 2317d122874e..a46dd30ea58b 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1358,7 +1358,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket) + * Functions for memory accounting + */ + extern int __sk_mem_schedule(struct sock *sk, int size, int kind); +-extern void __sk_mem_reclaim(struct sock *sk); ++void __sk_mem_reclaim(struct sock *sk, int amount); + + #define SK_MEM_QUANTUM ((int)PAGE_SIZE) + #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) +@@ -1399,7 +1399,7 @@ static inline void sk_mem_reclaim(struct sock *sk) + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) +- __sk_mem_reclaim(sk); ++ __sk_mem_reclaim(sk, sk->sk_forward_alloc); + } + + static inline void sk_mem_reclaim_partial(struct sock *sk) +@@ -1407,7 +1407,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk) + if (!sk_has_account(sk)) + return; + if (sk->sk_forward_alloc > SK_MEM_QUANTUM) +- __sk_mem_reclaim(sk); ++ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); + } + + static inline void sk_mem_charge(struct sock *sk, int size) +@@ -1422,6 +1422,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) + if (!sk_has_account(sk)) + return; + sk->sk_forward_alloc += size; ++ ++ /* Avoid a possible overflow. ++ * TCP send queues can make this happen, if sk_mem_reclaim() ++ * is not called and more than 2 GBytes are released at once. ++ * ++ * If we reach 2 MBytes, reclaim 1 MBytes right now, there is ++ * no need to hold that much forward allocation anyway. ++ */ ++ if (unlikely(sk->sk_forward_alloc >= 1 << 21)) ++ __sk_mem_reclaim(sk, 1 << 20); + } + + static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 29a1a63cd303..79cd118d5994 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1029,6 +1029,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) + } + + extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); ++int tcp_filter(struct sock *sk, struct sk_buff *skb); + + #undef STATE_TRACE + +@@ -1392,6 +1393,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli + { + if (sk->sk_send_head == skb_unlinked) + sk->sk_send_head = NULL; ++ if (tcp_sk(sk)->highest_sack == skb_unlinked) ++ tcp_sk(sk)->highest_sack = NULL; + } + + static inline void tcp_init_send_head(struct sock *sk) +diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h +index 75271b9a8f61..50983a61eba3 100644 +--- a/include/xen/interface/io/ring.h ++++ b/include/xen/interface/io/ring.h +@@ -181,6 +181,20 @@ struct __name##_back_ring { \ + #define RING_GET_REQUEST(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) + ++/* ++ * Get a local copy of a request. ++ * ++ * Use this in preference to RING_GET_REQUEST() so all processing is ++ * done on a local copy that cannot be modified by the other end. ++ * ++ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this ++ * to be ineffective where _req is a struct which consists of only bitfields. ++ */ ++#define RING_COPY_REQUEST(_r, _idx, _req) do { \ ++ /* Use volatile to force the copy into _req. 
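/*
 * Illustrative sketch, not part of the patch: RING_COPY_REQUEST(),
 * completed in the next hunk line, closes a double-fetch (TOCTOU)
 * hole: dereferencing RING_GET_REQUEST() repeatedly lets the other end
 * rewrite the shared slot between reads, so validation and use may see
 * different values.  Copying once through a volatile-qualified pointer
 * forces a single snapshot.  The idiom with an invented request type:
 */
struct req {
	unsigned long id;
	unsigned int op;
};

#define COPY_SLOT(dst, slot) \
	(*(dst) = *(volatile const struct req *)(slot))

/*
 * usage: struct req local; COPY_SLOT(&local, shared_slot);
 * then validate and act on `local` only, never on *shared_slot again.
 */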
*/ \ ++ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ ++} while (0) ++ + #define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + +diff --git a/ipc/sem.c b/ipc/sem.c +index 47a15192b8b8..3b968a028ccf 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -267,20 +267,12 @@ static void sem_rcu_free(struct rcu_head *head) + * Caller must own sem_perm.lock. + * New simple ops cannot start, because simple ops first check + * that sem_perm.lock is free. +- * that a) sem_perm.lock is free and b) complex_count is 0. + */ + static void sem_wait_array(struct sem_array *sma) + { + int i; + struct sem *sem; + +- if (sma->complex_count) { +- /* The thread that increased sma->complex_count waited on +- * all sem->lock locks. Thus we don't need to wait again. +- */ +- return; +- } +- + for (i = 0; i < sma->sem_nsems; i++) { + sem = sma->sem_base + i; + spin_unlock_wait(&sem->lock); +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 0f5207839673..76e26b8e4e41 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -6249,7 +6249,6 @@ skip_type: + __perf_event_init_context(&cpuctx->ctx); + lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); + lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); +- cpuctx->ctx.type = cpu_context; + cpuctx->ctx.pmu = pmu; + cpuctx->jiffies_interval = 1; + INIT_LIST_HEAD(&cpuctx->rotation_list); +@@ -6856,7 +6855,19 @@ SYSCALL_DEFINE5(perf_event_open, + * task or CPU context: + */ + if (move_group) { +- if (group_leader->ctx->type != ctx->type) ++ /* ++ * Make sure we're both on the same task, or both ++ * per-cpu events. ++ */ ++ if (group_leader->ctx->task != ctx->task) ++ goto err_context; ++ ++ /* ++ * Make sure we're both events for the same CPU; ++ * grouping events for different CPUs is broken; since ++ * you can never concurrently schedule them anyhow. ++ */ ++ if (group_leader->cpu != event->cpu) + goto err_context; + } else { + if (group_leader->ctx != ctx) +diff --git a/kernel/fork.c b/kernel/fork.c +index 2358bd4c8757..612e78d82194 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -775,14 +775,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) + deactivate_mm(tsk, mm); + + /* +- * If we're exiting normally, clear a user-space tid field if +- * requested. We leave this alone when dying by signal, to leave +- * the value intact in a core dump, and to save the unnecessary +- * trouble, say, a killed vfork parent shouldn't touch this mm. +- * Userland only wants this done for a sys_exit. ++ * Signal userspace if we're not exiting with a core dump ++ * because we want to leave the value intact for debugging ++ * purposes. + */ + if (tsk->clear_child_tid) { +- if (!(tsk->flags & PF_SIGNALED) && ++ if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && + atomic_read(&mm->mm_users) > 1) { + /* + * We don't check the error code - if userspace has +diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c +index 269b097e78ea..743615bfdcec 100644 +--- a/kernel/power/suspend_test.c ++++ b/kernel/power/suspend_test.c +@@ -169,8 +169,10 @@ static int __init test_suspend(void) + + /* RTCs have initialized by now too ... can we use one? 
*/ + dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); +- if (dev) ++ if (dev) { + rtc = rtc_class_open(dev_name(dev)); ++ put_device(dev); ++ } + if (!rtc) { + printk(warn_no_rtc); + goto done; +diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h +index 3db5a375d8dd..468786bee4e3 100644 +--- a/kernel/rcutree_plugin.h ++++ b/kernel/rcutree_plugin.h +@@ -2243,6 +2243,7 @@ static int rcu_nocb_kthread(void *arg) + cl++; + c++; + local_bh_enable(); ++ cond_resched(); + list = next; + } + trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 655d6110a6e1..6a366f9d08db 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1501,11 +1501,52 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) + success = 1; /* we're going to change ->state */ + cpu = task_cpu(p); + ++ /* ++ * Ensure we load p->on_rq _after_ p->state, otherwise it would ++ * be possible to, falsely, observe p->on_rq == 0 and get stuck ++ * in smp_cond_load_acquire() below. ++ * ++ * sched_ttwu_pending() try_to_wake_up() ++ * [S] p->on_rq = 1; [L] P->state ++ * UNLOCK rq->lock -----. ++ * \ ++ * +--- RMB ++ * schedule() / ++ * LOCK rq->lock -----' ++ * UNLOCK rq->lock ++ * ++ * [task p] ++ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq ++ * ++ * Pairs with the UNLOCK+LOCK on rq->lock from the ++ * last wakeup of our task and the schedule that got our task ++ * current. ++ */ ++ smp_rmb(); + if (p->on_rq && ttwu_remote(p, wake_flags)) + goto stat; + + #ifdef CONFIG_SMP + /* ++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be ++ * possible to, falsely, observe p->on_cpu == 0. ++ * ++ * One must be running (->on_cpu == 1) in order to remove oneself ++ * from the runqueue. ++ * ++ * [S] ->on_cpu = 1; [L] ->on_rq ++ * UNLOCK rq->lock ++ * RMB ++ * LOCK rq->lock ++ * [S] ->on_rq = 0; [L] ->on_cpu ++ * ++ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock ++ * from the consecutive calls to schedule(); the first switching to our ++ * task, the second putting it to sleep. ++ */ ++ smp_rmb(); ++ ++ /* + * If the owning (remote) cpu is still in the middle of schedule() with + * this task as prev, wait until its done referencing the task. + */ +diff --git a/kernel/timer.c b/kernel/timer.c +index 20f45ea6f5a4..be22e45dc36f 100644 +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -923,13 +923,26 @@ EXPORT_SYMBOL(add_timer); + */ + void add_timer_on(struct timer_list *timer, int cpu) + { +- struct tvec_base *base = per_cpu(tvec_bases, cpu); ++ struct tvec_base *new_base = per_cpu(tvec_bases, cpu); ++ struct tvec_base *base; + unsigned long flags; + + timer_stats_timer_set_start_info(timer); + BUG_ON(timer_pending(timer) || !timer->function); +- spin_lock_irqsave(&base->lock, flags); +- timer_set_base(timer, base); ++ ++ /* ++ * If @timer was on a different CPU, it should be migrated with the ++ * old base locked to prevent other operations proceeding with the ++ * wrong base locked. See lock_timer_base(). 
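/*
 * Illustrative sketch, not part of the patch: the timer hunk
 * continuing below migrates a timer between per-CPU bases by locking
 * the base the timer currently belongs to, detaching it (base set to
 * NULL so lookups spin instead of grabbing a stale base), and only
 * then taking the new base's lock.  Locking the new base directly, as
 * the old code did, races with a concurrent mod_timer() still holding
 * the old base lock.  The pattern with invented types:
 */
#include <linux/spinlock.h>

struct base { spinlock_t lock; };
struct item { struct base *base; };

/* Mirrors lock_timer_base(): wait out an in-flight migration. */
static struct base *lock_item_base(struct item *it)
{
	for (;;) {
		struct base *b = it->base;

		if (b) {
			spin_lock(&b->lock);
			if (b == it->base)
				return b;	/* still attached and locked */
			spin_unlock(&b->lock);	/* migrated under us: retry */
		}
		cpu_relax();
	}
}

static void migrate(struct item *it, struct base *nb)
{
	struct base *ob = lock_item_base(it);

	if (ob != nb) {
		it->base = NULL;	/* detach: force lookups to wait */
		spin_unlock(&ob->lock);
		spin_lock(&nb->lock);
		it->base = nb;
	}
	/* ... enqueue on it->base ... */
	spin_unlock(&it->base->lock);
}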
++ */ ++ base = lock_timer_base(timer, &flags); ++ if (base != new_base) { ++ timer_set_base(timer, NULL); ++ spin_unlock(&base->lock); ++ base = new_base; ++ spin_lock(&base->lock); ++ timer_set_base(timer, base); ++ } + debug_activate(timer, timer->expires); + internal_add_timer(base, timer); + /* +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index eff26a976f02..d6e72522fc4e 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4121,13 +4121,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, + struct trace_array *tr = iter->tr; + ssize_t sret; + +- /* return any leftover data */ +- sret = trace_seq_to_user(&iter->seq, ubuf, cnt); +- if (sret != -EBUSY) +- return sret; +- +- trace_seq_init(&iter->seq); +- + /* copy the tracer to avoid using a global lock all around */ + mutex_lock(&trace_types_lock); + if (unlikely(iter->trace->name != tr->current_trace->name)) +@@ -4140,6 +4133,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, + * is protected. + */ + mutex_lock(&iter->mutex); ++ ++ /* return any leftover data */ ++ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); ++ if (sret != -EBUSY) ++ goto out; ++ ++ trace_seq_init(&iter->seq); ++ + if (iter->trace->read) { + sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); + if (sret) +@@ -5168,11 +5169,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + } + #endif + +- if (splice_grow_spd(pipe, &spd)) { +- ret = -ENOMEM; +- goto out; +- } +- + if (*ppos & (PAGE_SIZE - 1)) { + ret = -EINVAL; + goto out; +@@ -5186,6 +5182,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + len &= PAGE_MASK; + } + ++ if (splice_grow_spd(pipe, &spd)) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ + again: + trace_access_lock(iter->cpu_file); + entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); +@@ -5241,21 +5242,22 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, + if (!spd.nr_pages) { + if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { + ret = -EAGAIN; +- goto out; ++ goto out_shrink; + } + mutex_unlock(&trace_types_lock); + ret = iter->trace->wait_pipe(iter); + mutex_lock(&trace_types_lock); + if (ret) +- goto out; ++ goto out_shrink; + if (signal_pending(current)) { + ret = -EINTR; +- goto out; ++ goto out_shrink; + } + goto again; + } + + ret = splice_to_pipe(pipe, &spd); ++out_shrink: + splice_shrink_spd(&spd); + out: + mutex_unlock(&trace_types_lock); +diff --git a/lib/genalloc.c b/lib/genalloc.c +index 2a39bf62d8c1..ac5fba950eb1 100644 +--- a/lib/genalloc.c ++++ b/lib/genalloc.c +@@ -273,7 +273,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) + struct gen_pool_chunk *chunk; + unsigned long addr = 0; + int order = pool->min_alloc_order; +- int nbits, start_bit = 0, end_bit, remain; ++ int nbits, start_bit, end_bit, remain; + + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +@@ -288,6 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) + if (size > atomic_read(&chunk->avail)) + continue; + ++ start_bit = 0; + end_bit = chunk_size(chunk) >> order; + retry: + start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c +index 5464c8744ea9..e24388a863a7 100644 +--- a/lib/mpi/mpi-pow.c ++++ b/lib/mpi/mpi-pow.c +@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) + if (!esize) { + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 + * depending on if MOD equals 1. 
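/*
 * Illustrative sketch, not part of the patch: the mpi-pow hunk
 * continuing below handles a zero exponent as pure value logic, since
 * x^0 mod m is 1 for every m except m == 1, where the only residue is
 * 0, and it resizes the limb array before storing that 1 rather than
 * writing through a possibly unallocated pointer.  The value rule
 * alone:
 */
static unsigned long pow_mod_zero_exp(unsigned long m)
{
	return m == 1 ? 0 : 1;	/* x^0 mod m */
}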
*/ +- rp[0] = 1; + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; ++ if (res->nlimbs) { ++ if (mpi_resize(res, 1) < 0) ++ goto enomem; ++ rp = res->d; ++ rp[0] = 1; ++ } + res->sign = 0; + goto leave; + } +diff --git a/lib/ratelimit.c b/lib/ratelimit.c +index 40e03ea2a967..2c5de86460c5 100644 +--- a/lib/ratelimit.c ++++ b/lib/ratelimit.c +@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) + if (rs->missed) + printk(KERN_WARNING "%s: %d callbacks suppressed\n", + func, rs->missed); +- rs->begin = 0; ++ rs->begin = jiffies; + rs->printed = 0; + rs->missed = 0; + } +diff --git a/mm/ksm.c b/mm/ksm.c +index 7bf748f30aab..d1b19b9e888e 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void) + { + struct rmap_item *rmap_item; + +- rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); ++ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (rmap_item) + ksm_rmap_items++; + return rmap_item; +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 746af55b8455..d0a89838b99a 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -1922,6 +1922,8 @@ static unsigned long read_swap_header(struct swap_info_struct *p, + swab32s(&swap_header->info.version); + swab32s(&swap_header->info.last_page); + swab32s(&swap_header->info.nr_badpages); ++ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) ++ return 0; + for (i = 0; i < swap_header->info.nr_badpages; i++) + swab32s(&swap_header->info.badpages[i]); + } +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 35cf02d92766..dd0781c49ebb 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1500,24 +1500,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; + struct sock *sk = sock->sk; + struct bcm_sock *bo = bcm_sk(sk); ++ int ret = 0; + + if (len < sizeof(*addr)) + return -EINVAL; + +- if (bo->bound) +- return -EISCONN; ++ lock_sock(sk); ++ ++ if (bo->bound) { ++ ret = -EISCONN; ++ goto fail; ++ } + + /* bind a device to this socket */ + if (addr->can_ifindex) { + struct net_device *dev; + + dev = dev_get_by_index(&init_net, addr->can_ifindex); +- if (!dev) +- return -ENODEV; +- ++ if (!dev) { ++ ret = -ENODEV; ++ goto fail; ++ } + if (dev->type != ARPHRD_CAN) { + dev_put(dev); +- return -ENODEV; ++ ret = -ENODEV; ++ goto fail; + } + + bo->ifindex = dev->ifindex; +@@ -1528,17 +1535,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, + bo->ifindex = 0; + } + +- bo->bound = 1; +- + if (proc_dir) { + /* unique socket address as filename */ + sprintf(bo->procname, "%lu", sock_i_ino(sk)); + bo->bcm_proc_read = proc_create_data(bo->procname, 0644, + proc_dir, + &bcm_proc_fops, sk); ++ if (!bo->bcm_proc_read) { ++ ret = -ENOMEM; ++ goto fail; ++ } + } + +- return 0; ++ bo->bound = 1; ++ ++fail: ++ release_sock(sk); ++ ++ return ret; + } + + static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, +diff --git a/net/core/dev.c b/net/core/dev.c +index 1ccfc49683b3..6494918b3eaa 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2234,7 +2234,7 @@ int skb_checksum_help(struct sk_buff *skb) + goto out; + } + +- *(__sum16 *)(skb->data + offset) = csum_fold(csum); ++ *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; + out_set_summed: + skb->ip_summed = CHECKSUM_NONE; + out: +@@ -3346,6 +3346,22 @@ out: + #endif + + /** ++ * netdev_is_rx_handler_busy - check if receive handler is 
registered ++ * @dev: device to check ++ * ++ * Check if a receive handler is already registered for a given device. ++ * Return true if there one. ++ * ++ * The caller must hold the rtnl_mutex. ++ */ ++bool netdev_is_rx_handler_busy(struct net_device *dev) ++{ ++ ASSERT_RTNL(); ++ return dev && rtnl_dereference(dev->rx_handler); ++} ++EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy); ++ ++/** + * netdev_rx_handler_register - register receive handler + * @dev: device to register a handler for + * @rx_handler: receive handler to register +diff --git a/net/core/dst.c b/net/core/dst.c +index 1bf6842b89b8..582b861aeba6 100644 +--- a/net/core/dst.c ++++ b/net/core/dst.c +@@ -283,7 +283,9 @@ void dst_release(struct dst_entry *dst) + unsigned short nocache = dst->flags & DST_NOCACHE; + + newrefcnt = atomic_dec_return(&dst->__refcnt); +- WARN_ON(newrefcnt < 0); ++ if (unlikely(newrefcnt < 0)) ++ net_warn_ratelimited("%s: dst:%p refcnt:%d\n", ++ __func__, dst, newrefcnt); + if (!newrefcnt && unlikely(nocache)) + call_rcu(&dst->rcu_head, dst_destroy_rcu); + } +diff --git a/net/core/filter.c b/net/core/filter.c +index c6c18d8a2d88..65f2a65b5333 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -67,9 +67,10 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, + } + + /** +- * sk_filter - run a packet through a socket filter ++ * sk_filter_trim_cap - run a packet through a socket filter + * @sk: sock associated with &sk_buff + * @skb: buffer to filter ++ * @cap: limit on how short the eBPF program may trim the packet + * + * Run the filter code and then cut skb->data to correct size returned by + * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller +@@ -78,7 +79,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, + * be accepted or -EPERM if the packet should be tossed. + * + */ +-int sk_filter(struct sock *sk, struct sk_buff *skb) ++int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) + { + int err; + struct sk_filter *filter; +@@ -99,14 +100,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) + filter = rcu_dereference(sk->sk_filter); + if (filter) { + unsigned int pkt_len = SK_RUN_FILTER(filter, skb); +- +- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; ++ err = pkt_len ? 
pskb_trim(skb, max(cap, pkt_len)) : -EPERM; + } + rcu_read_unlock(); + + return err; + } +-EXPORT_SYMBOL(sk_filter); ++EXPORT_SYMBOL(sk_filter_trim_cap); + + /** + * sk_run_filter - run a filter on a socket +diff --git a/net/core/sock.c b/net/core/sock.c +index 5a954fccc7d3..e3cb45411f34 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1515,6 +1515,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) + } + + newsk->sk_err = 0; ++ newsk->sk_err_soft = 0; + newsk->sk_priority = 0; + /* + * Before updating sk_refcnt, we must commit prior changes to memory +@@ -2048,12 +2049,13 @@ EXPORT_SYMBOL(__sk_mem_schedule); + /** + * __sk_reclaim - reclaim memory_allocated + * @sk: socket ++ * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple) + */ +-void __sk_mem_reclaim(struct sock *sk) ++void __sk_mem_reclaim(struct sock *sk, int amount) + { +- sk_memory_allocated_sub(sk, +- sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT); +- sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; ++ amount >>= SK_MEM_QUANTUM_SHIFT; ++ sk_memory_allocated_sub(sk, amount); ++ sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; + + if (sk_under_memory_pressure(sk) && + (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index ebc54fef85a5..294c642fbebb 100644 +--- a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -212,7 +212,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) + { + const struct iphdr *iph = (struct iphdr *)skb->data; + const u8 offset = iph->ihl << 2; +- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); ++ const struct dccp_hdr *dh; + struct dccp_sock *dp; + struct inet_sock *inet; + const int type = icmp_hdr(skb)->type; +@@ -222,11 +222,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) + int err; + struct net *net = dev_net(skb->dev); + +- if (skb->len < offset + sizeof(*dh) || +- skb->len < offset + __dccp_basic_hdr_len(dh)) { +- ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); +- return; +- } ++ /* Only need dccph_dport & dccph_sport which are the first ++ * 4 bytes in dccp header. ++ * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. ++ */ ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); ++ dh = (struct dccp_hdr *)(skb->data + offset); + + sk = inet_lookup(net, &dccp_hashinfo, + iph->daddr, dh->dccph_dport, +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 6cf9f7782ad4..94f8224d543e 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -83,7 +83,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info) + { + const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; +- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); ++ const struct dccp_hdr *dh; + struct dccp_sock *dp; + struct ipv6_pinfo *np; + struct sock *sk; +@@ -91,12 +91,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + __u64 seq; + struct net *net = dev_net(skb->dev); + +- if (skb->len < offset + sizeof(*dh) || +- skb->len < offset + __dccp_basic_hdr_len(dh)) { +- ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), +- ICMP6_MIB_INERRORS); +- return; +- } ++ /* Only need dccph_dport & dccph_sport which are the first ++ * 4 bytes in dccp header. ++ * Our caller (icmpv6_notify()) already pulled 8 bytes for us. 
++ */ ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); ++ BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); ++ dh = (struct dccp_hdr *)(skb->data + offset); + + sk = inet6_lookup(net, &dccp_hashinfo, + &hdr->daddr, dh->dccph_dport, +@@ -1013,6 +1014,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { + .getsockopt = ipv6_getsockopt, + .addr2sockaddr = inet6_csk_addr2sockaddr, + .sockaddr_len = sizeof(struct sockaddr_in6), ++ .bind_conflict = inet6_csk_bind_conflict, + #ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, +diff --git a/net/dccp/proto.c b/net/dccp/proto.c +index 6c7c78b83940..cb55fb912401 100644 +--- a/net/dccp/proto.c ++++ b/net/dccp/proto.c +@@ -1012,6 +1012,10 @@ void dccp_close(struct sock *sk, long timeout) + __kfree_skb(skb); + } + ++ /* If socket has been already reset kill it. */ ++ if (sk->sk_state == DCCP_CLOSED) ++ goto adjudge_to_death; ++ + if (data_was_unread) { + /* Unread data was tossed, send an appropriate Reset Code */ + DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index 4d98a6b80b04..04c7e4618008 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -656,6 +656,9 @@ int ip_defrag(struct sk_buff *skb, u32 user) + net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); + ++ if (!net->ipv4.frags.high_thresh) ++ goto fail; ++ + /* Start by cleaning up the memory. */ + ip_evictor(net); + +@@ -672,6 +675,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) + return ret; + } + ++fail: + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + kfree_skb(skb); + return -ENOMEM; +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 57e745086302..5f077efad29d 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -97,6 +97,9 @@ int __ip_local_out(struct sk_buff *skb) + + iph->tot_len = htons(skb->len); + ip_send_check(iph); ++ ++ skb->protocol = htons(ETH_P_IP); ++ + return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, + skb_dst(skb)->dev, dst_output); + } +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c +index 89570f070e0e..a429ac69af78 100644 +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -2190,7 +2190,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, + + int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, +- struct rtmsg *rtm, int nowait) ++ struct rtmsg *rtm, int nowait, u32 portid) + { + struct mfc_cache *cache; + struct mr_table *mrt; +@@ -2235,6 +2235,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, + return -ENOMEM; + } + ++ NETLINK_CB(skb2).portid = portid; + skb_push(skb2, sizeof(struct iphdr)); + skb_reset_network_header(skb2); + iph = ip_hdr(skb2); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 624ca8ed350c..e59d6332458b 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -713,8 +713,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow + goto reject_redirect; + } + +- n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); +- if (n) { ++ n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); ++ if (!n) ++ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); ++ if (!IS_ERR(n)) { + if (!(n->nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); + } else { +@@ -2325,7 +2327,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, + IPV4_DEVCONF_ALL(net, 
MC_FORWARDING)) { + int err = ipmr_get_route(net, skb, + fl4->saddr, fl4->daddr, +- r, nowait); ++ r, nowait, portid); ++ + if (err <= 0) { + if (!nowait) { + if (err == 0) +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 11f27a45b8ef..6504a085ca60 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + */ + tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? + tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, +- tcp_rsk(req)->rcv_nxt, req->rcv_wnd, ++ tcp_rsk(req)->rcv_nxt, ++ req->rcv_wnd >> inet_rsk(req)->rcv_wscale, + tcp_time_stamp, + req->ts_recent, + 0, +@@ -1958,6 +1959,21 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) + } + EXPORT_SYMBOL(tcp_prequeue); + ++int tcp_filter(struct sock *sk, struct sk_buff *skb) ++{ ++ struct tcphdr *th = (struct tcphdr *)skb->data; ++ unsigned int eaten = skb->len; ++ int err; ++ ++ err = sk_filter_trim_cap(sk, skb, th->doff * 4); ++ if (!err) { ++ eaten -= skb->len; ++ TCP_SKB_CB(skb)->end_seq -= eaten; ++ } ++ return err; ++} ++EXPORT_SYMBOL(tcp_filter); ++ + /* + * From tcp_input.c + */ +@@ -2020,8 +2036,10 @@ process: + goto discard_and_relse; + nf_reset(skb); + +- if (sk_filter(sk, skb)) ++ if (tcp_filter(sk, skb)) + goto discard_and_relse; ++ th = (const struct tcphdr *)skb->data; ++ iph = ip_hdr(skb); + + skb->dev = NULL; + +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 276b28301a6b..1f2f6b5406ee 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1753,12 +1753,14 @@ static int tcp_mtu_probe(struct sock *sk) + len = 0; + tcp_for_write_queue_from_safe(skb, next, sk) { + copy = min_t(int, skb->len, probe_size - len); +- if (nskb->ip_summed) ++ if (nskb->ip_summed) { + skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); +- else +- nskb->csum = skb_copy_and_csum_bits(skb, 0, +- skb_put(nskb, copy), +- copy, nskb->csum); ++ } else { ++ __wsum csum = skb_copy_and_csum_bits(skb, 0, ++ skb_put(nskb, copy), ++ copy, 0); ++ nskb->csum = csum_block_add(nskb->csum, csum, len); ++ } + + if (skb->len <= copy) { + /* We've eaten all the data from this skb. +@@ -2327,7 +2329,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) + * copying overhead: fragmentation, tunneling, mangling etc. 
+ */ + if (atomic_read(&sk->sk_wmem_alloc) > +- min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) ++ min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), ++ sk->sk_sndbuf)) + return -EAGAIN; + + if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index d0912acd9522..a3e2c34d5b7a 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -139,10 +139,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev); + static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; + static DEFINE_SPINLOCK(addrconf_hash_lock); + +-static void addrconf_verify(unsigned long); ++static void addrconf_verify(void); ++static void addrconf_verify_rtnl(void); ++static void addrconf_verify_work(struct work_struct *); + +-static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0); +-static DEFINE_SPINLOCK(addrconf_verify_lock); ++static struct workqueue_struct *addrconf_wq; ++static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work); + + static void addrconf_join_anycast(struct inet6_ifaddr *ifp); + static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); +@@ -157,7 +159,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, + u32 flags, u32 noflags); + + static void addrconf_dad_start(struct inet6_ifaddr *ifp); +-static void addrconf_dad_timer(unsigned long data); ++static void addrconf_dad_work(struct work_struct *w); + static void addrconf_dad_completed(struct inet6_ifaddr *ifp); + static void addrconf_dad_run(struct inet6_dev *idev); + static void addrconf_rs_timer(unsigned long data); +@@ -253,37 +255,32 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev) + return !qdisc_tx_is_noop(dev); + } + +-static void addrconf_del_timer(struct inet6_ifaddr *ifp) ++static void addrconf_del_rs_timer(struct inet6_dev *idev) + { +- if (del_timer(&ifp->timer)) ++ if (del_timer(&idev->rs_timer)) ++ __in6_dev_put(idev); ++} ++ ++static void addrconf_del_dad_work(struct inet6_ifaddr *ifp) ++{ ++ if (cancel_delayed_work(&ifp->dad_work)) + __in6_ifa_put(ifp); + } + +-enum addrconf_timer_t { +- AC_NONE, +- AC_DAD, +- AC_RS, +-}; ++static void addrconf_mod_rs_timer(struct inet6_dev *idev, ++ unsigned long when) ++{ ++ if (!timer_pending(&idev->rs_timer)) ++ in6_dev_hold(idev); ++ mod_timer(&idev->rs_timer, jiffies + when); ++} + +-static void addrconf_mod_timer(struct inet6_ifaddr *ifp, +- enum addrconf_timer_t what, +- unsigned long when) ++static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, ++ unsigned long delay) + { +- if (!del_timer(&ifp->timer)) ++ if (!delayed_work_pending(&ifp->dad_work)) + in6_ifa_hold(ifp); +- +- switch (what) { +- case AC_DAD: +- ifp->timer.function = addrconf_dad_timer; +- break; +- case AC_RS: +- ifp->timer.function = addrconf_rs_timer; +- break; +- default: +- break; +- } +- ifp->timer.expires = jiffies + when; +- add_timer(&ifp->timer); ++ mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); + } + + static int snmp6_alloc_dev(struct inet6_dev *idev) +@@ -326,6 +323,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) + + WARN_ON(!list_empty(&idev->addr_list)); + WARN_ON(idev->mc_list != NULL); ++ WARN_ON(timer_pending(&idev->rs_timer)); + + #ifdef NET_REFCNT_DEBUG + pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); +@@ -357,7 +355,8 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) + rwlock_init(&ndev->lock); + ndev->dev = dev; + INIT_LIST_HEAD(&ndev->addr_list); +- ++ setup_timer(&ndev->rs_timer, addrconf_rs_timer, ++ (unsigned long)ndev); + memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); + ndev->cnf.mtu6 = dev->mtu; + ndev->cnf.sysctl = NULL; +@@ -776,8 +775,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) + + in6_dev_put(ifp->idev); + +- if (del_timer(&ifp->timer)) +- pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); ++ if (cancel_delayed_work(&ifp->dad_work)) ++ pr_notice("delayed DAD work was pending while freeing ifa=%p\n", ++ ifp); + + if (ifp->state != INET6_IFADDR_STATE_DEAD) { + pr_warn("Freeing alive inet6 address %p\n", ifp); +@@ -869,9 +869,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, + + spin_lock_init(&ifa->lock); + spin_lock_init(&ifa->state_lock); +- init_timer(&ifa->timer); ++ INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work); + INIT_HLIST_NODE(&ifa->addr_lst); +- ifa->timer.data = (unsigned long) ifa; + ifa->scope = scope; + ifa->prefix_len = pfxlen; + ifa->flags = flags | IFA_F_TENTATIVE; +@@ -930,6 +929,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) + int deleted = 0, onlink = 0; + unsigned long expires = jiffies; + ++ ASSERT_RTNL(); ++ + spin_lock_bh(&ifp->state_lock); + state = ifp->state; + ifp->state = INET6_IFADDR_STATE_DEAD; +@@ -994,7 +995,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) + } + write_unlock_bh(&idev->lock); + +- addrconf_del_timer(ifp); ++ addrconf_del_dad_work(ifp); + + ipv6_ifa_notify(RTM_DELADDR, ifp); + +@@ -1617,7 +1618,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) + { + if (ifp->flags&IFA_F_PERMANENT) { + spin_lock_bh(&ifp->lock); +- addrconf_del_timer(ifp); ++ addrconf_del_dad_work(ifp); + ifp->flags |= IFA_F_TENTATIVE; + if (dad_failed) + ifp->flags |= IFA_F_DADFAILED; +@@ -1640,20 +1641,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) + } + ipv6_del_addr(ifp); + #endif +- } else ++ } else { + ipv6_del_addr(ifp); ++ } + } + + static int addrconf_dad_end(struct inet6_ifaddr *ifp) + { + int err = -ENOENT; + +- spin_lock(&ifp->state_lock); ++ spin_lock_bh(&ifp->state_lock); + if (ifp->state == INET6_IFADDR_STATE_DAD) { + ifp->state = INET6_IFADDR_STATE_POSTDAD; + err = 0; + } +- spin_unlock(&ifp->state_lock); ++ spin_unlock_bh(&ifp->state_lock); + + return err; + } +@@ -1686,11 +1688,17 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) + } + } + +- addrconf_dad_stop(ifp, 1); +-} ++ spin_lock_bh(&ifp->state_lock); ++ /* transition from _POSTDAD to _ERRDAD */ ++ ifp->state = INET6_IFADDR_STATE_ERRDAD; ++ spin_unlock_bh(&ifp->state_lock); + +-/* Join to solicited addr multicast group. */ ++ addrconf_mod_dad_work(ifp, 0); ++ in6_ifa_put(ifp); ++} + ++/* Join to solicited addr multicast group. 
++ * caller must hold RTNL */ + void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) + { + struct in6_addr maddr; +@@ -1702,6 +1710,7 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) + ipv6_dev_mc_inc(dev, &maddr); + } + ++/* caller must hold RTNL */ + void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) + { + struct in6_addr maddr; +@@ -1713,9 +1722,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) + __ipv6_dev_mc_dec(idev, &maddr); + } + ++/* caller must hold RTNL */ + static void addrconf_join_anycast(struct inet6_ifaddr *ifp) + { + struct in6_addr addr; ++ + if (ifp->prefix_len == 127) /* RFC 6164 */ + return; + ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); +@@ -1724,9 +1735,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) + ipv6_dev_ac_inc(ifp->idev->dev, &addr); + } + ++/* caller must hold RTNL */ + static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) + { + struct in6_addr addr; ++ + if (ifp->prefix_len == 127) /* RFC 6164 */ + return; + ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); +@@ -2361,7 +2374,7 @@ ok: + } + #endif + in6_ifa_put(ifp); +- addrconf_verify(0); ++ addrconf_verify(); + } + } + inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); +@@ -2504,7 +2517,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p + */ + addrconf_dad_start(ifp); + in6_ifa_put(ifp); +- addrconf_verify(0); ++ addrconf_verify_rtnl(); + return 0; + } + +@@ -2696,7 +2709,7 @@ static void init_loopback(struct net_device *dev) + * lo device down, release this obsolete dst and + * reallocate a new router for ifa. + */ +- if (sp_ifa->rt->dst.obsolete > 0) { ++ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) { + ip6_rt_put(sp_ifa->rt); + sp_ifa->rt = NULL; + } else { +@@ -3085,7 +3098,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) + hlist_for_each_entry_rcu(ifa, h, addr_lst) { + if (ifa->idev == idev) { + hlist_del_init_rcu(&ifa->addr_lst); +- addrconf_del_timer(ifa); ++ addrconf_del_dad_work(ifa); + goto restart; + } + } +@@ -3094,6 +3107,8 @@ static int addrconf_ifdown(struct net_device *dev, int how) + + write_lock_bh(&idev->lock); + ++ addrconf_del_rs_timer(idev); ++ + /* Step 2: clear flags for stateless addrconf */ + if (!how) + idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); +@@ -3123,7 +3138,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) + while (!list_empty(&idev->addr_list)) { + ifa = list_first_entry(&idev->addr_list, + struct inet6_ifaddr, if_list); +- addrconf_del_timer(ifa); ++ addrconf_del_dad_work(ifa); + + list_del(&ifa->if_list); + +@@ -3165,10 +3180,10 @@ static int addrconf_ifdown(struct net_device *dev, int how) + + static void addrconf_rs_timer(unsigned long data) + { +- struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; +- struct inet6_dev *idev = ifp->idev; ++ struct inet6_dev *idev = (struct inet6_dev *)data; ++ struct in6_addr lladdr; + +- read_lock(&idev->lock); ++ write_lock(&idev->lock); + if (idev->dead || !(idev->if_flags & IF_READY)) + goto out; + +@@ -3179,18 +3194,19 @@ static void addrconf_rs_timer(unsigned long data) + if (idev->if_flags & IF_RA_RCVD) + goto out; + +- spin_lock(&ifp->lock); +- if (ifp->probes++ < idev->cnf.rtr_solicits) { +- /* The wait after the last probe can be shorter */ +- addrconf_mod_timer(ifp, AC_RS, +- (ifp->probes == idev->cnf.rtr_solicits) ? 
+- idev->cnf.rtr_solicit_delay : +- idev->cnf.rtr_solicit_interval); +- spin_unlock(&ifp->lock); ++ if (idev->rs_probes++ < idev->cnf.rtr_solicits) { ++ if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE)) ++ ndisc_send_rs(idev->dev, &lladdr, ++ &in6addr_linklocal_allrouters); ++ else ++ goto out; + +- ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); ++ /* The wait after the last probe can be shorter */ ++ addrconf_mod_rs_timer(idev, (idev->rs_probes == ++ idev->cnf.rtr_solicits) ? ++ idev->cnf.rtr_solicit_delay : ++ idev->cnf.rtr_solicit_interval); + } else { +- spin_unlock(&ifp->lock); + /* + * Note: we do not support deprecated "all on-link" + * assumption any longer. +@@ -3199,8 +3215,8 @@ static void addrconf_rs_timer(unsigned long data) + } + + out: +- read_unlock(&idev->lock); +- in6_ifa_put(ifp); ++ write_unlock(&idev->lock); ++ in6_dev_put(idev); + } + + /* +@@ -3216,11 +3232,11 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) + else + rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1); + +- ifp->probes = idev->cnf.dad_transmits; +- addrconf_mod_timer(ifp, AC_DAD, rand_num); ++ ifp->dad_probes = idev->cnf.dad_transmits; ++ addrconf_mod_dad_work(ifp, rand_num); + } + +-static void addrconf_dad_start(struct inet6_ifaddr *ifp) ++static void addrconf_dad_begin(struct inet6_ifaddr *ifp) + { + struct inet6_dev *idev = ifp->idev; + struct net_device *dev = idev->dev; +@@ -3272,57 +3288,105 @@ out: + read_unlock_bh(&idev->lock); + } + +-static void addrconf_dad_timer(unsigned long data) ++static void addrconf_dad_start(struct inet6_ifaddr *ifp) + { +- struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; ++ bool begin_dad = false; ++ ++ spin_lock_bh(&ifp->state_lock); ++ if (ifp->state != INET6_IFADDR_STATE_DEAD) { ++ ifp->state = INET6_IFADDR_STATE_PREDAD; ++ begin_dad = true; ++ } ++ spin_unlock_bh(&ifp->state_lock); ++ ++ if (begin_dad) ++ addrconf_mod_dad_work(ifp, 0); ++} ++ ++static void addrconf_dad_work(struct work_struct *w) ++{ ++ struct inet6_ifaddr *ifp = container_of(to_delayed_work(w), ++ struct inet6_ifaddr, ++ dad_work); + struct inet6_dev *idev = ifp->idev; + struct in6_addr mcaddr; + +- if (!ifp->probes && addrconf_dad_end(ifp)) ++ enum { ++ DAD_PROCESS, ++ DAD_BEGIN, ++ DAD_ABORT, ++ } action = DAD_PROCESS; ++ ++ rtnl_lock(); ++ ++ spin_lock_bh(&ifp->state_lock); ++ if (ifp->state == INET6_IFADDR_STATE_PREDAD) { ++ action = DAD_BEGIN; ++ ifp->state = INET6_IFADDR_STATE_DAD; ++ } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { ++ action = DAD_ABORT; ++ ifp->state = INET6_IFADDR_STATE_POSTDAD; ++ } ++ spin_unlock_bh(&ifp->state_lock); ++ ++ if (action == DAD_BEGIN) { ++ addrconf_dad_begin(ifp); ++ goto out; ++ } else if (action == DAD_ABORT) { ++ in6_ifa_hold(ifp); ++ addrconf_dad_stop(ifp, 1); + goto out; ++ } + +- read_lock(&idev->lock); ++ if (!ifp->dad_probes && addrconf_dad_end(ifp)) ++ goto out; ++ ++ write_lock_bh(&idev->lock); + if (idev->dead || !(idev->if_flags & IF_READY)) { +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + goto out; + } + + spin_lock(&ifp->lock); + if (ifp->state == INET6_IFADDR_STATE_DEAD) { + spin_unlock(&ifp->lock); +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + goto out; + } + +- if (ifp->probes == 0) { ++ if (ifp->dad_probes == 0) { + /* + * DAD was successful + */ + + ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); + spin_unlock(&ifp->lock); +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + + addrconf_dad_completed(ifp); + + 
goto out; + } + +- ifp->probes--; +- addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time); ++ ifp->dad_probes--; ++ addrconf_mod_dad_work(ifp, ifp->idev->nd_parms->retrans_time); + spin_unlock(&ifp->lock); +- read_unlock(&idev->lock); ++ write_unlock_bh(&idev->lock); + + /* send a neighbour solicitation for our addr */ + addrconf_addr_solict_mult(&ifp->addr, &mcaddr); + ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); + out: + in6_ifa_put(ifp); ++ rtnl_unlock(); + } + + static void addrconf_dad_completed(struct inet6_ifaddr *ifp) + { + struct net_device *dev = ifp->idev->dev; ++ struct in6_addr lladdr; ++ ++ addrconf_del_dad_work(ifp); + + /* + * Configure the address for reception. Now it is valid. +@@ -3343,13 +3407,20 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) + * [...] as part of DAD [...] there is no need + * to delay again before sending the first RS + */ +- ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); ++ if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) ++ ndisc_send_rs(dev, &lladdr, ++ &in6addr_linklocal_allrouters); ++ else ++ return; + +- spin_lock_bh(&ifp->lock); +- ifp->probes = 1; ++ write_lock_bh(&ifp->idev->lock); ++ spin_lock(&ifp->lock); ++ ifp->idev->rs_probes = 1; + ifp->idev->if_flags |= IF_RS_SENT; +- addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval); +- spin_unlock_bh(&ifp->lock); ++ addrconf_mod_rs_timer(ifp->idev, ++ ifp->idev->cnf.rtr_solicit_interval); ++ spin_unlock(&ifp->lock); ++ write_unlock_bh(&ifp->idev->lock); + } + } + +@@ -3547,23 +3618,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) + * Periodic address status verification + */ + +-static void addrconf_verify(unsigned long foo) ++static void addrconf_verify_rtnl(void) + { + unsigned long now, next, next_sec, next_sched; + struct inet6_ifaddr *ifp; + int i; + ++ ASSERT_RTNL(); ++ + rcu_read_lock_bh(); +- spin_lock(&addrconf_verify_lock); + now = jiffies; + next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); + +- del_timer(&addr_chk_timer); ++ cancel_delayed_work(&addr_chk_work); + + for (i = 0; i < IN6_ADDR_HSIZE; i++) { + restart: +- hlist_for_each_entry_rcu_bh(ifp, +- &inet6_addr_lst[i], addr_lst) { ++ hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) { + unsigned long age; + + if (ifp->flags & IFA_F_PERMANENT) +@@ -3654,13 +3725,22 @@ restart: + + ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", + now, next, next_sec, next_sched)); +- +- addr_chk_timer.expires = next_sched; +- add_timer(&addr_chk_timer); +- spin_unlock(&addrconf_verify_lock); ++ mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now); + rcu_read_unlock_bh(); + } + ++static void addrconf_verify_work(struct work_struct *w) ++{ ++ rtnl_lock(); ++ addrconf_verify_rtnl(); ++ rtnl_unlock(); ++} ++ ++static void addrconf_verify(void) ++{ ++ mod_delayed_work(addrconf_wq, &addr_chk_work, 0); ++} ++ + static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) + { + struct in6_addr *pfx = NULL; +@@ -3712,6 +3792,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, + clock_t expires; + unsigned long timeout; + ++ ASSERT_RTNL(); ++ + if (!valid_lft || (prefered_lft > valid_lft)) + return -EINVAL; + +@@ -3745,7 +3827,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, + + addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev, + expires, flags); +- addrconf_verify(0); ++ 
addrconf_verify_rtnl(); + + return 0; + } +@@ -4354,6 +4436,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) + bool update_rs = false; + struct in6_addr ll_addr; + ++ ASSERT_RTNL(); ++ + if (token == NULL) + return -EINVAL; + if (ipv6_addr_any(token)) +@@ -4399,6 +4483,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) + } + + write_unlock_bh(&idev->lock); ++ addrconf_verify_rtnl(); + return 0; + } + +@@ -4600,6 +4685,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) + { + struct net *net = dev_net(ifp->idev->dev); + ++ if (event) ++ ASSERT_RTNL(); ++ + inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); + + switch (event) { +@@ -5128,6 +5216,12 @@ int __init addrconf_init(void) + if (err < 0) + goto out_addrlabel; + ++ addrconf_wq = create_workqueue("ipv6_addrconf"); ++ if (!addrconf_wq) { ++ err = -ENOMEM; ++ goto out_nowq; ++ } ++ + /* The addrconf netdev notifier requires that loopback_dev + * has it's ipv6 private information allocated and setup + * before it can bring up and give link-local addresses +@@ -5158,7 +5252,7 @@ int __init addrconf_init(void) + + register_netdevice_notifier(&ipv6_dev_notf); + +- addrconf_verify(0); ++ addrconf_verify(); + + err = rtnl_af_register(&inet6_ops); + if (err < 0) +@@ -5189,6 +5283,8 @@ errout: + errout_af: + unregister_netdevice_notifier(&ipv6_dev_notf); + errlo: ++ destroy_workqueue(addrconf_wq); ++out_nowq: + unregister_pernet_subsys(&addrconf_ops); + out_addrlabel: + ipv6_addr_label_cleanup(); +@@ -5224,7 +5320,8 @@ void addrconf_cleanup(void) + for (i = 0; i < IN6_ADDR_HSIZE; i++) + WARN_ON(!hlist_empty(&inet6_addr_lst[i])); + spin_unlock_bh(&addrconf_hash_lock); +- +- del_timer(&addr_chk_timer); ++ cancel_delayed_work(&addr_chk_work); + rtnl_unlock(); ++ ++ destroy_workqueue(addrconf_wq); + } +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c +index a944f1313c5f..9443af7d7ecb 100644 +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -900,6 +900,9 @@ static int __init inet6_init(void) + err = ip6_route_init(); + if (err) + goto ip6_route_fail; ++ err = ndisc_late_init(); ++ if (err) ++ goto ndisc_late_fail; + err = ip6_flowlabel_init(); + if (err) + goto ip6_flowlabel_fail; +@@ -960,6 +963,8 @@ ipv6_exthdrs_fail: + addrconf_fail: + ip6_flowlabel_cleanup(); + ip6_flowlabel_fail: ++ ndisc_late_cleanup(); ++ndisc_late_fail: + ip6_route_cleanup(); + ip6_route_fail: + #ifdef CONFIG_PROC_FS +@@ -1020,6 +1025,7 @@ static void __exit inet6_exit(void) + ipv6_exthdrs_exit(); + addrconf_cleanup(); + ip6_flowlabel_cleanup(); ++ ndisc_late_cleanup(); + ip6_route_cleanup(); + #ifdef CONFIG_PROC_FS + +diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c +index 5a80f15a9de2..c59083c2a656 100644 +--- a/net/ipv6/anycast.c ++++ b/net/ipv6/anycast.c +@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + pac->acl_next = NULL; + pac->acl_addr = *addr; + ++ rtnl_lock(); + rcu_read_lock(); + if (ifindex == 0) { + struct rt6_info *rt; +@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + + error: + rcu_read_unlock(); ++ rtnl_unlock(); + if (pac) + sock_kfree_s(sk, pac, sizeof(*pac)); + return err; +@@ -171,13 +173,17 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + + spin_unlock_bh(&ipv6_sk_ac_lock); + ++ rtnl_lock(); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + if (dev) + ipv6_dev_ac_dec(dev, &pac->acl_addr); + 
rcu_read_unlock(); ++ rtnl_unlock(); + + sock_kfree_s(sk, pac, sizeof(*pac)); ++ if (!dev) ++ return -ENODEV; + return 0; + } + +@@ -198,6 +204,7 @@ void ipv6_sock_ac_close(struct sock *sk) + spin_unlock_bh(&ipv6_sk_ac_lock); + + prev_index = 0; ++ rtnl_lock(); + rcu_read_lock(); + while (pac) { + struct ipv6_ac_socklist *next = pac->acl_next; +@@ -212,6 +219,7 @@ void ipv6_sock_ac_close(struct sock *sk) + pac = next; + } + rcu_read_unlock(); ++ rtnl_unlock(); + } + + static void aca_put(struct ifacaddr6 *ac) +@@ -233,6 +241,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) + struct rt6_info *rt; + int err; + ++ ASSERT_RTNL(); ++ + idev = in6_dev_get(dev); + + if (idev == NULL) +@@ -302,6 +312,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr) + { + struct ifacaddr6 *aca, *prev_aca; + ++ ASSERT_RTNL(); ++ + write_lock_bh(&idev->lock); + prev_aca = NULL; + for (aca = idev->ac_list; aca; aca = aca->aca_next) { +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 7eb7267861ac..603f251b6ca2 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -890,7 +890,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) + encap_limit = t->parms.encap_limit; + + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); +- fl6.flowi6_proto = skb->protocol; + + err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu); + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 31bab1ab007c..12984e6794b9 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -950,12 +950,21 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, + struct ipv6_tel_txoption opt; + struct dst_entry *dst = NULL, *ndst = NULL; + struct net_device *tdev; ++ bool use_cache = false; + int mtu; + unsigned int max_headroom = sizeof(struct ipv6hdr); + u8 proto; + int err = -1; + +- if (!fl6->flowi6_mark) ++ if (!(t->parms.flags & ++ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { ++ /* enable the cache only only if the routing decision does ++ * not depend on the current inner header value ++ */ ++ use_cache = true; ++ } ++ ++ if (use_cache) + dst = ip6_tnl_dst_check(t); + if (!dst) { + ndst = ip6_route_output(net, NULL, fl6); +@@ -1012,7 +1021,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, + skb = new_skb; + } + skb_dst_drop(skb); +- if (fl6->flowi6_mark) { ++ if (!use_cache) { + skb_dst_set(skb, dst); + ndst = NULL; + } else { +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 107f75283b1b..8344f686335d 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -2275,8 +2275,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, + return 1; + } + +-int ip6mr_get_route(struct net *net, +- struct sk_buff *skb, struct rtmsg *rtm, int nowait) ++int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, ++ int nowait, u32 portid) + { + int err; + struct mr6_table *mrt; +@@ -2321,6 +2321,7 @@ int ip6mr_get_route(struct net *net, + return -ENOMEM; + } + ++ NETLINK_CB(skb2).portid = portid; + skb_reset_transport_header(skb2); + + skb_put(skb2, sizeof(struct ipv6hdr)); +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c +index 7ba6180ff8bd..cf16eb484cfe 100644 +--- a/net/ipv6/mcast.c ++++ b/net/ipv6/mcast.c +@@ -157,6 +157,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + mc_lst->next = NULL; + mc_lst->addr = *addr; + ++ rtnl_lock(); + rcu_read_lock(); + if (ifindex == 0) { + struct rt6_info *rt; +@@ -170,6 +171,7 @@ int ipv6_sock_mc_join(struct 
sock *sk, int ifindex, const struct in6_addr *addr) + + if (dev == NULL) { + rcu_read_unlock(); ++ rtnl_unlock(); + sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); + return -ENODEV; + } +@@ -187,6 +189,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + + if (err) { + rcu_read_unlock(); ++ rtnl_unlock(); + sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); + return err; + } +@@ -197,6 +200,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) + spin_unlock(&ipv6_sk_mc_lock); + + rcu_read_unlock(); ++ rtnl_unlock(); + + return 0; + } +@@ -214,6 +218,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + if (!ipv6_addr_is_multicast(addr)) + return -EINVAL; + ++ rtnl_lock(); + spin_lock(&ipv6_sk_mc_lock); + for (lnk = &np->ipv6_mc_list; + (mc_lst = rcu_dereference_protected(*lnk, +@@ -237,12 +242,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + } else + (void) ip6_mc_leave_src(sk, mc_lst, NULL); + rcu_read_unlock(); ++ rtnl_unlock(); ++ + atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); + kfree_rcu(mc_lst, rcu); + return 0; + } + } + spin_unlock(&ipv6_sk_mc_lock); ++ rtnl_unlock(); + + return -EADDRNOTAVAIL; + } +@@ -287,6 +295,7 @@ void ipv6_sock_mc_close(struct sock *sk) + if (!rcu_access_pointer(np->ipv6_mc_list)) + return; + ++ rtnl_lock(); + spin_lock(&ipv6_sk_mc_lock); + while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, + lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { +@@ -313,6 +322,7 @@ void ipv6_sock_mc_close(struct sock *sk) + spin_lock(&ipv6_sk_mc_lock); + } + spin_unlock(&ipv6_sk_mc_lock); ++ rtnl_unlock(); + } + + int ip6_mc_source(int add, int omode, struct sock *sk, +@@ -830,6 +840,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) + struct ifmcaddr6 *mc; + struct inet6_dev *idev; + ++ ASSERT_RTNL(); ++ + /* we need to take a reference on idev */ + idev = in6_dev_get(dev); + +@@ -901,6 +913,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) + { + struct ifmcaddr6 *ma, **map; + ++ ASSERT_RTNL(); ++ + write_lock_bh(&idev->lock); + for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { + if (ipv6_addr_equal(&ma->mca_addr, addr)) { +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index deedf7ddbc6e..de10ccfe7f7e 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -1716,24 +1716,28 @@ int __init ndisc_init(void) + if (err) + goto out_unregister_pernet; + #endif +- err = register_netdevice_notifier(&ndisc_netdev_notifier); +- if (err) +- goto out_unregister_sysctl; + out: + return err; + +-out_unregister_sysctl: + #ifdef CONFIG_SYSCTL +- neigh_sysctl_unregister(&nd_tbl.parms); + out_unregister_pernet: +-#endif + unregister_pernet_subsys(&ndisc_net_ops); + goto out; ++#endif + } + +-void ndisc_cleanup(void) ++int __init ndisc_late_init(void) ++{ ++ return register_netdevice_notifier(&ndisc_netdev_notifier); ++} ++ ++void ndisc_late_cleanup(void) + { + unregister_netdevice_notifier(&ndisc_netdev_notifier); ++} ++ ++void ndisc_cleanup(void) ++{ + #ifdef CONFIG_SYSCTL + neigh_sysctl_unregister(&nd_tbl.parms); + #endif +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c +index 7cd623588532..c11a40caf5b6 100644 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c +@@ -569,6 +569,9 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) + if (find_prev_fhdr(skb, &prevhdr, &nhoff, 
&fhoff) < 0) + return skb; + ++ if (!net->nf_frag.frags.high_thresh) ++ return skb; ++ + clone = skb_clone(skb, GFP_ATOMIC); + if (clone == NULL) { + pr_debug("Can't clone skb\n"); +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c +index a1fb511da3b5..1a5318efa31c 100644 +--- a/net/ipv6/reassembly.c ++++ b/net/ipv6/reassembly.c +@@ -556,6 +556,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + return 1; + } + ++ if (!net->ipv6.frags.high_thresh) ++ goto fail_mem; ++ + evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false); + if (evicted) + IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), +@@ -575,6 +578,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) + return ret; + } + ++fail_mem: + IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); + kfree_skb(skb); + return -1; +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 6ebefd46f718..fb5010c27a22 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2536,7 +2536,9 @@ static int rt6_fill_node(struct net *net, + if (iif) { + #ifdef CONFIG_IPV6_MROUTE + if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { +- int err = ip6mr_get_route(net, skb, rtm, nowait); ++ int err = ip6mr_get_route(net, skb, rtm, nowait, ++ portid); ++ + if (err <= 0) { + if (!nowait) { + if (err == 0) +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 41c026f11edc..70b10ed169ae 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -902,8 +902,14 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) + static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + struct request_sock *req) + { ++ /* RFC 7323 2.3 ++ * The window field (SEG.WND) of every outgoing segment, with the ++ * exception of segments, MUST be right-shifted by ++ * Rcv.Wind.Shift bits: ++ */ + tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, +- req->rcv_wnd, tcp_time_stamp, req->ts_recent, ++ req->rcv_wnd >> inet_rsk(req)->rcv_wscale, ++ tcp_time_stamp, req->ts_recent, + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); + } + +@@ -1324,7 +1330,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + goto discard; + #endif + +- if (sk_filter(sk, skb)) ++ if (tcp_filter(sk, skb)) + goto discard; + + /* +@@ -1495,8 +1501,10 @@ process: + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) + goto discard_and_relse; + +- if (sk_filter(sk, skb)) ++ if (tcp_filter(sk, skb)) + goto discard_and_relse; ++ th = (const struct tcphdr *)skb->data; ++ hdr = ipv6_hdr(skb); + + skb->dev = NULL; + +diff --git a/net/irda/iriap.c b/net/irda/iriap.c +index e1b37f5a2691..bd42516e268b 100644 +--- a/net/irda/iriap.c ++++ b/net/irda/iriap.c +@@ -191,8 +191,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, + + self->magic = IAS_MAGIC; + self->mode = mode; +- if (mode == IAS_CLIENT) +- iriap_register_lsap(self, slsap_sel, mode); ++ if (mode == IAS_CLIENT) { ++ if (iriap_register_lsap(self, slsap_sel, mode)) { ++ kfree(self); ++ return NULL; ++ } ++ } + + self->confirm = callback; + self->priv = priv; +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index e922bf3f422c..11a10d580d9e 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1072,7 +1072,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) + + /* free all potentially still buffered bcast frames */ + local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); +- skb_queue_purge(&sdata->u.ap.ps.bc_buf); ++ ieee80211_purge_tx_queue(&local->hw, 
&sdata->u.ap.ps.bc_buf); + + ieee80211_vif_copy_chanctx_to_vlans(sdata, true); + ieee80211_vif_release_channel(sdata); +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index cd60be8d9aba..f8c7f46008ee 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1952,16 +1952,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) + if (!(status->rx_flags & IEEE80211_RX_AMSDU)) + return RX_CONTINUE; + +- if (ieee80211_has_a4(hdr->frame_control) && +- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && +- !rx->sdata->u.vlan.sta) +- return RX_DROP_UNUSABLE; ++ if (unlikely(ieee80211_has_a4(hdr->frame_control))) { ++ switch (rx->sdata->vif.type) { ++ case NL80211_IFTYPE_AP_VLAN: ++ if (!rx->sdata->u.vlan.sta) ++ return RX_DROP_UNUSABLE; ++ break; ++ case NL80211_IFTYPE_STATION: ++ if (!rx->sdata->u.mgd.use_4addr) ++ return RX_DROP_UNUSABLE; ++ break; ++ default: ++ return RX_DROP_UNUSABLE; ++ } ++ } + +- if (is_multicast_ether_addr(hdr->addr1) && +- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && +- rx->sdata->u.vlan.sta) || +- (rx->sdata->vif.type == NL80211_IFTYPE_STATION && +- rx->sdata->u.mgd.use_4addr))) ++ if (is_multicast_ether_addr(hdr->addr1)) + return RX_DROP_UNUSABLE; + + skb->dev = dev; +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index e960fbe9e271..129905342fc3 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -335,7 +335,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) + skb = skb_dequeue(&ps->bc_buf); + if (skb) { + purged++; +- dev_kfree_skb(skb); ++ ieee80211_free_txskb(&local->hw, skb); + } + total += skb_queue_len(&ps->bc_buf); + } +@@ -417,7 +417,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) + if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { + ps_dbg(tx->sdata, + "BC TX buffer full - dropping the oldest frame\n"); +- dev_kfree_skb(skb_dequeue(&ps->bc_buf)); ++ ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); + } else + tx->local->total_ps_buffered++; + +@@ -2711,7 +2711,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, + sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); + if (!ieee80211_tx_prepare(sdata, &tx, skb)) + break; +- dev_kfree_skb_any(skb); ++ ieee80211_free_txskb(hw, skb); + } + + info = IEEE80211_SKB_CB(skb); +diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c +index 50a15944c6c1..3032ede74e48 100644 +--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c ++++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c +@@ -373,6 +373,20 @@ static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = { + [IP_VS_TCP_S_LAST] = "BUG!", + }; + ++static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = { ++ [IP_VS_TCP_S_NONE] = false, ++ [IP_VS_TCP_S_ESTABLISHED] = true, ++ [IP_VS_TCP_S_SYN_SENT] = true, ++ [IP_VS_TCP_S_SYN_RECV] = true, ++ [IP_VS_TCP_S_FIN_WAIT] = false, ++ [IP_VS_TCP_S_TIME_WAIT] = false, ++ [IP_VS_TCP_S_CLOSE] = false, ++ [IP_VS_TCP_S_CLOSE_WAIT] = false, ++ [IP_VS_TCP_S_LAST_ACK] = false, ++ [IP_VS_TCP_S_LISTEN] = false, ++ [IP_VS_TCP_S_SYNACK] = true, ++}; ++ + #define sNO IP_VS_TCP_S_NONE + #define sES IP_VS_TCP_S_ESTABLISHED + #define sSS IP_VS_TCP_S_SYN_SENT +@@ -396,6 +410,13 @@ static const char * tcp_state_name(int state) + return tcp_state_name_table[state] ? 
tcp_state_name_table[state] : "?"; + } + ++static bool tcp_state_active(int state) ++{ ++ if (state >= IP_VS_TCP_S_LAST) ++ return false; ++ return tcp_state_active_table[state]; ++} ++ + static struct tcp_states_t tcp_states [] = { + /* INPUT */ + /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ +@@ -518,12 +539,12 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, + + if (dest) { + if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && +- (new_state != IP_VS_TCP_S_ESTABLISHED)) { ++ !tcp_state_active(new_state)) { + atomic_dec(&dest->activeconns); + atomic_inc(&dest->inactconns); + cp->flags |= IP_VS_CONN_F_INACTIVE; + } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && +- (new_state == IP_VS_TCP_S_ESTABLISHED)) { ++ tcp_state_active(new_state)) { + atomic_inc(&dest->activeconns); + atomic_dec(&dest->inactconns); + cp->flags &= ~IP_VS_CONN_F_INACTIVE; +diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c +index 3b18dd1be7d9..07ed65af05a6 100644 +--- a/net/netfilter/nf_log.c ++++ b/net/netfilter/nf_log.c +@@ -253,7 +253,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write, + size_t size = *lenp; + int r = 0; + int tindex = (unsigned long)table->extra1; +- struct net *net = current->nsproxy->net_ns; ++ struct net *net = table->extra2; + + if (write) { + if (size > sizeof(buf)) +@@ -306,7 +306,6 @@ static int netfilter_log_sysctl_init(struct net *net) + 3, "%d", i); + nf_log_sysctl_table[i].procname = + nf_log_sysctl_fnames[i]; +- nf_log_sysctl_table[i].data = NULL; + nf_log_sysctl_table[i].maxlen = + NFLOGGER_NAME_LEN * sizeof(char); + nf_log_sysctl_table[i].mode = 0644; +@@ -317,6 +316,9 @@ static int netfilter_log_sysctl_init(struct net *net) + } + } + ++ for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) ++ table[i].extra2 = net; ++ + net->nf.nf_log_dir_header = register_net_sysctl(net, + "net/netfilter/nf_log", + table); +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 2d454a235e84..24f006623f7c 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -3384,6 +3384,7 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void + } + if (msg == NETDEV_UNREGISTER) { + packet_cached_dev_reset(po); ++ fanout_release(sk); + po->ifindex = -1; + if (po->prot_hook.dev) + dev_put(po->prot_hook.dev); +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index d9cbecb62aca..df938b2ab848 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -3428,6 +3428,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + ++ /* Report violation if chunk len overflows */ ++ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); ++ if (ch_end > skb_tail_pointer(skb)) ++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, ++ commands); ++ + /* Now that we know we at least have a chunk header, + * do things that are type appropriate. 
+ */ +@@ -3459,12 +3465,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, + } + } + +- /* Report violation if chunk len overflows */ +- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); +- if (ch_end > skb_tail_pointer(skb)) +- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, +- commands); +- + ch = (sctp_chunkhdr_t *) ch_end; + } while (ch_end < skb_tail_pointer(skb)); + +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index bdc3fb66717d..ede7c540ea24 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -1231,9 +1231,12 @@ static int __sctp_connect(struct sock* sk, + + timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); + +- err = sctp_wait_for_connect(asoc, &timeo); +- if ((err == 0 || err == -EINPROGRESS) && assoc_id) ++ if (assoc_id) + *assoc_id = asoc->assoc_id; ++ err = sctp_wait_for_connect(asoc, &timeo); ++ /* Note: the asoc may be freed after the return of ++ * sctp_wait_for_connect. ++ */ + + /* Don't free association on exit. */ + asoc = NULL; +@@ -4259,7 +4262,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, + static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, + int __user *optlen) + { +- if (len <= 0) ++ if (len == 0) + return -EINVAL; + if (len > sizeof(struct sctp_event_subscribe)) + len = sizeof(struct sctp_event_subscribe); +@@ -5770,6 +5773,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, + if (get_user(len, optlen)) + return -EFAULT; + ++ if (len < 0) ++ return -EINVAL; ++ + sctp_lock_sock(sk); + + switch (optname) { +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c +index 89a588b4478b..c996a71fc9f1 100644 +--- a/net/sunrpc/svc.c ++++ b/net/sunrpc/svc.c +@@ -1182,11 +1182,17 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); + + /* Encode reply */ +- if (rqstp->rq_dropme) { ++ if (*statp == rpc_drop_reply || ++ rqstp->rq_dropme) { + if (procp->pc_release) + procp->pc_release(rqstp, NULL, rqstp->rq_resp); + goto dropit; + } ++ if (*statp == rpc_autherr_badcred) { ++ if (procp->pc_release) ++ procp->pc_release(rqstp, NULL, rqstp->rq_resp); ++ goto err_bad_auth; ++ } + if (*statp == rpc_success && + (xdr = procp->pc_encode) && + !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { +diff --git a/net/wireless/core.h b/net/wireless/core.h +index fd35dae547c4..d06da43e265f 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -69,6 +69,7 @@ struct cfg80211_registered_device { + struct list_head bss_list; + struct rb_root bss_tree; + u32 bss_generation; ++ u32 bss_entries; + struct cfg80211_scan_request *scan_req; /* protected by RTNL */ + struct cfg80211_sched_scan_request *sched_scan_req; + unsigned long suspend_at; +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 81019ee3ddc8..15ef12732d28 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -55,6 +55,19 @@ + * also linked into the probe response struct. + */ + ++/* ++ * Limit the number of BSS entries stored in mac80211. Each one is ++ * a bit over 4k at most, so this limits to roughly 4-5M of memory. ++ * If somebody wants to really attack this though, they'd likely ++ * use small beacons, and only one type of frame, limiting each of ++ * the entries to a much smaller size (in order to generate more ++ * entries in total, so overhead is bigger.) 
++ */ ++static int bss_entries_limit = 1000; ++module_param(bss_entries_limit, int, 0644); ++MODULE_PARM_DESC(bss_entries_limit, ++ "limit to number of scan BSS entries (per wiphy, default 1000)"); ++ + #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) + + static void bss_free(struct cfg80211_internal_bss *bss) +@@ -135,6 +148,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, + + list_del_init(&bss->list); + rb_erase(&bss->rbn, &dev->bss_tree); ++ dev->bss_entries--; ++ WARN_ONCE((dev->bss_entries == 0) ^ list_empty(&dev->bss_list), ++ "rdev bss entries[%d]/list[empty:%d] corruption\n", ++ dev->bss_entries, list_empty(&dev->bss_list)); + bss_ref_put(dev, bss); + return true; + } +@@ -338,6 +355,40 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev) + __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); + } + ++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) ++{ ++ struct cfg80211_internal_bss *bss, *oldest = NULL; ++ bool ret; ++ ++ lockdep_assert_held(&rdev->bss_lock); ++ ++ list_for_each_entry(bss, &rdev->bss_list, list) { ++ if (atomic_read(&bss->hold)) ++ continue; ++ ++ if (!list_empty(&bss->hidden_list) && ++ !bss->pub.hidden_beacon_bss) ++ continue; ++ ++ if (oldest && time_before(oldest->ts, bss->ts)) ++ continue; ++ oldest = bss; ++ } ++ ++ if (WARN_ON(!oldest)) ++ return false; ++ ++ /* ++ * The callers make sure to increase rdev->bss_generation if anything ++ * gets removed (and a new entry added), so there's no need to also do ++ * it here. ++ */ ++ ++ ret = __cfg80211_unlink_bss(rdev, oldest); ++ WARN_ON(!ret); ++ return ret; ++} ++ + const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) + { + while (len > 2 && ies[0] != eid) { +@@ -622,6 +673,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, + const u8 *ie; + int i, ssidlen; + u8 fold = 0; ++ u32 n_entries = 0; + + ies = rcu_access_pointer(new->pub.beacon_ies); + if (WARN_ON(!ies)) +@@ -645,6 +697,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, + /* This is the bad part ... 
*/ + + list_for_each_entry(bss, &dev->bss_list, list) { ++ /* ++ * we're iterating all the entries anyway, so take the ++ * opportunity to validate the list length accounting ++ */ ++ n_entries++; ++ + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) + continue; + if (bss->pub.channel != new->pub.channel) +@@ -674,6 +732,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev, + new->pub.beacon_ies); + } + ++ WARN_ONCE(n_entries != dev->bss_entries, ++ "rdev bss entries[%d]/list[len:%d] corruption\n", ++ dev->bss_entries, n_entries); ++ + return true; + } + +@@ -818,7 +880,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, + } + } + ++ if (dev->bss_entries >= bss_entries_limit && ++ !cfg80211_bss_expire_oldest(dev)) { ++ kfree(new); ++ goto drop; ++ } ++ + list_add_tail(&new->list, &dev->bss_list); ++ dev->bss_entries++; + rb_insert_bss(dev, new); + found = new; + } +diff --git a/security/keys/proc.c b/security/keys/proc.c +index 217b6855e815..374c3301b802 100644 +--- a/security/keys/proc.c ++++ b/security/keys/proc.c +@@ -188,7 +188,7 @@ static int proc_keys_show(struct seq_file *m, void *v) + struct timespec now; + unsigned long timo; + key_ref_t key_ref, skey_ref; +- char xbuf[12]; ++ char xbuf[16]; + int rc; + + key_ref = make_key_ref(key, 0); +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index 8eddece217bb..dfed3ef02475 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -1856,10 +1856,10 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) + if (substream->timer_running) + snd_timer_interrupt(substream->timer, 1); + _end: ++ kill_fasync(&runtime->fasync, SIGIO, POLL_IN); + snd_pcm_stream_unlock_irqrestore(substream, flags); + if (runtime->transfer_ack_end) + runtime->transfer_ack_end(substream); +- kill_fasync(&runtime->fasync, SIGIO, POLL_IN); + } + + EXPORT_SYMBOL(snd_pcm_period_elapsed); +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c +index 500765f20843..3e9761685c8c 100644 +--- a/sound/core/rawmidi.c ++++ b/sound/core/rawmidi.c +@@ -1564,10 +1564,12 @@ static int snd_rawmidi_dev_register(struct snd_device *device) + } + list_add_tail(&rmidi->list, &snd_rawmidi_devices); + sprintf(name, "midiC%iD%i", rmidi->card->number, rmidi->device); ++ mutex_unlock(®ister_mutex); + if ((err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI, + rmidi->card, rmidi->device, + &snd_rawmidi_f_ops, rmidi, name)) < 0) { + snd_printk(KERN_ERR "unable to register rawmidi device %i:%i\n", rmidi->card->number, rmidi->device); ++ mutex_lock(®ister_mutex); + list_del(&rmidi->list); + mutex_unlock(®ister_mutex); + return err; +@@ -1575,6 +1577,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device) + if (rmidi->ops && rmidi->ops->dev_register && + (err = rmidi->ops->dev_register(rmidi)) < 0) { + snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device); ++ mutex_lock(®ister_mutex); + list_del(&rmidi->list); + mutex_unlock(®ister_mutex); + return err; +@@ -1603,7 +1606,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device) + } + } + #endif /* CONFIG_SND_OSSEMUL */ +- mutex_unlock(®ister_mutex); + sprintf(name, "midi%d", rmidi->device); + entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root); + if (entry) { +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 3476895ee1fb..749857a889e6 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -291,8 +291,19 @@ int snd_timer_open(struct snd_timer_instance **ti, + } + timeri->slave_class 
+ timeri->slave_id = slave_id;
+- if (list_empty(&timer->open_list_head) && timer->hw.open)
+- timer->hw.open(timer);
++
++ if (list_empty(&timer->open_list_head) && timer->hw.open) {
++ int err = timer->hw.open(timer);
++ if (err) {
++ kfree(timeri->owner);
++ kfree(timeri);
++
++ module_put(timer->module);
++ mutex_unlock(&register_mutex);
++ return err;
++ }
++ }
++
+ list_add_tail(&timeri->open_list, &timer->open_list_head);
+ snd_timer_check_master(timeri);
+ mutex_unlock(&register_mutex);
+@@ -817,6 +828,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
+ timer->tmr_subdevice = tid->subdevice;
+ if (id)
+ strlcpy(timer->id, id, sizeof(timer->id));
++ timer->sticks = 1;
+ INIT_LIST_HEAD(&timer->device_list);
+ INIT_LIST_HEAD(&timer->open_list_head);
+ INIT_LIST_HEAD(&timer->active_list_head);
+@@ -1922,19 +1934,23 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ if (err < 0)
+ goto _error;
+
++ mutex_lock(&tu->ioctl_lock);
+ if (tu->tread) {
+ if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
+ sizeof(struct snd_timer_tread))) {
++ mutex_unlock(&tu->ioctl_lock);
+ err = -EFAULT;
+ goto _error;
+ }
+ } else {
+ if (copy_to_user(buffer, &tu->queue[tu->qhead++],
+ sizeof(struct snd_timer_read))) {
++ mutex_unlock(&tu->ioctl_lock);
+ err = -EFAULT;
+ goto _error;
+ }
+ }
++ mutex_unlock(&tu->ioctl_lock);
+
+ tu->qhead %= tu->queue_size;
+
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index 53754f5edeb1..097c8c4daaea 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1422,6 +1422,7 @@ snd_ali_playback_pointer(struct snd_pcm_substream *substream)
+ spin_unlock(&codec->reg_lock);
+ snd_ali_printk("playback pointer returned cso=%xh.\n", cso);
+
++ cso %= runtime->buffer_size;
+ return cso;
+ }
+
+@@ -1442,6 +1443,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+ cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2));
+ spin_unlock(&codec->reg_lock);
+
++ cso %= runtime->buffer_size;
+ return cso;
+ }
+
+diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
+index eb05c7ed6d05..5dc6b23b634e 100644
+--- a/sound/soc/omap/omap-mcpdm.c
++++ b/sound/soc/omap/omap-mcpdm.c
+@@ -393,8 +393,8 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai)
+ pm_runtime_get_sync(mcpdm->dev);
+ omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
+
+- ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler,
+- 0, "McPDM", (void *)mcpdm);
++ ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
++ (void *)mcpdm);
+
+ pm_runtime_put_sync(mcpdm->dev);
+
+@@ -414,6 +414,7 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
+ {
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
++ free_irq(mcpdm->irq, (void *)mcpdm);
+ pm_runtime_disable(mcpdm->dev);
+
+ return 0;
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 4b12bf850325..f7718c8fc93e 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -831,8 +831,8 @@ new_symbol:
+ * For misannotated, zeroed, ASM function sizes.
+ */ + if (nr > 0) { +- symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); ++ symbols__fixup_duplicate(&dso->symbols[map->type]); + if (kmap) { + /* + * We need to fixup this here too because we create new +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index 8cf3b5426a9a..a2fe760605e1 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -673,8 +673,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, + if (dso__load_all_kallsyms(dso, filename, map) < 0) + return -1; + +- symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); ++ symbols__fixup_duplicate(&dso->symbols[map->type]); + + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; +diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c +index 808d5a9d5dcf..bcc6125657e5 100644 +--- a/tools/vm/slabinfo.c ++++ b/tools/vm/slabinfo.c +@@ -493,10 +493,11 @@ static void slab_stats(struct slabinfo *s) + s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total); + } + +- if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) ++ if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) { + printf("\nCmpxchg_double Looping\n------------------------\n"); + printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n", + s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail); ++ } + } + + static void report(struct slabinfo *s)